mirror of https://github.com/fail0verflow/switch-linux.git
reiserfs: Make cancel_old_flush() reliable
Currently, canceling the delayed work that flushes old data via cancel_old_flush() does not prevent the work from being requeued. Thus, in theory, new work can be queued after cancel_old_flush() from reiserfs_freeze() has run. This will become a larger problem once flush_old_commits() can requeue the work itself. Fix the problem by recording in sbi->work_queued that the flushing work is canceled and must not be requeued.

Signed-off-by: Jan Kara <jack@suse.cz>
parent 6554766150
commit 71b0576bdb
3 changed files with 17 additions and 7 deletions
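The race being closed here follows a common delayed-work pattern. The sketch below is not reiserfs code: the demo structure, its field names, and the three-state flag are illustrative assumptions, using only the stock workqueue and spinlock APIs, to show how recording a "cancelled" state under the same lock the worker takes keeps a self-requeuing work item from coming back after cancel_delayed_work_sync().

/* Minimal sketch of the cancel-flag pattern; not the reiserfs code. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

struct demo {
	spinlock_t lock;
	int state;			/* 0 = idle, 1 = queued, 2 = cancelled */
	struct delayed_work dwork;
};

static struct demo demo;

static void demo_worker(struct work_struct *work)
{
	struct demo *d = container_of(to_delayed_work(work), struct demo, dwork);
	bool requeue;

	/* ... do the periodic flushing work here ... */

	spin_lock(&d->lock);
	requeue = (d->state == 1);	/* never requeue once cancelled */
	spin_unlock(&d->lock);

	if (requeue)
		queue_delayed_work(system_wq, &d->dwork, HZ);
}

static void demo_cancel(struct demo *d)
{
	/* Mark cancelled first so a concurrently running worker won't requeue, */
	spin_lock(&d->lock);
	d->state = 2;
	spin_unlock(&d->lock);
	/* then wait for any in-flight invocation to finish. */
	cancel_delayed_work_sync(&d->dwork);
}

static int __init demo_init(void)
{
	spin_lock_init(&demo.lock);
	demo.state = 1;
	INIT_DELAYED_WORK(&demo.dwork, demo_worker);
	queue_delayed_work(system_wq, &demo.dwork, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	demo_cancel(&demo);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

reiserfs already keeps such a flag in sbi->work_queued; the hunks below teach both the worker and the cancel path to treat it as more than a boolean.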
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1961,7 +1961,7 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
 	 * will be requeued because superblock is being shutdown and doesn't
 	 * have MS_ACTIVE set.
 	 */
-	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
+	reiserfs_cancel_old_flush(sb);
 	/* wait for all commits to finish */
 	cancel_delayed_work_sync(&SB_JOURNAL(sb)->j_work);
 
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -2948,6 +2948,7 @@ int reiserfs_allocate_list_bitmaps(struct super_block *s,
 				   struct reiserfs_list_bitmap *, unsigned int);
 
 void reiserfs_schedule_old_flush(struct super_block *s);
+void reiserfs_cancel_old_flush(struct super_block *s);
 void add_save_link(struct reiserfs_transaction_handle *th,
 		   struct inode *inode, int truncate);
 int remove_save_link(struct inode *inode, int truncate);
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -90,7 +90,9 @@ static void flush_old_commits(struct work_struct *work)
 	s = sbi->s_journal->j_work_sb;
 
 	spin_lock(&sbi->old_work_lock);
-	sbi->work_queued = 0;
+	/* Avoid clobbering the cancel state... */
+	if (sbi->work_queued == 1)
+		sbi->work_queued = 0;
 	spin_unlock(&sbi->old_work_lock);
 
 	reiserfs_sync_fs(s, 1);
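With this change, work_queued is effectively a small state machine rather than a boolean. The labels below are my own reading of the diff, not identifiers from the source:

/* Illustrative names only; the code uses the bare values. */
#define OLD_WORK_IDLE       0	/* nothing queued; a flush may be scheduled */
#define OLD_WORK_QUEUED     1	/* a delayed flush is pending or running */
#define OLD_WORK_CANCELLED  2	/* frozen or shutting down; do not requeue */

flush_old_commits() now clears the flag only when it still reads 1, so a concurrent reiserfs_cancel_old_flush() that has already stored 2 is not clobbered back to 0, which would otherwise let reiserfs_schedule_old_flush() queue new work on a frozen filesystem.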
@@ -117,21 +119,22 @@ void reiserfs_schedule_old_flush(struct super_block *s)
 	spin_unlock(&sbi->old_work_lock);
 }
 
-static void cancel_old_flush(struct super_block *s)
+void reiserfs_cancel_old_flush(struct super_block *s)
 {
 	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
 
-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
 	spin_lock(&sbi->old_work_lock);
-	sbi->work_queued = 0;
+	/* Make sure no new flushes will be queued */
+	sbi->work_queued = 2;
 	spin_unlock(&sbi->old_work_lock);
+	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
 }
 
 static int reiserfs_freeze(struct super_block *s)
 {
 	struct reiserfs_transaction_handle th;
 
-	cancel_old_flush(s);
+	reiserfs_cancel_old_flush(s);
 
 	reiserfs_write_lock(s);
 	if (!(s->s_flags & MS_RDONLY)) {
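The ordering in the renamed helper above is the point of the change: the flag is set to 2 under old_work_lock before cancel_delayed_work_sync() is called. reiserfs_schedule_old_flush() (not shown in this diff) queues work only while the flag is zero, so once 2 is stored no new flush can be scheduled, and the sync cancel then waits out any invocation already in flight. The old code cancelled first and cleared the flag last, leaving a window in which fresh work could be queued; reiserfs_unfreeze() below resets the flag to 0 once writes are allowed again.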
@@ -152,7 +155,13 @@ static int reiserfs_freeze(struct super_block *s)
 
 static int reiserfs_unfreeze(struct super_block *s)
 {
+	struct reiserfs_sb_info *sbi = REISERFS_SB(s);
+
 	reiserfs_allow_writes(s);
+	spin_lock(&sbi->old_work_lock);
+	/* Allow old_work to run again */
+	sbi->work_queued = 0;
+	spin_unlock(&sbi->old_work_lock);
 	return 0;
 }
 
@@ -2194,7 +2203,7 @@ error_unlocked:
 	if (sbi->commit_wq)
 		destroy_workqueue(sbi->commit_wq);
 
-	cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
+	reiserfs_cancel_old_flush(s);
 
 	reiserfs_free_bitmap_cache(s);
 	if (SB_BUFFER_WITH_SB(s))