Linux compat 4.16: blk_queue_flag_{set,clear}

The HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY case was overlooked in
the original 10f88c5c commit because blk_queue_write_cache()
was available for the in-kernel builds.

Update the blk_queue_flag_{set,clear} wrappers to call the locked
versions to avoid confusion.  This is safe for all existing callers.

The blk_queue_set_write_cache() function has been updated to use
these wrappers.  This means setting/clearing both QUEUE_FLAG_WC
and QUEUE_FLAG_FUA is no longer atomic, but this is only done early
in zvol_alloc() prior to any requests so there is no issue.

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Reviewed-by: Giuseppe Di Natale <dinatale2@llnl.gov>
Reviewed-by: Kash Pande <kash@tripleback.net>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #7428 
Closes #7431
This commit is contained in:
Brian Behlendorf 2018-04-12 19:46:14 -07:00 committed by GitHub
parent 7fab636188
commit d6bb22171b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 6 additions and 8 deletions

View File

@ -41,7 +41,7 @@ typedef unsigned __bitwise__ fmode_t;
static inline void static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q) blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{ {
queue_flag_set_unlocked(flag, q); queue_flag_set(flag, q);
} }
#endif #endif
@ -49,7 +49,7 @@ blk_queue_flag_set(unsigned int flag, struct request_queue *q)
static inline void static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q) blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{ {
queue_flag_clear_unlocked(flag, q); queue_flag_clear(flag, q);
} }
#endif #endif
@ -72,16 +72,14 @@ static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua) blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{ {
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY) #if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
spin_lock_irq(q->queue_lock);
if (wc) if (wc)
queue_flag_set(QUEUE_FLAG_WC, q); blk_queue_flag_set(QUEUE_FLAG_WC, q);
else else
queue_flag_clear(QUEUE_FLAG_WC, q); blk_queue_flag_clear(QUEUE_FLAG_WC, q);
if (fua) if (fua)
queue_flag_set(QUEUE_FLAG_FUA, q); blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else else
queue_flag_clear(QUEUE_FLAG_FUA, q); blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
spin_unlock_irq(q->queue_lock);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE) #elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
blk_queue_write_cache(q, wc, fua); blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY) #elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)