Use KM_PUSHPAGE in l2arc_write_buffers
There is potential for deadlock in the l2arc_feed thread if KM_PUSHPAGE
is not used for the allocations made in l2arc_write_buffers. Specifically,
if KM_PUSHPAGE is not used for these allocations, it is possible for
reclaim to be triggered which can cause the l2arc_feed thread to deadlock
itself on the ARC_mru mutex. An example of this is demonstrated in the
following backtrace of the l2arc_feed thread:

  crash> bt 4123
  PID: 4123  TASK: ffff88062f8c1500  CPU: 6  COMMAND: "l2arc_feed"
    0 [ffff88062511d610] schedule at ffffffff814eeee0
    1 [ffff88062511d6d8] __mutex_lock_slowpath at ffffffff814f057e
    2 [ffff88062511d748] mutex_lock at ffffffff814f041b
    3 [ffff88062511d768] arc_evict at ffffffffa05130ca [zfs]
    4 [ffff88062511d858] arc_adjust at ffffffffa05139a9 [zfs]
    5 [ffff88062511d878] arc_shrink at ffffffffa0513a95 [zfs]
    6 [ffff88062511d898] arc_kmem_reap_now at ffffffffa0513be8 [zfs]
    7 [ffff88062511d8c8] arc_shrinker_func at ffffffffa0513ccc [zfs]
    8 [ffff88062511d8f8] shrink_slab at ffffffff8112a17a
    9 [ffff88062511d958] do_try_to_free_pages at ffffffff8112bfdf
   10 [ffff88062511d9e8] try_to_free_pages at ffffffff8112c3ed
   11 [ffff88062511da98] __alloc_pages_nodemask at ffffffff8112431d
   12 [ffff88062511dbb8] kmem_getpages at ffffffff8115e632
   13 [ffff88062511dbe8] fallback_alloc at ffffffff8115f24a
   14 [ffff88062511dc68] ____cache_alloc_node at ffffffff8115efc9
   15 [ffff88062511dcc8] __kmalloc at ffffffff8115fbf9
   16 [ffff88062511dd18] kmem_alloc_debug at ffffffffa047b8cb [spl]
   17 [ffff88062511dda8] l2arc_feed_thread at ffffffffa0511e71 [zfs]
   18 [ffff88062511dea8] thread_generic_wrapper at ffffffffa047d1a1 [spl]
   19 [ffff88062511dee8] kthread at ffffffff81090a86
   20 [ffff88062511df48] kernel_thread at ffffffff8100c14a

Signed-off-by: Prakash Surya <surya1@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
parent cf81b00a73
commit 409dc1a570
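For context, below is a minimal sketch of the SPL allocation pattern this
commit switches to. It is not code from the patch itself; the example_cb_t
type and example_alloc_cb() function are hypothetical names used only for
illustration. A KM_SLEEP allocation may enter direct reclaim, which can call
back into arc_shrinker_func() and ultimately arc_evict(); KM_PUSHPAGE asks
the SPL allocator not to initiate that kind of reclaim, so the allocation is
safe in reclaim-sensitive paths such as the l2arc_feed thread.

    #include <sys/kmem.h>   /* SPL kmem_alloc()/kmem_free(), KM_PUSHPAGE */

    /* Hypothetical callback structure, for illustration only. */
    typedef struct example_cb {
            void *ecb_dev;
            void *ecb_head;
    } example_cb_t;

    static example_cb_t *
    example_alloc_cb(void *dev, void *head)
    {
            example_cb_t *cb;

            /*
             * KM_PUSHPAGE rather than KM_SLEEP: this allocation will not
             * recurse into the kernel's direct reclaim path, so it cannot
             * re-enter the ARC shrinker from this thread.
             */
            cb = kmem_alloc(sizeof (example_cb_t), KM_PUSHPAGE);
            cb->ecb_dev = dev;
            cb->ecb_head = head;
            return (cb);
    }

The matching cleanup would use kmem_free(cb, sizeof (example_cb_t)).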
module/zfs/arc.c

@@ -1017,7 +1017,8 @@ arc_cksum_compute(arc_buf_t *buf, boolean_t force)
 		mutex_exit(&buf->b_hdr->b_freeze_lock);
 		return;
 	}
-	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
+	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
+	    KM_PUSHPAGE);
 	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
 	    buf->b_hdr->b_freeze_cksum);
 	mutex_exit(&buf->b_hdr->b_freeze_lock);
@@ -4623,8 +4624,8 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
 			 */
 			list_insert_head(dev->l2ad_buflist, head);
 
-			cb = kmem_alloc(
-			    sizeof (l2arc_write_callback_t), KM_SLEEP);
+			cb = kmem_alloc(sizeof (l2arc_write_callback_t),
+			    KM_PUSHPAGE);
 			cb->l2wcb_dev = dev;
 			cb->l2wcb_head = head;
 			pio = zio_root(spa, l2arc_write_done, cb,
@@ -4634,7 +4635,8 @@ l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
 			/*
 			 * Create and add a new L2ARC header.
 			 */
-			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
+			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t),
+			    KM_PUSHPAGE);
 			hdrl2->b_dev = dev;
 			hdrl2->b_daddr = dev->l2ad_hand;
 