Restore ARC MFU/MRU pressure
Before r305323 (MFV r302991: 6950 ARC should cache compressed data), arc_read() handled access to a ghost buffer like this: arc_adapt() (from arc_get_data_buf()); arc_access(hdr, hash_lock). I.e., we first accounted for the access to the MFU ghost/MRU ghost buffer and adapted the MFU/MRU sizes (in arc_adapt()), and only then moved the buffer from the ghost state to a regular one. After r305323 the sequence is different: arc_access(hdr, hash_lock); arc_hdr_alloc_pabd(hdr). I.e., we first move the buffer out of the ghost state in arc_access() and then check for access to a ghost-state buffer (in arc_hdr_alloc_pabd() -> arc_get_data_abd() -> arc_get_data_impl() -> arc_adapt()). This is incorrect: arc_adapt() never sees the access to the ghost buffer, because arc_access() has already migrated the buffer from the ghost state to a regular one. The fix is therefore to restore the call to arc_adapt() before arc_access(), and to suppress the call to arc_adapt() after arc_access(). Submitted by: Slawa Olhovchenkov <slw@zxy.spb.ru> MFC after: 2 weeks Sponsored by: Integros [integros.com] Differential Revision: https://reviews.freebsd.org/D19094
This commit is contained in:
parent
27c3a85d07
commit
d8b12a2162
@@ -1560,14 +1560,14 @@ static kmutex_t l2arc_feed_thr_lock;
|
||||
static kcondvar_t l2arc_feed_thr_cv;
|
||||
static uint8_t l2arc_thread_exit;
|
||||
|
||||
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
|
||||
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
|
||||
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
|
||||
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
|
||||
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, boolean_t);
|
||||
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
|
||||
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
|
||||
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
|
||||
static void arc_hdr_free_pabd(arc_buf_hdr_t *);
|
||||
static void arc_hdr_alloc_pabd(arc_buf_hdr_t *);
|
||||
static void arc_hdr_alloc_pabd(arc_buf_hdr_t *, boolean_t);
|
||||
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
|
||||
static boolean_t arc_is_overflowing();
|
||||
static void arc_buf_watch(arc_buf_t *);
|
||||
@@ -3353,14 +3353,14 @@ arc_buf_destroy_impl(arc_buf_t *buf)
|
||||
}
|
||||
|
||||
static void
|
||||
arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr)
|
||||
arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr, boolean_t do_adapt)
|
||||
{
|
||||
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
|
||||
ASSERT(HDR_HAS_L1HDR(hdr));
|
||||
ASSERT(!HDR_SHARED_DATA(hdr));
|
||||
|
||||
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
|
||||
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
|
||||
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, do_adapt);
|
||||
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
|
||||
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
|
||||
|
||||
@@ -3424,7 +3424,7 @@ arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
|
||||
* the compressed or uncompressed data depending on the block
|
||||
* it references and compressed arc enablement.
|
||||
*/
|
||||
arc_hdr_alloc_pabd(hdr);
|
||||
arc_hdr_alloc_pabd(hdr, B_TRUE);
|
||||
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
|
||||
|
||||
return (hdr);
|
||||
@@ -5145,11 +5145,11 @@ arc_is_overflowing(void)
|
||||
}
|
||||
|
||||
static abd_t *
|
||||
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
|
||||
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag, boolean_t do_adapt)
|
||||
{
|
||||
arc_buf_contents_t type = arc_buf_type(hdr);
|
||||
|
||||
arc_get_data_impl(hdr, size, tag);
|
||||
arc_get_data_impl(hdr, size, tag, do_adapt);
|
||||
if (type == ARC_BUFC_METADATA) {
|
||||
return (abd_alloc(size, B_TRUE));
|
||||
} else {
|
||||
@@ -5163,7 +5163,7 @@ arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
|
||||
{
|
||||
arc_buf_contents_t type = arc_buf_type(hdr);
|
||||
|
||||
arc_get_data_impl(hdr, size, tag);
|
||||
arc_get_data_impl(hdr, size, tag, B_TRUE);
|
||||
if (type == ARC_BUFC_METADATA) {
|
||||
return (zio_buf_alloc(size));
|
||||
} else {
|
||||
@@ -5179,12 +5179,13 @@ arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
|
||||
* limit, we'll only signal the reclaim thread and continue on.
|
||||
*/
|
||||
static void
|
||||
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
|
||||
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag, boolean_t do_adapt)
|
||||
{
|
||||
arc_state_t *state = hdr->b_l1hdr.b_state;
|
||||
arc_buf_contents_t type = arc_buf_type(hdr);
|
||||
|
||||
arc_adapt(size, state);
|
||||
if (do_adapt)
|
||||
arc_adapt(size, state);
|
||||
|
||||
/*
|
||||
* If arc_size is currently overflowing, and has grown past our
|
||||
@@ -5935,8 +5936,9 @@ top:
|
||||
* do this after we've called arc_access() to
|
||||
* avoid hitting an assert in remove_reference().
|
||||
*/
|
||||
arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state);
|
||||
arc_access(hdr, hash_lock);
|
||||
arc_hdr_alloc_pabd(hdr);
|
||||
arc_hdr_alloc_pabd(hdr, B_FALSE);
|
||||
}
|
||||
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
|
||||
size = arc_hdr_size(hdr);
|
||||
@@ -6350,7 +6352,7 @@ arc_release(arc_buf_t *buf, void *tag)
|
||||
if (arc_can_share(hdr, lastbuf)) {
|
||||
arc_share_buf(hdr, lastbuf);
|
||||
} else {
|
||||
arc_hdr_alloc_pabd(hdr);
|
||||
arc_hdr_alloc_pabd(hdr, B_TRUE);
|
||||
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
|
||||
buf->b_data, psize);
|
||||
}
|
||||
@@ -6513,7 +6515,7 @@ arc_write_ready(zio_t *zio)
|
||||
* the data into it; otherwise, we share the data directly if we can.
|
||||
*/
|
||||
if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
|
||||
arc_hdr_alloc_pabd(hdr);
|
||||
arc_hdr_alloc_pabd(hdr, B_TRUE);
|
||||
|
||||
/*
|
||||
* Ideally, we would always copy the io_abd into b_pabd, but the
|
||||
|
Loading…
x
Reference in New Issue
Block a user