Add TRIM support for L2ARC.
This adds TRIM support to cache vdevs. When ARC buffers are removed
from the L2ARC in arc_hdr_destroy(), arc_release() or l2arc_evict(),
the size previously occupied by the buffer gets scheduled for TRIMming.
As always, actual TRIMs are only issued to the L2ARC after
txg_trim_limit.
Reviewed by: pjd (mentor)
Approved by: pjd (mentor)
Obtained from: 31aae37399
MFC after: 2 weeks
This commit is contained in:
parent
05f49d92ef
commit
e05aad2d33
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=248572
@ -130,6 +130,7 @@
|
||||
#endif
|
||||
#include <sys/callb.h>
|
||||
#include <sys/kstat.h>
|
||||
#include <sys/trim_map.h>
|
||||
#include <zfs_fletcher.h>
|
||||
#include <sys/sdt.h>
|
||||
|
||||
@ -1691,6 +1692,8 @@ arc_hdr_destroy(arc_buf_hdr_t *hdr)
|
||||
}
|
||||
|
||||
if (l2hdr != NULL) {
|
||||
trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
|
||||
hdr->b_size);
|
||||
list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
|
||||
ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
|
||||
kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
|
||||
@ -3528,6 +3531,8 @@ arc_release(arc_buf_t *buf, void *tag)
|
||||
buf->b_private = NULL;
|
||||
|
||||
if (l2hdr) {
|
||||
trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
|
||||
hdr->b_size);
|
||||
list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
|
||||
kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
|
||||
ARCSTAT_INCR(arcstat_l2_size, -buf_size);
|
||||
@ -4442,6 +4447,8 @@ l2arc_write_done(zio_t *zio)
|
||||
list_remove(buflist, ab);
|
||||
abl2 = ab->b_l2hdr;
|
||||
ab->b_l2hdr = NULL;
|
||||
trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
|
||||
ab->b_size);
|
||||
kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
|
||||
ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
|
||||
}
|
||||
|
@ -36,7 +36,7 @@ extern "C" {
|
||||
|
||||
extern void trim_map_create(vdev_t *vd);
|
||||
extern void trim_map_destroy(vdev_t *vd);
|
||||
extern void trim_map_free(zio_t *zio);
|
||||
extern void trim_map_free(vdev_t *vd, uint64_t offset, uint64_t size);
|
||||
extern boolean_t trim_map_write_start(zio_t *zio);
|
||||
extern void trim_map_write_done(zio_t *zio);
|
||||
|
||||
|
@ -36,8 +36,8 @@
|
||||
* than it would otherwise be as well as ensuring that entire
|
||||
* blocks are invalidated by writes.
|
||||
*/
|
||||
/*
 * End of the region to free: round (offset + size) up to the top-level
 * vdev's ashift so that entire device blocks are invalidated.
 * Arguments are parenthesized to keep the expansion safe for any
 * expression callers pass in.
 */
#define	TRIM_ZIO_END(vd, offset, size)	((offset) + \
	P2ROUNDUP((size), 1ULL << (vd)->vdev_top->vdev_ashift))
|
||||
|
||||
typedef struct trim_map {
|
||||
list_t tm_head; /* List of segments sorted by txg. */
|
||||
@ -272,16 +272,15 @@ trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
|
||||
}
|
||||
|
||||
void
|
||||
trim_map_free(zio_t *zio)
|
||||
trim_map_free(vdev_t *vd, uint64_t offset, uint64_t size)
|
||||
{
|
||||
vdev_t *vd = zio->io_vd;
|
||||
trim_map_t *tm = vd->vdev_trimmap;
|
||||
|
||||
if (zfs_notrim || vd->vdev_notrim || tm == NULL)
|
||||
return;
|
||||
|
||||
mutex_enter(&tm->tm_lock);
|
||||
trim_map_free_locked(tm, zio->io_offset, TRIM_ZIO_END(zio),
|
||||
trim_map_free_locked(tm, offset, TRIM_ZIO_END(vd, offset, size),
|
||||
vd->vdev_spa->spa_syncing_txg);
|
||||
mutex_exit(&tm->tm_lock);
|
||||
}
|
||||
@ -299,7 +298,7 @@ trim_map_write_start(zio_t *zio)
|
||||
return (B_TRUE);
|
||||
|
||||
start = zio->io_offset;
|
||||
end = TRIM_ZIO_END(zio);
|
||||
end = TRIM_ZIO_END(zio->io_vd, start, zio->io_size);
|
||||
tsearch.ts_start = start;
|
||||
tsearch.ts_end = end;
|
||||
|
||||
|
@ -2475,7 +2475,7 @@ zio_vdev_io_start(zio_t *zio)
|
||||
}
|
||||
|
||||
if (vd->vdev_ops->vdev_op_leaf && zio->io_type == ZIO_TYPE_FREE) {
|
||||
trim_map_free(zio);
|
||||
trim_map_free(vd, zio->io_offset, zio->io_size);
|
||||
return (ZIO_PIPELINE_CONTINUE);
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user