From e55d3b03643f92500610e8e6066b59304e09b93b Mon Sep 17 00:00:00 2001
From: Rich Ercolani
Date: Fri, 15 Mar 2024 06:54:40 -0400
Subject: [PATCH] An experiment

Signed-off-by: Rich Ercolani
---
 module/zfs/zfs_vnops.c | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index 7f39ad6fc775..275b35994e90 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -1068,6 +1068,7 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 	uint_t		inblksz;
 	uint64_t	clear_setid_bits_txg = 0;
 	uint64_t	last_synced_txg = 0;
+	int		busy = 0;
 
 	inoff = *inoffp;
 	outoff = *outoffp;
@@ -1187,6 +1188,12 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 		}
 	}
 
+
+	if (zn_has_cached_data(inzp, inoff, inoff + size - 1)) {
+redo:
+		zn_flush_cached_data(inzp, B_TRUE);
+	}
+
 	/*
 	 * Maintain predictable lock order.
 	 */
@@ -1202,6 +1209,35 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 		    RL_READER);
 	}
 
+	if (zn_has_cached_data(inzp, inoff, inoff + size - 1)) {
+		if (busy == 0) {
+			/*
+			 * We somehow dirtied between flushing and taking the lock, try again
+			 */
+			busy = 1;
+			/*
+			 * If we have wait_dirty set, we might as well try a forced flush
+			 * here too.
+			 */
+			if (zfs_bclone_wait_dirty) {
+				last_synced_txg = spa_last_synced_txg(dmu_objset_spa(inos));
+				txg_wait_synced(dmu_objset_pool(inos),
+				    last_synced_txg + 1);
+			}
+
+			zfs_rangelock_exit(outlr);
+			zfs_rangelock_exit(inlr);
+			goto redo;
+		}
+		else {
+			/*
+			 * We tried once, it's churning too much, give up.
+			 */
+			error = EAGAIN;
+			goto unlock;
+		}
+	}
+
 	inblksz = inzp->z_blksz;
 
 	/*
@@ -1306,6 +1342,8 @@ zfs_clone_range(znode_t *inzp, uint64_t *inoffp, znode_t *outzp,
 	}
 
 	nbps = maxblocks;
+
+	last_synced_txg = spa_last_synced_txg(dmu_objset_spa(inos));
 	error = dmu_read_l0_bps(inos, inzp->z_id, inoff, size, bps,
 	    &nbps);