Background delete operation. Scan the B+tree in the background and find entries
that have unused snapshot IDs. Delete these entries and the associated chunks.

Signed-off-by: Mikulas Patocka

---
 drivers/md/dm-multisnap-delete.c |  137 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 137 insertions(+)

Index: linux-2.6.34-rc1-devel/drivers/md/dm-multisnap-delete.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.34-rc1-devel/drivers/md/dm-multisnap-delete.c	2010-03-17 15:31:36.000000000 +0100
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2009 Red Hat Czech, s.r.o.
+ *
+ * Mikulas Patocka
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-multisnap-mikulas.h"
+
+/*
+ * Commit after this number of deleted entries.
+ * Too big a number causes spurious overflows on a nearly-full device.
+ * Too small a number degrades delete performance.
+ */
+#define COMMIT_AFTER		128
+
+struct list_cookie {
+	struct bt_key key;
+	chunk_t new_chunk;
+};
+
+#define RET_END			1
+#define RET_DO_FREE		2
+#define RET_RESCHEDULE		3
+
+static int list_callback(struct dm_exception_store *s,
+			 struct dm_multisnap_bt_node *node,
+			 struct dm_multisnap_bt_entry *bt, void *cookie)
+{
+	struct list_cookie *lc = cookie;
+	mikulas_snapid_t found_from, found_to;
+
+	lc->key.chunk = read_48(bt, orig_chunk);
+	lc->key.snap_from = mikulas_snapid_to_cpu(bt->snap_from);
+	lc->key.snap_to = mikulas_snapid_to_cpu(bt->snap_to);
+
+	if (unlikely(lc->key.chunk > DM_CHUNK_T_MAX))
+		return RET_END;
+
+	s->delete_rover_chunk = lc->key.chunk;
+	s->delete_rover_snapid = lc->key.snap_to + 1;
+	if (unlikely(!s->delete_rover_snapid))
+		s->delete_rover_chunk++;
+
+	if (!dm_multisnap_find_next_snapid_range(s, lc->key.snap_from,
+						 &found_from, &found_to) ||
+	    found_from > lc->key.snap_to) {
+		/*
+		 * This range maps only unused snapshots, so delete it.
+		 * But we can't do that now, so submit it to the caller.
+		 */
+		lc->new_chunk = read_48(bt, new_chunk);
+		return RET_DO_FREE;
+	}
+
+	/*
+	 * If we are at the last entry in the btree node, drop the lock and
+	 * allow other requests to be processed.
+	 *
+	 * This avoids starvation when there are no entries to delete.
+	 */
+	if (bt == &node->entries[le32_to_cpu(node->n_entries) - 1])
+		return RET_RESCHEDULE;
+
+	return 0;
+}
+
+static void delete_step(struct dm_exception_store *s)
+{
+	struct bt_key key;
+	int r;
+	struct list_cookie lc;
+
+	key.chunk = s->delete_rover_chunk;
+	key.snap_from = s->delete_rover_snapid;
+	key.snap_to = s->delete_rover_snapid;
+
+	r = dm_multisnap_list_btree(s, &key, list_callback, &lc);
+
+	if (unlikely(r < 0))
+		return;
+
+	switch (r) {
+
+	case RET_END:
+		s->flags &= ~DM_MULTISNAP_FLAG_DELETING;
+
+		/* If we finished the job and there is no pending I/O, commit */
+		if (dm_multisnap_can_commit(s->dm))
+			dm_multisnap_call_commit(s->dm);
+
+		return;
+	case RET_DO_FREE:
+		if (unlikely(dm_multisnap_has_error(s->dm)))
+			return;
+
+		dm_multisnap_delete_from_btree(s, &lc.key);
+
+		dm_multisnap_transition_mark(s);
+
+		dm_multisnap_free_block(s, lc.new_chunk, FREELIST_DATA_FLAG);
+
+		/* fall through */
+	case RET_RESCHEDULE:
+		if (dm_multisnap_can_commit(s->dm)) {
+			if (++s->delete_commit_count >= COMMIT_AFTER) {
+				s->delete_commit_count = 0;
+				dm_multisnap_call_commit(s->dm);
+			}
+		}
+		return;
+	default:
+		printk(KERN_CRIT "delete_step: invalid return value %d\n", r);
+		BUG();
+
+	}
+}
+
+void dm_multisnap_background_delete(struct dm_exception_store *s,
+				    struct dm_multisnap_background_work *bw)
+{
+	if (unlikely(dm_multisnap_has_error(s->dm)))
+		return;
+
+	if (s->flags & DM_MULTISNAP_FLAG_DELETING) {
+		delete_step(s);
+	} else if (s->flags & DM_MULTISNAP_FLAG_PENDING_DELETE) {
+		s->flags &= ~DM_MULTISNAP_FLAG_PENDING_DELETE;
+		s->flags |= DM_MULTISNAP_FLAG_DELETING;
+		s->delete_rover_chunk = 0;
+		s->delete_rover_snapid = 0;
+	} else
+		return;
+
+	dm_multisnap_queue_work(s->dm, &s->delete_work);
+}
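For illustration only, not part of the patch: a minimal user-space model of the
background-delete state machine above. The exception store, the B+tree walk and
the on-disk commit are replaced by hypothetical stand-ins (toy_store,
toy_delete_step, toy_background_delete); only the control flow mirrors the code
above: one entry is examined per step, a commit is forced every COMMIT_AFTER
deletions, and the scan ends when the rover runs past the last entry.

/* Toy model of the background delete; the names are invented for this sketch. */
#include <stdio.h>
#include <stdbool.h>

#define COMMIT_AFTER	4	/* small value so the output shows intermediate commits */
#define N_ENTRIES	10

struct toy_store {
	bool deleting;		/* models DM_MULTISNAP_FLAG_DELETING */
	bool pending_delete;	/* models DM_MULTISNAP_FLAG_PENDING_DELETE */
	int rover;		/* models delete_rover_chunk */
	int commit_count;	/* models delete_commit_count */
	bool unused[N_ENTRIES];	/* true: entry belongs to an unused snapshot */
};

static void toy_commit(struct toy_store *s)
{
	printf("commit\n");
	s->commit_count = 0;
}

/* One delete_step(): look at the entry under the rover and act on it. */
static bool toy_delete_step(struct toy_store *s)
{
	if (s->rover >= N_ENTRIES) {
		/* RET_END: scan finished, clear the flag and commit */
		s->deleting = false;
		toy_commit(s);
		return false;
	}

	if (s->unused[s->rover]) {
		/* RET_DO_FREE: drop the entry and free its chunk */
		printf("delete entry %d and free its chunk\n", s->rover);
		if (++s->commit_count >= COMMIT_AFTER)
			toy_commit(s);
	}
	s->rover++;
	return true;		/* keep going (RET_RESCHEDULE / next entry) */
}

/* dm_multisnap_background_delete() analogue, driven by a plain loop
 * instead of a requeued work item. */
static void toy_background_delete(struct toy_store *s)
{
	if (!s->deleting && s->pending_delete) {
		s->pending_delete = false;
		s->deleting = true;
		s->rover = 0;
	}
	while (s->deleting && toy_delete_step(s))
		;
}

int main(void)
{
	struct toy_store s = { .pending_delete = true };
	int i;

	for (i = 0; i < N_ENTRIES; i++)
		s.unused[i] = (i % 2) == 0;	/* mark every other entry unused */

	toy_background_delete(&s);
	return 0;
}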