Callbacks from dm-multisnap.c

These functions are called directly from exception-store-neutral code. They
find the chunk or perform chunk reallocations.

Signed-off-by: Mikulas Patocka

---
 drivers/md/multisnap/dm-rolling-io.c |  262 +++++++++++++++++++++++++++++++++++
 1 file changed, 262 insertions(+)

Index: linux-2.6.39-rc7-fast/drivers/md/multisnap/dm-rolling-io.c
===================================================================
--- /dev/null	1970-01-01 00:00:00.000000000 +0000
+++ linux-2.6.39-rc7-fast/drivers/md/multisnap/dm-rolling-io.c	2011-05-10 12:06:01.000000000 +0200
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2009 Red Hat Czech, s.r.o.
+ *
+ * Mikulas Patocka
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm-rolling.h"
+
+/*
+ * These flags are just a check to make sure the sequence described in
+ * dm-multisnap.h is not violated.
+ */
+#define QA_MAY_ADD_NEXT_REMAP		1
+#define QA_MAY_MAKE_CHUNK_WRITEABLE	2
+/*
+ * Use the flag DM_MULTISNAP_BT_PREVIOUS_COVERED to mark that all previous
+ * chunks are covered.
+ */
+#define QA_PREVIOUS_COVERED		4
+
+/*
+ * This function will check if there is remapping for a given snapid/chunk.
+ * It returns 1 if remapping exists and is read-only (shared by other snapshots)
+ * and 2 if it exists and is read-write (not shared by anyone).
+ */
+int dm_rolling_find_snapshot_chunk(struct dm_exception_store *s,
+				   snapid_t snapid, chunk_t chunk,
+				   int write, chunk_t *result)
+{
+	int r;
+	struct bt_key key;
+	rolling_snapid_t from, to;
+	rolling_snapid_t find_from, find_to;
+	__u32 sink_flags;
+
+	from = dm_rolling_find_next_subsnapshot(s, snapid);
+	to = snapid;
+
+	key.chunk = chunk;
+	key.snap_from = snapid;
+	key.snap_to = snapid;
+	r = dm_rolling_find_in_btree(s, &key, result, &sink_flags);
+	if (unlikely(r < 0))
+		return r;
+
+	if (!r) {
+		s->query_new_key.chunk = chunk;
+		s->query_new_key.snap_from = from;
+		s->query_new_key.snap_to = to;
+		s->query_active = QA_MAY_ADD_NEXT_REMAP;
+		return 0;
+	}
+
+	if (!write)
+		return 1;
+
+	/*
+	 * We are writing to a snapshot --- check if anything outside the
+	 * <from, to> range exists; if it does, it needs to be copied.
+	 */
+	if (key.snap_from < from) {
+		if (likely(dm_rolling_find_next_snapid_range(s, key.snap_from,
+							     &find_from,
+							     &find_to))) {
+			if (find_from < from) {
+				s->query_new_key.chunk = chunk;
+				s->query_new_key.snap_from = from;
+				s->query_new_key.snap_to = key.snap_to;
+				s->query_block_from = key.snap_from;
+				s->query_block_to = key.snap_to;
+				s->query_active = QA_MAY_MAKE_CHUNK_WRITEABLE;
+				return 1;
+			}
+			if (unlikely(find_from > from))
+				BUG();	/* SNAPID not in our tree */
+		} else
+			BUG();	/* we're asking for a SNAPID not in our tree */
+	}
+	if (key.snap_to > to) {
+		if (likely(dm_rolling_find_next_snapid_range(s, to + 1,
+							     &find_from, &find_to))) {
+			if (find_from <= key.snap_to) {
+				s->query_new_key.chunk = chunk;
+				s->query_new_key.snap_from = key.snap_from;
+				s->query_new_key.snap_to = to;
+				s->query_block_from = key.snap_from;
+				s->query_block_to = key.snap_to;
+				s->query_active = QA_MAY_MAKE_CHUNK_WRITEABLE;
+				return 1;
+			}
+		}
+	}
+	return 2;
+}
+
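+/*
+ * The query functions in this file communicate through s->query_new_key,
+ * s->query_block_from/to and s->query_active.  dm_rolling_find_snapshot_chunk
+ * above and dm_rolling_query_next_remap below prime this state and set one of
+ * the QA_* flags; dm_rolling_add_next_remap and dm_rolling_make_chunk_writeable
+ * consume it and clear query_active, so their BUG_ON checks catch callers that
+ * violate the sequence described in dm-multisnap.h.
+ */
+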
+/*
+ * Reset the query/remap state machine.
+ */
+void dm_rolling_start_origin_query(struct dm_exception_store *s, chunk_t chunk)
+{
+	int r;
+	struct bt_key key;
+	chunk_t sink;
+	__u32 flags;
+
+	s->query_active = 0;
+	s->query_snapid = 0;
+	s->query_chunk = chunk;
+
+	/*
+	 * Here comes an optimization to avoid O(number_of_snapshots) complexity
+	 *
+	 * We ask for the last remapped entry for the requested chunk, if it
+	 * has DM_MULTISNAP_BT_PREVIOUS_COVERED set, we assume that all previous
+	 * snapshots are remapped and we don't have to check them.
+	 */
+
+	key.chunk = chunk;
+	key.snap_from = key.snap_to = DM_SNAPID_T_LAST;
+	r = dm_rolling_find_previous_in_btree(s, &key);
+	if (unlikely(r <= 0))
+		return;
+	if (key.chunk != chunk)
+		return;
+	r = dm_rolling_find_in_btree(s, &key, &sink, &flags);
+	if (unlikely(r <= 0)) {
+		if (!r) {
+			DM_MULTISNAP_SET_ERROR(s->dm, -EFSERROR,
+				("%s: key (%llx, %llx-%llx) was returned as a previous key, but was not found in the btree",
+				__func__,
+				(unsigned long long)key.chunk,
+				(unsigned long long)key.snap_from,
+				(unsigned long long)key.snap_to));
+		}
+		return;
+	}
+	if (likely(flags & DM_MULTISNAP_BT_PREVIOUS_COVERED))
+		s->query_snapid = key.snap_to + 1;
+}
+
+/*
+ * Find the next snapid range to remap.
+ */
+int dm_rolling_query_next_remap(struct dm_exception_store *s)
+{
+	int r;
+	chunk_t sink;
+	__u32 sink_flags;
+	rolling_snapid_t from, to;
+
+	s->query_active = 0;
+
+	while (dm_rolling_find_next_snapid_range(s, s->query_snapid, &from, &to)) {
+		struct bt_key key;
+next_btree_search:
+		if (dm_multisnap_has_error(s->dm))
+			return -1;
+		key.chunk = s->query_chunk;
+		key.snap_from = from;
+		key.snap_to = to;
+		r = dm_rolling_find_in_btree(s, &key, &sink, &sink_flags);
+		if (unlikely(r < 0))
+			return -1;
+
+		if (!r) {
+			s->query_new_key.chunk = s->query_chunk;
+			s->query_new_key.snap_from = from;
+			s->query_new_key.snap_to = to;
+			s->query_active = QA_MAY_ADD_NEXT_REMAP | QA_PREVIOUS_COVERED;
+			return 1;
+		}
+
+		if (key.snap_from > from) {
+			s->query_new_key.chunk = s->query_chunk;
+			s->query_new_key.snap_from = from;
+			s->query_new_key.snap_to = key.snap_from - 1;
+			s->query_active = QA_MAY_ADD_NEXT_REMAP | QA_PREVIOUS_COVERED;
+			return 1;
+		}
+
+		if (key.snap_to < to) {
+			from = key.snap_to + 1;
+			goto next_btree_search;
+		}
+
+		s->query_snapid = to + 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Perform the remap on the range returned by dm_rolling_query_next_remap.
+ */
+void dm_rolling_add_next_remap(struct dm_exception_store *s,
+			       union chunk_descriptor *cd, chunk_t *new_chunk)
+{
+	int r;
+	__u32 bt_flags = s->query_active & QA_PREVIOUS_COVERED ?
+			 DM_MULTISNAP_BT_PREVIOUS_COVERED : 0;
+
+	BUG_ON(!(s->query_active & QA_MAY_ADD_NEXT_REMAP));
+	s->query_active = 0;
+
+	cd->range.from = s->query_new_key.snap_from;
+	cd->range.to = s->query_new_key.snap_to;
+
+	r = dm_rolling_alloc_blocks(s, new_chunk, 1, 0);
+	if (unlikely(r < 0))
+		return;
+
+	dm_multisnap_status_lock(s->dm);
+	s->data_allocated++;
+	dm_multisnap_status_unlock(s->dm);
+
+	dm_rolling_add_to_btree(s, &s->query_new_key, *new_chunk, bt_flags);
+	dm_rolling_transition_mark(s);
+}
+
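+/*
+ * For an origin write, dm_rolling_query_next_remap returns 1 and fills
+ * s->query_new_key for each snapid range of s->query_chunk that is not yet
+ * remapped, 0 once all ranges are covered and -1 on error; for every range
+ * returned, dm_rolling_add_next_remap above allocates a destination chunk,
+ * accounts it in data_allocated and inserts the new btree entry.
+ */
+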
+/*
+ * Make the chunk writeable (i.e. unshare multiple snapshots).
+ */
+void dm_rolling_make_chunk_writeable(struct dm_exception_store *s,
+				     union chunk_descriptor *cd, chunk_t *new_chunk)
+{
+	int r;
+	__u32 bt_flags = s->query_active & QA_PREVIOUS_COVERED ?
+			 DM_MULTISNAP_BT_PREVIOUS_COVERED : 0;
+
+	BUG_ON(!(s->query_active & QA_MAY_MAKE_CHUNK_WRITEABLE));
+	s->query_active = 0;
+
+	cd->range.from = s->query_block_from;
+	cd->range.to = s->query_block_to;
+
+	r = dm_rolling_alloc_blocks(s, new_chunk, 1, 0);
+	if (unlikely(r < 0))
+		return;
+
+	dm_multisnap_status_lock(s->dm);
+	s->data_allocated++;
+	dm_multisnap_status_unlock(s->dm);
+
+	dm_rolling_restrict_btree_entry(s, &s->query_new_key);
+	dm_rolling_transition_mark(s);
+
+	if (unlikely(dm_multisnap_has_error(s->dm)))
+		return;
+
+	dm_rolling_add_to_btree(s, &s->query_new_key, *new_chunk, bt_flags);
+	dm_rolling_transition_mark(s);
+}
+
+/*
+ * Check if the snapshot belongs to the remap range specified by "cd".
+ */
+int dm_rolling_check_conflict(struct dm_exception_store *s,
+			      union chunk_descriptor *cd, snapid_t snapid)
+{
+	return snapid >= cd->range.from && snapid <= cd->range.to;
+}
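
A rough sketch of how the exception-store-neutral code is expected to drive the
callbacks above for a write to the origin may help review. It only illustrates
the calling sequence using the signatures from this file; the function name
example_origin_write is made up, and the real caller in dm-multisnap.c also
holds the store lock, performs the actual chunk copy and presumably uses
dm_rolling_check_conflict to hold back bios that hit an in-flight remap, which
is only hinted at in the comments.

#include "dm-rolling.h"

/* Illustrative only, not part of the patch. */
static void example_origin_write(struct dm_exception_store *s, chunk_t chunk)
{
	union chunk_descriptor cd;
	chunk_t new_chunk;

	/* reset the per-chunk query state */
	dm_rolling_start_origin_query(s, chunk);

	/* 1 = another snapid range needs a remap, 0 = done, -1 = error */
	while (dm_rolling_query_next_remap(s) > 0) {
		/* allocate a destination chunk and create its btree entry */
		dm_rolling_add_next_remap(s, &cd, &new_chunk);
		if (dm_multisnap_has_error(s->dm))
			break;
		/*
		 * The generic code would now copy the chunk data to new_chunk;
		 * snapshot I/O whose snapid matches
		 * dm_rolling_check_conflict(s, &cd, snapid) is assumed to wait
		 * until the copy completes (not shown in this patch).
		 */
	}
}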