From: Vitaly Fertman
Date: Fri, 13 Aug 2010 08:55:00 +0000 (+0400)
Subject: b=23221 merge lock descriptors if overlapped
X-Git-Tag: 2.0.50~1
X-Git-Url: https://git.whamcloud.com/?a=commitdiff_plain;h=1d33a9a46cbf7e401343fd12dba916d0849bf8a1;p=fs%2Flustre-release.git

b=23221 merge lock descriptors if overlapped
i=green
i=ericm

Lock descriptors prepared by vvp_mmap_locks() and ccc_io_one_lock_index()
may overlap and must be merged.
---
diff --git a/lustre/llite/vvp_io.c b/lustre/llite/vvp_io.c
index 4d6e5b3..2034759 100644
--- a/lustre/llite/vvp_io.c
+++ b/lustre/llite/vvp_io.c
@@ -180,6 +180,11 @@ static int vvp_mmap_locks(const struct lu_env *env,
                                                   policy.l_extent.end);
                         descr->cld_enq_flags = flags;
                         result = cl_io_lock_alloc_add(env, io, descr);
+
+                        CDEBUG(D_VFSTRACE, "lock: %i: [%lu, %lu]\n",
+                               descr->cld_mode, descr->cld_start,
+                               descr->cld_end);
+
                         if (result < 0)
                                 RETURN(result);

diff --git a/lustre/obdclass/cl_io.c b/lustre/obdclass/cl_io.c
index 64ef712..db2e6e6 100644
--- a/lustre/obdclass/cl_io.c
+++ b/lustre/obdclass/cl_io.c
@@ -233,13 +233,41 @@ cl_lock_descr_fid(const struct cl_lock_descr *descr)
         return lu_object_fid(&descr->cld_obj->co_lu);
 }

-static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
-                             const struct cl_lock_descr *d1)
+static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
+                              const struct cl_lock_descr *d1)
 {
         return lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1)) ?:
                 __diff_normalize(d0->cld_start, d1->cld_start);
 }

+static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
+                             const struct cl_lock_descr *d1)
+{
+        int ret;
+
+        ret = lu_fid_cmp(cl_lock_descr_fid(d0), cl_lock_descr_fid(d1));
+        if (ret)
+                return ret;
+        if (d0->cld_end < d1->cld_start)
+                return -1;
+        if (d0->cld_start > d0->cld_end)
+                return 1;
+        return 0;
+}
+
+static void cl_lock_descr_merge(struct cl_lock_descr *d0,
+                                const struct cl_lock_descr *d1)
+{
+        d0->cld_start = min(d0->cld_start, d1->cld_start);
+        d0->cld_end = max(d0->cld_end, d1->cld_end);
+
+        if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
+                d0->cld_mode = CLM_WRITE;
+
+        if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
+                d0->cld_mode = CLM_GROUP;
+}
+
 /*
  * Sort locks in lexicographical order of their (fid, start-offset) pairs.
  */
@@ -261,7 +289,7 @@ static void cl_io_locks_sort(struct cl_io *io)
                                              &io->ci_lockset.cls_todo,
                                              cill_linkage) {
                         if (prev != NULL) {
-                                switch (cl_lock_descr_cmp(&prev->cill_descr,
+                                switch (cl_lock_descr_sort(&prev->cill_descr,
                                                           &curr->cill_descr)) {
                                 case 0:
                                         /*
@@ -303,16 +331,41 @@ int cl_queue_match(const cfs_list_t *queue,
                 if (cl_lock_descr_match(&scan->cill_descr, need))
                         RETURN(+1);
         }
-        return 0;
+        RETURN(0);
 }
 EXPORT_SYMBOL(cl_queue_match);

+static int cl_queue_merge(const cfs_list_t *queue,
+                          const struct cl_lock_descr *need)
+{
+        struct cl_io_lock_link *scan;
+
+        ENTRY;
+        cfs_list_for_each_entry(scan, queue, cill_linkage) {
+                if (cl_lock_descr_cmp(&scan->cill_descr, need))
+                        continue;
+                cl_lock_descr_merge(&scan->cill_descr, need);
+                CDEBUG(D_VFSTRACE, "lock: %i: [%lu, %lu]\n",
+                       scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
+                       scan->cill_descr.cld_end);
+                RETURN(+1);
+        }
+        RETURN(0);
+
+}
+
 static int cl_lockset_match(const struct cl_lockset *set,
-                            const struct cl_lock_descr *need, int all_queues)
+                            const struct cl_lock_descr *need)
+{
+        return cl_queue_match(&set->cls_curr, need) ||
+               cl_queue_match(&set->cls_done, need);
+}
+
+static int cl_lockset_merge(const struct cl_lockset *set,
+                            const struct cl_lock_descr *need)
 {
-        return (all_queues ? cl_queue_match(&set->cls_todo, need) : 0) ||
-               cl_queue_match(&set->cls_curr, need) ||
-               cl_queue_match(&set->cls_done, need);
+        return cl_queue_merge(&set->cls_todo, need) ||
+               cl_lockset_match(set, need);
 }

 static int cl_lockset_lock_one(const struct lu_env *env,
@@ -367,7 +420,7 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
         ENTRY;
         result = 0;
         cfs_list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
-                if (!cl_lockset_match(set, &link->cill_descr, 0)) {
+                if (!cl_lockset_match(set, &link->cill_descr)) {
                         /* XXX some locking to guarantee that locks aren't
                          * expanded in between. */
                         result = cl_lockset_lock_one(env, io, set, link);
@@ -555,7 +608,7 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
         int result;

         ENTRY;
-        if (cl_lockset_match(&io->ci_lockset, &link->cill_descr, 1))
+        if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
                 result = +1;
         else {
                 cfs_list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
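
The cl_lock_descr_cmp()/cl_lock_descr_merge() pair introduced above is an
interval-overlap test followed by an interval union: two descriptors on the
same fid are treated as mergeable when their [cld_start, cld_end] extents
intersect, and the surviving descriptor is widened to cover both while its
mode is promoted if the other descriptor asks for CLM_WRITE or CLM_GROUP.
The fragment below is a minimal stand-alone sketch of that idea only:
demo_descr, demo_overlap() and demo_merge() are hypothetical names invented
for illustration, not part of the Lustre cl_lock_descr API, and the
READ < WRITE < GROUP mode ordering is an assumption of the example.

    /*
     * Minimal sketch of overlap detection plus descriptor merging.
     * All names here are hypothetical stand-ins, not Lustre symbols.
     */
    #include <stdio.h>

    enum demo_mode { DEMO_READ, DEMO_WRITE, DEMO_GROUP };

    struct demo_descr {
            unsigned long  start;  /* first page index covered by the lock */
            unsigned long  end;    /* last page index covered by the lock  */
            enum demo_mode mode;   /* requested lock mode                  */
    };

    /* Two extents overlap unless one ends before the other begins. */
    static int demo_overlap(const struct demo_descr *d0,
                            const struct demo_descr *d1)
    {
            return !(d0->end < d1->start || d1->end < d0->start);
    }

    /* Widen d0 to cover d1 as well; keep the stronger of the two modes. */
    static void demo_merge(struct demo_descr *d0, const struct demo_descr *d1)
    {
            if (d1->start < d0->start)
                    d0->start = d1->start;
            if (d1->end > d0->end)
                    d0->end = d1->end;
            if (d1->mode > d0->mode)
                    d0->mode = d1->mode;
    }

    int main(void)
    {
            struct demo_descr a = { .start = 0,  .end = 15, .mode = DEMO_READ  };
            struct demo_descr b = { .start = 10, .end = 31, .mode = DEMO_WRITE };

            if (demo_overlap(&a, &b))
                    demo_merge(&a, &b);

            /* Expect one WRITE request covering pages [0, 31]. */
            printf("merged: mode=%d [%lu, %lu]\n", a.mode, a.start, a.end);
            return 0;
    }

In the patch itself the merge is only attempted against the not-yet-enqueued
cls_todo queue (cl_lockset_merge() tries cl_queue_merge(&set->cls_todo, need)
first and falls back to plain matching), while locks already sitting in
cls_curr and cls_done are still only matched via cl_queue_match(), so locks
that have already been taken are never widened after the fact.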