X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Fldlm%2Fldlm_extent.c;h=8240eab288633b0eceaf678db99741e569c02aba;hb=55e8287b7024ee4ac280806caea9636dd454a0ed;hp=246ed17cd33763dc5514e2cb786b75445e19238c;hpb=4155844f40cb2ba58d794c1091af090d80eca102;p=fs%2Flustre-release.git diff --git a/lustre/ldlm/ldlm_extent.c b/lustre/ldlm/ldlm_extent.c index 246ed17c..8240eab 100644 --- a/lustre/ldlm/ldlm_extent.c +++ b/lustre/ldlm/ldlm_extent.c @@ -36,100 +36,328 @@ * - the maximum extent * - containing the requested extent * - and not overlapping existing conflicting extents outside the requested one - * - * An alternative policy is to not shrink the new extent when conflicts exist */ + */ static void ldlm_extent_internal_policy(struct list_head *queue, struct ldlm_lock *req, struct ldlm_extent *new_ex) { struct list_head *tmp; ldlm_mode_t req_mode = req->l_req_mode; - __u64 req_start = req->l_policy_data.l_extent.start; - __u64 req_end = req->l_policy_data.l_extent.end; + __u64 req_start = req->l_req_extent.start; + __u64 req_end = req->l_req_extent.end; + int conflicting = 0; ENTRY; - if (new_ex->start == req_start && new_ex->end == req_end) { - EXIT; - return; - } + lockmode_verify(req_mode); list_for_each(tmp, queue) { struct ldlm_lock *lock; + struct ldlm_extent *l_extent; + lock = list_entry(tmp, struct ldlm_lock, l_res_link); + l_extent = &lock->l_policy_data.l_extent; - if (req == lock) { + if (new_ex->start == req_start && new_ex->end == req_end) { EXIT; return; } - /* if lock doesn't overlap new_ex, skip it. */ - if (lock->l_policy_data.l_extent.end < new_ex->start || - lock->l_policy_data.l_extent.start > new_ex->end) + /* Don't conflict with ourselves */ + if (req == lock) continue; /* Locks are compatible, overlap doesn't matter */ - if (lockmode_compat(lock->l_req_mode, req_mode)) + /* Until bug 20 is fixed, try to avoid granting overlapping + * locks on one client (they take a long time to cancel) */ + if (lockmode_compat(lock->l_req_mode, req_mode) && + lock->l_export != req->l_export) + continue; + + /* If this is a high-traffic lock, don't grow downwards at all + * or grow upwards too much */ + ++conflicting; + if (conflicting > 4) + new_ex->start = req_start; + + /* If lock doesn't overlap new_ex, skip it. */ + if (l_extent->end < new_ex->start || + l_extent->start > new_ex->end) + continue; + + /* Locks conflicting in requested extents and we can't satisfy + * both locks, so ignore it. Either we will ping-pong this + * extent (we would regardless of what extent we granted) or + * lock is unused and it shouldn't limit our extent growth. */ + if (lock->l_req_extent.end >= req_start && + lock->l_req_extent.start <= req_end) continue; - if (lock->l_policy_data.l_extent.start < req_start) { - if (lock->l_policy_data.l_extent.end == ~0) { + /* We grow extents downwards only as far as they don't overlap + * with already-granted locks, on the assumtion that clients + * will be writing beyond the initial requested end and would + * then need to enqueue a new lock beyond previous request. + * l_req_extent->end strictly < req_start, checked above. 
*/ + if (l_extent->start < req_start && new_ex->start != req_start) { + if (l_extent->end >= req_start) new_ex->start = req_start; - new_ex->end = req_end; - EXIT; - return; - } - new_ex->start = MIN(lock->l_policy_data.l_extent.end+1, - req_start); + else + new_ex->start = min(l_extent->end+1, req_start); } - if (lock->l_policy_data.l_extent.end > req_end) { - if (lock->l_policy_data.l_extent.start == 0) { - new_ex->start = req_start; - new_ex->end = req_end; - EXIT; - return; - } - new_ex->end = MAX(lock->l_policy_data.l_extent.start-1, - req_end); + /* If we need to cancel this lock anyways because our request + * overlaps the granted lock, we grow up to its requested + * extent start instead of limiting this extent, assuming that + * clients are writing forwards and the lock had over grown + * its extent downwards before we enqueued our request. */ + if (l_extent->end > req_end) { + if (l_extent->start <= req_end) + new_ex->end = max(lock->l_req_extent.start - 1, + req_end); + else + new_ex->end = max(l_extent->start - 1, req_end); } } + +#define LDLM_MAX_GROWN_EXTENT (32 * 1024 * 1024 - 1) + if (conflicting > 32 && (req_mode == LCK_PW || req_mode == LCK_CW)) { + if (req_end < req_start + LDLM_MAX_GROWN_EXTENT) + new_ex->end = min(req_start + LDLM_MAX_GROWN_EXTENT, + new_ex->end); + } EXIT; } -/* Determine if the lock is compatible with all locks on the queue. */ +/* In order to determine the largest possible extent we can grant, we need + * to scan all of the queues. */ +static void ldlm_extent_policy(struct ldlm_resource *res, + struct ldlm_lock *lock, int *flags) +{ + struct ldlm_extent new_ex = { .start = 0, .end = ~0}; + + if (lock->l_req_mode == LCK_GROUP) + return; + + ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex); + ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex); + + if (new_ex.start != lock->l_policy_data.l_extent.start || + new_ex.end != lock->l_policy_data.l_extent.end) { + *flags |= LDLM_FL_LOCK_CHANGED; + lock->l_policy_data.l_extent.start = new_ex.start; + lock->l_policy_data.l_extent.end = new_ex.end; + } +} + +/* Determine if the lock is compatible with all locks on the queue. + * We stop walking the queue if we hit ourselves so we don't take + * conflicting locks enqueued after us into accound, or we'd wait forever. + * + * 0 if the lock is not compatible + * 1 if the lock is compatible + * 2 if this group lock is compatible and requires no further checking + * negative error, such as EWOULDBLOCK for group locks + */ static int ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req, - int send_cbs) + int *flags, struct list_head *work_list, + struct list_head **insertp) { struct list_head *tmp; - struct ldlm_lock *lock; + struct list_head *save = NULL; + struct ldlm_lock *lock = NULL; ldlm_mode_t req_mode = req->l_req_mode; - __u64 req_start = req->l_policy_data.l_extent.start; - __u64 req_end = req->l_policy_data.l_extent.end; int compat = 1; + int found = 0; ENTRY; + lockmode_verify(req_mode); + + /* Extent locks are only queued once. We can get back here with + * insertp != NULL if the blocking ASTs returned -ERESTART. 
*/ + if (!list_empty(&req->l_res_link)) + insertp = NULL; + + if (req->l_req_mode != LCK_GROUP) { + __u64 req_start = req->l_req_extent.start; + __u64 req_end = req->l_req_extent.end; + + list_for_each(tmp, queue) { + lock = list_entry(tmp, struct ldlm_lock, l_res_link); + if (req == lock) + break; + + if (lock->l_req_mode == LCK_GROUP) { + if (*flags & LDLM_FL_BLOCK_NOWAIT) + RETURN(-EWOULDBLOCK); + + /* No blocking ASTs are sent for group locks. */ + compat = 0; + + /* there's a blocking group lock in front + * of us on the queue. It can be held + * indefinitely, so don't timeout. */ + if (insertp) { + *flags |= LDLM_FL_NO_TIMEOUT; + /* lock_bitlock(req) is held here. */ + req->l_flags |= LDLM_FL_NO_TIMEOUT; + } + + if (work_list) + continue; + else + break; + } + + /* locks are compatible, overlap doesn't matter */ + if (lockmode_compat(lock->l_req_mode, req_mode)) + continue; + + if (lock->l_policy_data.l_extent.end < req_start || + lock->l_policy_data.l_extent.start > req_end) + continue; + + compat = 0; + + if (!work_list) + break; + + if (lock->l_blocking_ast) + ldlm_add_ast_work_item(lock, req, work_list); + } + + if (insertp) + *insertp = queue; + + RETURN(compat); + } + list_for_each(tmp, queue) { lock = list_entry(tmp, struct ldlm_lock, l_res_link); - if (req == lock) - RETURN(compat); + break; - /* locks are compatible, overlap doesn't matter */ - if (lockmode_compat(lock->l_req_mode, req_mode)) - continue; + if (lock->l_req_mode != LCK_GROUP) { + if (lock->l_req_mode != lock->l_granted_mode) { + /* we must be traversing the waitq. */ + + /* If a group lock was already found then + * req can be queued before any extent locks + * that come after the found group lock. */ + if (found) + break; + + if (!insertp) { + /* We've hit a conflicting extent lock + * on the waitq before hitting the req + * group lock. See comments below. */ + compat = 0; + break; + } + + /* Group locks are not normally blocked by + * waiting PR|PW locks. */ + + /* If NO_TIMEOUT was sent back to the client + * we can queue the group lock in front of + * this extent lock. */ + if (lock->l_flags & LDLM_FL_NO_TIMEOUT) { + if (save == NULL) + save = tmp; + continue; + } - /* if lock doesn't overlap skip it */ - if (lock->l_policy_data.l_extent.end < req_start || - lock->l_policy_data.l_extent.start > req_end) + /* If we did NOT send NO_TIMEOUT back to the + * client for this extent lock then the client + * could possibly timeout if we queue this + * group lock before it, so don't. This is the + * only way to get a conflicting extent lock + * in front of a group lock on the waitq. */ + } + + compat = 0; + if (!work_list) { + LASSERT(save == NULL); + break; + } + + /* If we previously skipped over some extent locks + * because we thought we were going to queue the + * group lock in front of them then we need to go back + * and send blocking ASTs for the locks we skipped. */ + if (save != NULL) { + struct ldlm_lock *lck2; + + for (; save != tmp; save = save->next) { + lck2 = list_entry(save, + struct ldlm_lock, + l_res_link); + + /* If there was a group lock after save + * then we would have exited this loop + * above. */ + LASSERT(lck2->l_req_mode!=LCK_GROUP); + + if (lck2->l_blocking_ast) { + ldlm_add_ast_work_item(lck2,req, + work_list); + } + } + save = NULL; + } + + if (lock->l_blocking_ast) + ldlm_add_ast_work_item(lock, req, work_list); continue; + } + + /* If it was safe to insert a group lock at save, + * i.e. save != NULL, then this group lock already + * on the queue would have been inserted before save. 
*/ + LASSERT(save == NULL); + + /* Note: no blocking ASTs are sent for group locks. */ + + if (lock->l_policy_data.l_extent.gid == + req->l_policy_data.l_extent.gid) { + /* group locks with this gid already on the waitq. */ + found = 2; + + if (lock->l_req_mode == lock->l_granted_mode) { + /* if a group lock with this gid has already + * been granted then grant this one. */ + compat = 2; + break; + } + } else { + if (found == 2) + break; + + /* group locks already exist on the queue. */ + found = 1; + + if (*flags & LDLM_FL_BLOCK_NOWAIT) + RETURN(-EWOULDBLOCK); + + compat = 0; - if (!send_cbs) - RETURN(0); + /* there's a blocking group lock in front + * of us on the queue. It can be held + * indefinitely, so don't timeout. */ + *flags |= LDLM_FL_NO_TIMEOUT; + + /* the only reason to continue traversing the + * list at this point is to find the proper + * place to insert the lock in the waitq. */ + if (!insertp) + break; + } + } - compat = 0; - if (lock->l_blocking_ast) - ldlm_add_ast_work_item(lock, req, NULL, 0); + if (insertp != NULL) { + if (save != NULL) + *insertp = save; + else + *insertp = tmp; } RETURN(compat); @@ -145,67 +373,123 @@ ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req, * - the caller has NOT initialized req->lr_tmp, so we must * - must call this function with the ns lock held once */ int ldlm_process_extent_lock(struct ldlm_lock *lock, int *flags, int first_enq, - ldlm_error_t *err) + ldlm_error_t *err, struct list_head *work_list) { struct ldlm_resource *res = lock->l_resource; - struct ldlm_extent new_ex = {0, ~0}; struct list_head rpc_list = LIST_HEAD_INIT(rpc_list); - int rc; + struct list_head *insertp = NULL; + int rc, rc2; ENTRY; LASSERT(list_empty(&res->lr_converting)); + *err = ELDLM_OK; if (!first_enq) { - LASSERT(res->lr_tmp != NULL); - rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 0); - if (!rc) - RETURN(LDLM_ITER_STOP); - rc = ldlm_extent_compat_queue(&res->lr_waiting, lock, 0); - if (!rc) + /* -EWOULDBLOCK can't occur here since (flags & BLOCK_NOWAIT) + * lock requests would either be granted or fail on their + * first_enq. flags should always be zero here, and if that + * ever changes we want to find out. */ + LASSERT(*flags == 0); + rc = ldlm_extent_compat_queue(&res->lr_granted, lock, + flags, NULL, NULL); + if (rc == 1) { + rc = ldlm_extent_compat_queue(&res->lr_waiting, lock, + flags, NULL, NULL); + } + if (rc == 0) RETURN(LDLM_ITER_STOP); ldlm_resource_unlink_lock(lock); - ldlm_grant_lock(lock, NULL, 0, 1); + ldlm_extent_policy(res, lock, flags); + lock_bitlock(lock); + lock->l_flags &= ~LDLM_FL_NO_TIMEOUT; + unlock_bitlock(lock); + ldlm_grant_lock(lock, work_list); RETURN(LDLM_ITER_CONTINUE); } - /* In order to determine the largest possible extent we can - * grant, we need to scan all of the queues. 
*/ - ldlm_extent_internal_policy(&res->lr_granted, lock, &new_ex); - ldlm_extent_internal_policy(&res->lr_waiting, lock, &new_ex); - - if (new_ex.start != lock->l_policy_data.l_extent.start || - new_ex.end != lock->l_policy_data.l_extent.end) { - *flags |= LDLM_FL_LOCK_CHANGED; - lock->l_policy_data.l_extent.start = new_ex.start; - lock->l_policy_data.l_extent.end = new_ex.end; - } - restart: - LASSERT(res->lr_tmp == NULL); - res->lr_tmp = &rpc_list; - rc = ldlm_extent_compat_queue(&res->lr_granted, lock, 1); - rc += ldlm_extent_compat_queue(&res->lr_waiting, lock, 1); - res->lr_tmp = NULL; - - if (rc != 2) { - /* If either of the compat_queue()s returned 0, then we + rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, &rpc_list, + NULL); + if (rc < 0) + GOTO(destroylock, rc); + if (rc == 2) + goto grant; + + /* Traverse the waiting list in case there are other conflicting + * lock requests ahead of us in the queue and send blocking ASTs */ + rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock, flags, &rpc_list, + &insertp); + if (rc2 < 0) + GOTO(destroylock, rc); + if (rc + rc2 == 2) { + grant: + ldlm_extent_policy(res, lock, flags); + ldlm_resource_unlink_lock(lock); + lock->l_flags &= ~LDLM_FL_NO_TIMEOUT; + ldlm_grant_lock(lock, NULL); + } else { + /* If either of the compat_queue()s returned failure, then we * have ASTs to send and must go onto the waiting list. * * bug 2322: we used to unlink and re-add here, which was a * terrible folly -- if we goto restart, we could get * re-ordered! Causes deadlock, because ASTs aren't sent! */ if (list_empty(&lock->l_res_link)) - ldlm_resource_add_lock(res, &res->lr_waiting, lock); - l_unlock(&res->lr_namespace->ns_lock); - rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list); - l_lock(&res->lr_namespace->ns_lock); + ldlm_resource_add_lock(res, insertp, lock); + unlock_res(res); + rc = ldlm_run_bl_ast_work(&rpc_list); + lock_res(res); if (rc == -ERESTART) GOTO(restart, -ERESTART); *flags |= LDLM_FL_BLOCK_GRANTED; - } else { - ldlm_resource_unlink_lock(lock); - ldlm_grant_lock(lock, NULL, 0, 0); } + RETURN(0); + + destroylock: + list_del_init(&lock->l_res_link); + unlock_res(res); + ldlm_lock_destroy(lock); + lock_res(res); + *err = rc; + RETURN(rc); +} + +/* When a lock is cancelled by a client, the KMS may undergo change if this + * is the "highest lock". This function returns the new KMS value. + * Caller must hold ns_lock already. + * + * NB: A lock on [x,y] protects a KMS of up to y + 1 bytes! */ +__u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms) +{ + struct ldlm_resource *res = lock->l_resource; + struct list_head *tmp; + struct ldlm_lock *lck; + __u64 kms = 0; + ENTRY; + + /* don't let another thread in ldlm_extent_shift_kms race in + * just after we finish and take our lock into account in its + * calculation of the kms */ + + lock->l_flags |= LDLM_FL_KMS_IGNORE; + + list_for_each(tmp, &res->lr_granted) { + lck = list_entry(tmp, struct ldlm_lock, l_res_link); + + if (lck->l_flags & LDLM_FL_KMS_IGNORE) + continue; + + if (lck->l_policy_data.l_extent.end >= old_kms) + RETURN(old_kms); + + /* This extent _has_ to be smaller than old_kms (checked above) + * so kms can only ever be smaller or the same as old_kms. */ + if (lck->l_policy_data.l_extent.end + 1 > kms) + kms = lck->l_policy_data.l_extent.end + 1; + } + LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms); + + RETURN(kms); }
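
The hunks above rework the extent-growing policy: the server hands back the largest extent that still contains the client's requested range while avoiding every conflicting extent that lies outside that range. A minimal standalone C sketch of that core idea follows; toy_extent and toy_grow_extent are hypothetical names rather than the Lustre API, and the per-export check, the "conflicting" counter and the LDLM_MAX_GROWN_EXTENT cap from the patch are intentionally left out.

#include <stdint.h>
#include <stdio.h>

struct toy_extent { uint64_t start, end; };

/* Grow the granted extent to the largest range that contains *req and does
 * not overlap any conflicting extent lying outside the requested range. */
static void toy_grow_extent(const struct toy_extent *req,
                            const struct toy_extent *conflicts, int count,
                            struct toy_extent *granted)
{
        int i;

        /* Start from the whole file and shrink around conflicts. */
        granted->start = 0;
        granted->end   = UINT64_MAX;

        for (i = 0; i < count; i++) {
                const struct toy_extent *c = &conflicts[i];

                /* A conflict that overlaps the request itself cannot be
                 * avoided by shrinking, so it does not limit growth. */
                if (c->end >= req->start && c->start <= req->end)
                        continue;

                /* Conflict entirely below the request: raise the start. */
                if (c->end < req->start && c->end + 1 > granted->start)
                        granted->start = c->end + 1;

                /* Conflict entirely above the request: lower the end. */
                if (c->start > req->end && c->start - 1 < granted->end)
                        granted->end = c->start - 1;
        }
}

int main(void)
{
        struct toy_extent req = { 4096, 8191 };
        struct toy_extent conflicts[] = { { 0, 1023 },
                                          { 1 << 20, UINT64_MAX } };
        struct toy_extent granted;

        toy_grow_extent(&req, conflicts, 2, &granted);

        /* Prints "granted [1024, 1048575]": as large as possible without
         * touching the conflicting extents on either side of the request. */
        printf("granted [%llu, %llu]\n",
               (unsigned long long)granted.start,
               (unsigned long long)granted.end);
        return 0;
}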
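
The new ldlm_extent_shift_kms() recomputes the KMS ("known minimum size") when a lock is cancelled: the lock being dropped is ignored, the old KMS is kept if any remaining granted extent still reaches it, and otherwise the KMS shrinks to one byte past the end of the highest remaining extent (a lock on [x, y] protects a KMS of up to y + 1 bytes). The sketch below shows only that arithmetic; toy_shift_kms and its array input are hypothetical simplifications of the real function, which walks res->lr_granted and marks the cancelled lock with LDLM_FL_KMS_IGNORE instead of taking an index.

#include <stdint.h>
#include <stdio.h>

struct toy_extent { uint64_t start, end; };

static uint64_t toy_shift_kms(const struct toy_extent *granted, int count,
                              int cancelled_idx, uint64_t old_kms)
{
        uint64_t kms = 0;
        int i;

        for (i = 0; i < count; i++) {
                if (i == cancelled_idx)         /* the lock being cancelled */
                        continue;
                if (granted[i].end >= old_kms)  /* old KMS is still covered */
                        return old_kms;
                if (granted[i].end + 1 > kms)   /* otherwise KMS only shrinks */
                        kms = granted[i].end + 1;
        }
        return kms;
}

int main(void)
{
        struct toy_extent granted[] = {
                { 0,      4095   },
                { 4096,   65535  },
                { 65536,  131071 },     /* this lock is being cancelled */
        };
        uint64_t old_kms = 131072;

        /* Cancelling the highest lock shrinks the KMS from 131072 to 65536,
         * one byte past the end of the highest remaining extent. */
        printf("new kms = %llu\n",
               (unsigned long long)toy_shift_kms(granted, 3, 2, old_kms));
        return 0;
}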