LU-10841 ldlm: ASSERTION(lock->l_granted_mode!=lock->l_req_mode)
diff --git a/lustre/ldlm/ldlm_plain.c b/lustre/ldlm/ldlm_plain.c
index 9b2af34..aa074f5 100644
--- a/lustre/ldlm/ldlm_plain.c
+++ b/lustre/ldlm/ldlm_plain.c
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
+/*
+ * GPL HEADER START
  *
- *  Copyright (c) 2002, 2003 Cluster File Systems, Inc.
- *   Author: Peter Braam <braam@clusterfs.com>
- *   Author: Phil Schwan <phil@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
- *   This file is part of Lustre, http://www.lustre.org.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
  *
- *   Lustre is free software; you can redistribute it and/or
- *   modify it under the terms of version 2 of the GNU General Public
- *   License as published by the Free Software Foundation.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
  *
- *   Lustre is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
- *   You should have received a copy of the GNU General Public License
- *   along with Lustre; if not, write to the Free Software
- *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2017, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/ldlm/ldlm_plain.c
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ */
+
+/**
+ * This file contains the implementation of the PLAIN lock type.
+ *
+ * PLAIN locks are the simplest form of LDLM locking, and are used when
+ * there only needs to be a single lock on a resource. This avoids some
+ * of the complexity of EXTENT and IBITS lock types, but doesn't allow
+ * different "parts" of a resource to be locked concurrently.  Example
+ * use cases for PLAIN locks include locking of MGS configuration logs
+ * and (as of Lustre 2.4) quota records.
  */
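
Editorial aside, not part of the patch: to make the PLAIN grant rule concrete, here is a detached userspace sketch. A PLAIN resource keeps a single queue of whole-resource locks, and a request is grantable only if its mode is compatible with every lock already queued. The mode names and the compatibility matrix are simplified stand-ins for LDLM's lck_compat_array, so treat the values as illustrative only.

/* Illustrative model of the PLAIN grant rule; not Lustre API. */
#include <stdio.h>

enum mode { NL, CR, CW, PR, PW, EX, MODE_MAX };

/* Toy compatibility matrix, compat[held][requested]; the real table
 * is the lck_compat_array bitmask declared in lustre_dlm.h. */
static const int compat[MODE_MAX][MODE_MAX] = {
        /*         NL CR CW PR PW EX */
        /* NL */ {  1, 1, 1, 1, 1, 1 },
        /* CR */ {  1, 1, 1, 1, 1, 0 },
        /* CW */ {  1, 1, 1, 0, 0, 0 },
        /* PR */ {  1, 1, 0, 1, 0, 0 },
        /* PW */ {  1, 1, 0, 0, 0, 0 },
        /* EX */ {  1, 0, 0, 0, 0, 0 },
};

/* PLAIN rule: one queue per resource, and the request must be
 * compatible with every lock already on it. */
static int plain_grantable(const enum mode *queued, int n, enum mode req)
{
        for (int i = 0; i < n; i++)
                if (!compat[queued[i]][req])
                        return 0;
        return 1;
}

int main(void)
{
        enum mode granted[] = { PR, PR };

        printf("PR request: %d\n", plain_grantable(granted, 2, PR)); /* 1 */
        printf("EX request: %d\n", plain_grantable(granted, 2, EX)); /* 0 */
        return 0;
}

Two readers holding PR coexist while EX must wait, which is why PLAIN suits single-object users such as MGS configuration logs.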
 
 #define DEBUG_SUBSYSTEM S_LDLM
 
-#ifdef __KERNEL__
-#include <linux/lustre_dlm.h>
-#include <linux/obd_support.h>
-#include <linux/lustre_lib.h>
-#else
-#include <liblustre.h>
-#endif
+#include <lustre_dlm.h>
+#include <obd_support.h>
+#include <lustre_lib.h>
 
 #include "ldlm_internal.h"
 
+#ifdef HAVE_SERVER_SUPPORT
+/**
+ * Determine if the lock is compatible with all locks on the queue.
+ *
+ * If \a work_list is provided, conflicting locks are linked there.
+ * If \a work_list is not provided, we return on the first conflict.
+ *
+ * \retval 0 if there are conflicting locks in \a queue
+ * \retval 1 if the lock is compatible with all locks in \a queue
+ */
 static inline int
 ldlm_plain_compat_queue(struct list_head *queue, struct ldlm_lock *req,
-                        int send_cbs)
+                       struct list_head *work_list)
 {
-        struct list_head *tmp;
-        struct ldlm_lock *lock;
-        ldlm_mode_t req_mode = req->l_req_mode;
-        int compat = 1;
-        ENTRY;
+       enum ldlm_mode req_mode = req->l_req_mode;
+       struct ldlm_lock *lock, *next_lock;
+       int compat = 1;
+       ENTRY;
+
+       lockmode_verify(req_mode);
 
-        list_for_each(tmp, queue) {
-                lock = list_entry(tmp, struct ldlm_lock, l_res_link);
+       list_for_each_entry_safe(lock, next_lock, queue, l_res_link) {
 
-                if (req == lock)
-                        RETURN(compat);
+               /* We stop walking the queue if we hit ourselves so we don't
+                * take conflicting locks enqueued after us into account,
+                * or we'd wait forever. */
+               if (req == lock)
+                       RETURN(compat);
 
-                if (lockmode_compat(lock->l_req_mode, req_mode))
+               /* Advance the loop cursor past the rest of this mode group. */
+               next_lock = list_entry(list_entry(lock->l_sl_mode.prev,
+                                                 struct ldlm_lock,
+                                                 l_sl_mode)->l_res_link.next,
+                                      struct ldlm_lock, l_res_link);
+
+               if (lockmode_compat(lock->l_req_mode, req_mode))
                         continue;
 
-                if (!send_cbs)
+               if (!work_list)
                         RETURN(0);
 
                 compat = 0;
+
+               /* Add locks of the mode group to \a work_list as
+                * blocking locks for \a req. */
                 if (lock->l_blocking_ast)
-                        ldlm_add_ast_work_item(lock, req, NULL, 0);
+                        ldlm_add_ast_work_item(lock, req, work_list);
+
+               {
+                       struct list_head *head;
+
+                       head = &lock->l_sl_mode;
+                       list_for_each_entry(lock, head, l_sl_mode)
+                               if (lock->l_blocking_ast)
+                                       ldlm_add_ast_work_item(lock, req,
+                                                              work_list);
+               }
         }
 
         RETURN(compat);
 }
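
Editorial aside on the cursor jump above, not part of the patch: the skiplist field l_sl_mode links the first and last lock of a run of same-mode locks, so list_entry(lock->l_sl_mode.prev, ...) lands on the group's tail, and that tail's l_res_link.next is the first lock after the group. The safe-iteration cursor therefore skips the whole group, whose blocking ASTs are queued in one pass by the block above. A minimal array-based sketch of that skip (purely illustrative, not Lustre API):

/* Model of skipping a same-mode group in one step. */
#include <stdio.h>

int main(void)
{
        int queue[] = { 1, 1, 1, 2, 2, 1 };     /* lock modes in queue order */
        int n = sizeof(queue) / sizeof(queue[0]);
        int i = 0;

        while (i < n) {
                /* Find the group tail, standing in for the
                 * l_sl_mode.prev jump in the real code. */
                int tail = i;
                while (tail + 1 < n && queue[tail + 1] == queue[i])
                        tail++;
                printf("mode %d group: locks %d..%d\n", queue[i], i, tail);
                i = tail + 1;                   /* resume after the group */
        }
        return 0;
}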
 
-/* If first_enq is 0 (ie, called from ldlm_reprocess_queue):
- *   - blocking ASTs have already been sent
- *   - the caller has already initialized req->lr_tmp
- *   - must call this function with the ns lock held
+/**
+ * Process a granting attempt for a plain lock.
+ * Must be called with the resource lock held.
  *
- * If first_enq is 1 (ie, called from ldlm_lock_enqueue):
- *   - blocking ASTs have not been sent
- *   - the caller has NOT initialized req->lr_tmp, so we must
- *   - must call this function with the ns lock held once */
-int ldlm_process_plain_lock(struct ldlm_lock *lock, int *flags, int first_enq,
-                            ldlm_error_t *err)
+ * This function looks for any conflicts for \a lock in the granted or
+ * waiting queues. The lock is granted if no conflicts are found in
+ * either queue.
+ */
+int ldlm_process_plain_lock(struct ldlm_lock *lock, __u64 *flags,
+                           enum ldlm_process_intention intention,
+                           enum ldlm_error *err, struct list_head *work_list)
 {
-        struct ldlm_resource *res = lock->l_resource;
-        struct list_head rpc_list = LIST_HEAD_INIT(rpc_list);
-        int rc;
-        ENTRY;
-
-        LASSERT(list_empty(&res->lr_converting));
-
-        if (!first_enq) {
-                LASSERT(res->lr_tmp != NULL);
-                rc = ldlm_plain_compat_queue(&res->lr_granted, lock, 0);
+       struct ldlm_resource *res = lock->l_resource;
+       struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
+                                                       NULL : work_list;
+       int rc;
+       ENTRY;
+
+       LASSERT(lock->l_granted_mode != lock->l_req_mode);
+       check_res_locked(res);
+       *err = ELDLM_OK;
+
+       if (intention == LDLM_PROCESS_RESCAN) {
+               LASSERT(work_list != NULL);
+               rc = ldlm_plain_compat_queue(&res->lr_granted, lock, NULL);
                 if (!rc)
                         RETURN(LDLM_ITER_STOP);
-                rc = ldlm_plain_compat_queue(&res->lr_waiting, lock, 0);
+                rc = ldlm_plain_compat_queue(&res->lr_waiting, lock, NULL);
                 if (!rc)
                         RETURN(LDLM_ITER_STOP);
 
                 ldlm_resource_unlink_lock(lock);
-                ldlm_grant_lock(lock, NULL, 0, 1);
+               ldlm_grant_lock(lock, grant_work);
                 RETURN(LDLM_ITER_CONTINUE);
         }
 
- restart:
-        LASSERT(res->lr_tmp == NULL);
-        res->lr_tmp = &rpc_list;
-        rc = ldlm_plain_compat_queue(&res->lr_granted, lock, 1);
-        rc += ldlm_plain_compat_queue(&res->lr_waiting, lock, 1);
-        res->lr_tmp = NULL;
-
-        if (rc != 2) {
-                /* If either of the compat_queue()s returned 0, then we
-                 * have ASTs to send and must go onto the waiting list.
-                 *
-                 * bug 2322: we used to unlink and re-add here, which was a
-                 * terrible folly -- if we goto restart, we could get
-                 * re-ordered!  Causes deadlock, because ASTs aren't sent! */
-                if (list_empty(&lock->l_res_link))
-                        ldlm_resource_add_lock(res, &res->lr_waiting, lock);
-                l_unlock(&res->lr_namespace->ns_lock);
-                rc = ldlm_run_ast_work(res->lr_namespace, &rpc_list);
-                l_lock(&res->lr_namespace->ns_lock);
-                if (rc == -ERESTART)
-                        GOTO(restart, -ERESTART);
-                *flags |= LDLM_FL_BLOCK_GRANTED;
-        } else {
-                ldlm_resource_unlink_lock(lock);
-                ldlm_grant_lock(lock, NULL, 0, 0);
-        }
-        RETURN(0);
+       rc = ldlm_plain_compat_queue(&res->lr_granted, lock, work_list);
+       rc += ldlm_plain_compat_queue(&res->lr_waiting, lock, work_list);
+
+       if (rc == 2) {
+               ldlm_resource_unlink_lock(lock);
+               ldlm_grant_lock(lock, grant_work);
+       }
+
+       RETURN(LDLM_ITER_CONTINUE);
+}
+#endif /* HAVE_SERVER_SUPPORT */
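
Editorial aside, not part of the patch: the LDLM_ITER_* return values matter to the caller that reprocesses a resource after locks are released. LDLM_ITER_CONTINUE means "keep walking the waiting queue", while LDLM_ITER_STOP (which the RESCAN path above returns on the first conflict) halts the walk so later enqueues cannot overtake a still-blocked lock. A toy model of such a walker follows, with hypothetical stand-ins rather than the real reprocessing code:

/* Toy reprocessing walk over a waiting queue; not Lustre API. */
#include <stdio.h>

enum iter { ITER_CONTINUE, ITER_STOP };

/* Hypothetical stand-in for a per-type processing policy. */
static enum iter process_one(int grantable)
{
        return grantable ? ITER_CONTINUE : ITER_STOP;
}

int main(void)
{
        int waiting[] = { 1, 1, 0, 1 }; /* grantability, in queue order */
        int n = sizeof(waiting) / sizeof(waiting[0]);

        for (int i = 0; i < n; i++) {
                if (process_one(waiting[i]) == ITER_STOP) {
                        printf("stopped at lock %d; the rest keep waiting\n",
                               i);
                        break;
                }
                printf("lock %d granted\n", i);
        }
        return 0;
}

Stopping at the first ungrantable lock preserves FIFO fairness: lock 3 stays queued even though it could have been granted.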
+
+void ldlm_plain_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+                                    union ldlm_policy_data *lpolicy)
+{
+       /* No policy for plain locks */
+}
+
+void ldlm_plain_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
+                                    union ldlm_wire_policy_data *wpolicy)
+{
+       /* No policy for plain locks */
 }
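
Editorial aside, not part of the patch: the two converters above are empty because a PLAIN lock carries no per-lock policy payload, so there is nothing to translate between the wire and local policy unions. For contrast, here is a sketch of what such a converter does for a lock type that carries a range payload; the union and field names are hypothetical, not Lustre's actual layouts.

/* Hypothetical wire/local policy unions; not Lustre's definitions. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

union wire_policy  { struct { uint64_t start, end; } range; };
union local_policy { struct { uint64_t start, end; } range; };

/* A non-empty converter: zero the destination so stale union bytes
 * never leak, then copy each field explicitly. */
static void range_policy_wire_to_local(const union wire_policy *w,
                                       union local_policy *l)
{
        memset(l, 0, sizeof(*l));
        l->range.start = w->range.start;
        l->range.end = w->range.end;
}

int main(void)
{
        union wire_policy w = { .range = { 0, 4095 } };
        union local_policy l;

        range_policy_wire_to_local(&w, &l);
        printf("local range [%llu, %llu]\n",
               (unsigned long long)l.range.start,
               (unsigned long long)l.range.end);
        return 0;
}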