LU-6142 ldlm: Fix style issues for ldlm folder
author    Arshad Hussain <arshad.hussain@aeoncomputing.com>
          Mon, 12 Feb 2024 06:07:38 +0000 (11:37 +0530)
committer Oleg Drokin <green@whamcloud.com>
          Fri, 23 Feb 2024 07:05:44 +0000 (07:05 +0000)
This patch fixes issues reported by checkpatch
for files under the lustre/ldlm/ folder.
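
Most of the changes are mechanical style cleanups. For example, checkpatch
warns that "braces {} are not necessary for single statement blocks", so
patterns like the following (illustrative code) are collapsed:

        if (rc < 0) {
                return rc;
        }

        /* becomes */
        if (rc < 0)
                return rc;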

Test-Parameters: trivial
Signed-off-by: Arshad Hussain <arshad.hussain@aeoncomputing.com>
Change-Id: I3c15c6a6e3d21bce9c8609e60ec481b484f00480
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/54003
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Timothy Day <timday@amazon.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
lustre/ldlm/ldlm_flock.c
lustre/ldlm/ldlm_inodebits.c
lustre/ldlm/ldlm_internal.h
lustre/ldlm/ldlm_lib.c
lustre/ldlm/ldlm_lockd.c
lustre/ldlm/ldlm_pool.c
lustre/ldlm/ldlm_resource.c

lustre/ldlm/ldlm_flock.c
index a8a4e41..05c47f9 100644
@@ -139,8 +139,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
 {
        ENTRY;
 
-       LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: %#llx)",
-                  mode, flags);
+       LDLM_DEBUG(lock, "%s(mode: %d, flags: %#llx)", __func__, mode, flags);
 
        /* Safe to not lock here, since it should be empty anyway */
        LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
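
Substituting the predefined __func__ identifier for a hard-coded name keeps
the message correct if the function is ever renamed; a minimal sketch:

        LDLM_DEBUG(lock, "ldlm_flock_destroy(...)");   /* goes stale on rename */
        LDLM_DEBUG(lock, "%s(...)", __func__);         /* always current */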
@@ -171,7 +170,6 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
  * one client holds a lock on something and want a lock on something
  * else and at the same time another client has the opposite situation).
  */
-
 struct ldlm_flock_lookup_cb_data {
        __u64 *bl_owner;
        struct ldlm_lock *lock;
@@ -372,9 +370,9 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags,
 #endif
 
        ENTRY;
-       CDEBUG(D_DLMTRACE, "flags %#llx owner %llu pid %u mode %u start "
-              "%llu end %llu\n", *flags,
-              new->l_policy_data.l_flock.owner,
+       CDEBUG(D_DLMTRACE,
+              "flags %#llx owner %llu pid %u mode %u start %llu end %llu\n",
+              *flags, new->l_policy_data.l_flock.owner,
               new->l_policy_data.l_flock.pid, mode,
               req->l_policy_data.l_flock.start,
               req->l_policy_data.l_flock.end);
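
checkpatch reports "quoted string split across lines" because a split string
cannot be found by grepping the source for the logged message; the fix keeps
the whole format string on one line even when it exceeds 80 columns.
Illustratively:

        CDEBUG(D_DLMTRACE, "flags %#llx start "
               "%llu end %llu\n", flags, start, end);      /* hard to grep */
        CDEBUG(D_DLMTRACE,
               "flags %#llx start %llu end %llu\n",
               flags, start, end);                         /* greppable */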
@@ -406,6 +404,7 @@ reprocess:
 #ifdef HAVE_SERVER_SUPPORT
        else {
                int reprocess_failed = 0;
+
                lockmode_verify(mode);
 
                /* This loop determines if there are existing locks
@@ -738,6 +737,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
        struct obd_device *obd;
        enum ldlm_error err;
        int rc = 0;
+
        ENTRY;
 
        CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT2, 4);
@@ -816,8 +816,7 @@ granted:
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
 
-               /* An error is still to be returned, to propagate it up to
-                * ldlm_cli_enqueue_fini() caller. */
+               /* error is returned up to ldlm_cli_enqueue_fini() caller. */
                RETURN(-EIO);
        }
 
@@ -842,8 +841,8 @@ granted:
                        mode = lock->l_req_mode;
 
                if (ldlm_is_flock_deadlock(lock)) {
-                       LDLM_DEBUG(lock, "client-side enqueue deadlock "
-                                  "received");
+                       LDLM_DEBUG(lock,
+                                  "client-side enqueue deadlock received");
                        rc = -EDEADLK;
                }
                ldlm_flock_destroy(lock, mode, LDLM_FL_WAIT_NOREPROC);
@@ -1004,7 +1003,7 @@ static struct cfs_hash_ops ldlm_export_flock_ops = {
 
 int ldlm_init_flock_export(struct obd_export *exp)
 {
-       if(strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
+       if (strcmp(exp->exp_obd->obd_type->typ_name, LUSTRE_MDT_NAME) != 0)
                RETURN(0);
 
        exp->exp_flock_hash =
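
The last hunk also restores the space checkpatch requires between a control
keyword and its opening parenthesis ("space required before the open
parenthesis '('"):

        if(cond)        /* checkpatch ERROR */
        if (cond)       /* preferred */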
lustre/ldlm/ldlm_inodebits.c
index 85df949..af4e744 100644
@@ -210,7 +210,8 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
 
                /* We stop walking the queue if we hit ourselves so we don't
                 * take conflicting locks enqueued after us into account,
-                * or we'd wait forever. */
+                * or we'd wait forever.
+                */
                if (req == lock)
                        RETURN(compat);
 
@@ -241,7 +242,7 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                        }
                } else if (ldlm_cos_same_client(req, lock) ||
                           ldlm_txn_same_server(req, lock)) {
-                       /* COS/TXN locks need to be checked one by one, 
+                       /* COS/TXN locks need to be checked one by one,
                         * because client cookie or initiator id may be
                         * different for locks in mode/policy skiplist.
                         */
@@ -249,8 +250,7 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                }
 
 
-               /* GROUP locks are placed to a head of the waiting list, but
-                * grouped by gid. */
+               /* GROUP(by gid) locks placed to a head of the waiting list */
                if (unlikely(req_mode == LCK_GROUP && !ldlm_is_granted(lock))) {
                        compat = 0;
                        if (lock->l_req_mode != LCK_GROUP) {
@@ -259,7 +259,8 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                break;
                        }
                        /* Still GROUP but a different gid(the same gid would
-                        * be handled above). Keep searching for the same gid */
+                        * be handled above). Keep searching for the same gid
+                        */
                        LASSERT(req->l_policy_data.l_inodebits.li_gid !=
                                lock->l_policy_data.l_inodebits.li_gid);
                        continue;
@@ -318,7 +319,8 @@ ldlm_inodebits_compat_queue(struct list_head *queue, struct ldlm_lock *req,
                                        RETURN(0);
 
                                /* Add locks of the policy group to @work_list
-                                * as blocking locks for @req */
+                                * as blocking locks for @req
+                                */
                                if (lock->l_blocking_ast)
                                        ldlm_add_ast_work_item(lock, req,
                                                               work_list);
@@ -357,6 +359,7 @@ int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *ldlm_flags,
        struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
                                                        NULL : work_list;
        int rc, rc2 = 0;
+
        ENTRY;
 
        *err = ELDLM_LOCK_ABORTED;
@@ -431,7 +434,8 @@ int ldlm_process_inodebits_lock(struct ldlm_lock *lock, __u64 *ldlm_flags,
                        /* There is no sense to set LDLM_FL_NO_TIMEOUT to
                         * @ldlm_flags for DOM lock while they are enqueued
                         * through intents, i.e. @lock here is local which does
-                        * not timeout. */
+                        * not timeout.
+                        */
                        *err = ELDLM_OK;
                }
        } else {
@@ -499,7 +503,8 @@ int ldlm_inodebits_drop(struct ldlm_lock *lock, __u64 to_drop)
        }
 
        /* remove lock from a skiplist and put in the new place
-        * according with new inodebits */
+        * according with new inodebits
+        */
        ldlm_resource_unlink_lock(lock);
        lock->l_policy_data.l_inodebits.bits &= ~to_drop;
        ldlm_grant_lock_with_skiplist(lock);
@@ -659,8 +664,7 @@ void ldlm_inodebits_add_lock(struct ldlm_resource *res, struct list_head *head,
                                                    l_res_link);
                LASSERT(orig->l_policy_data.l_inodebits.bits ==
                        lock->l_policy_data.l_inodebits.bits);
-               /* The is no a use case to insert before with exactly matched
-                * set of bits */
+               /* should not insert before with exactly matched set of bits */
                LASSERT(tail == false);
 
                for (i = 0; i < MDS_INODELOCK_NUMBITS; i++) {
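
The comment reflows in this file all apply the kernel block-comment rule
("Block comments use a trailing */ on a separate line"):

        /* a multi-line comment ends with the
         * closing marker on its own line
         */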
lustre/ldlm/ldlm_internal.h
index db03672..1fdb119 100644
@@ -86,9 +86,9 @@ static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
        return atomic_read(&ns->ns_bref) == 0;
 }
 
-void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *,
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *l,
                                          enum ldlm_side);
-void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *,
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *l,
                                            enum ldlm_side);
 struct ldlm_namespace *ldlm_namespace_first_locked(enum ldlm_side);
 
@@ -108,7 +108,7 @@ extern struct kmem_cache *ldlm_inodebits_slab;
 extern struct kmem_cache *ldlm_interval_tree_slab;
 
 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
-                                     struct ldlm_lock *new);
+                                    struct ldlm_lock *new);
 void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
                                      struct ldlm_lock *new);
 
@@ -125,19 +125,21 @@ void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock);
 void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list);
 int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
                  enum req_location loc, void *data, int size);
-struct ldlm_lock *
-ldlm_lock_create(struct ldlm_namespace *ns, const struct ldlm_res_id *,
-                enum ldlm_type type, enum ldlm_mode mode,
-                const struct ldlm_callback_suite *cbs,
-                void *data, __u32 lvb_len, enum lvb_type lvb_type);
+struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
+                                  const struct ldlm_res_id *res,
+                                  enum ldlm_type type, enum ldlm_mode mode,
+                                  const struct ldlm_callback_suite *cbs,
+                                  void *data, __u32 lvb_len,
+                                  enum lvb_type lvb_type);
 enum ldlm_error ldlm_lock_enqueue(const struct lu_env *env,
-                                 struct ldlm_namespace *,
-                                 struct ldlm_lock **,
+                                 struct ldlm_namespace *l,
+                                 struct ldlm_lock **lock,
                                  void *cookie, __u64 *flags);
-void ldlm_lock_addref_internal(struct ldlm_lock *, enum ldlm_mode mode);
-void ldlm_lock_addref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
-void ldlm_lock_decref_internal(struct ldlm_lock *, enum ldlm_mode mode);
-void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, enum ldlm_mode mode);
+void ldlm_lock_addref_internal(struct ldlm_lock *l, enum ldlm_mode mode);
+void ldlm_lock_addref_internal_nolock(struct ldlm_lock *l, enum ldlm_mode mode);
+void ldlm_lock_decref_internal(struct ldlm_lock *l, enum ldlm_mode mode);
+void ldlm_lock_decref_internal_nolock(struct ldlm_lock *l,
+                                     enum ldlm_mode mode);
 void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
                            struct list_head *work_list);
 #ifdef HAVE_SERVER_SUPPORT
@@ -167,7 +169,7 @@ void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock);
 
 /* ldlm_lockd.c */
 int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
-                           struct ldlm_lock *lock);
+                          struct ldlm_lock *lock);
 int ldlm_bl_to_thread_list(struct ldlm_namespace *ns,
                           struct ldlm_lock_desc *ld,
                           struct list_head *cancels, int count,
@@ -176,7 +178,7 @@ int ldlm_bl_to_thread_ns(struct ldlm_namespace *ns);
 int ldlm_bl_thread_wakeup(void);
 
 void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
-                             struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
+                            struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
 void ldlm_bl_desc2lock(const struct ldlm_lock_desc *ld, struct ldlm_lock *lock);
 
 #ifdef HAVE_SERVER_SUPPORT
@@ -224,10 +226,10 @@ void l_check_no_ns_lock(struct ldlm_namespace *ns);
 extern struct dentry *ldlm_svc_debugfs_dir;
 
 struct ldlm_state {
-        struct ptlrpc_service *ldlm_cb_service;
-        struct ptlrpc_service *ldlm_cancel_service;
-        struct ptlrpc_client *ldlm_client;
-        struct ldlm_bl_pool *ldlm_bl_pool;
+       struct ptlrpc_service *ldlm_cb_service;
+       struct ptlrpc_service *ldlm_cancel_service;
+       struct ptlrpc_client *ldlm_client;
+       struct ldlm_bl_pool *ldlm_bl_pool;
 };
 
 /* interval tree, for LDLM_EXTENT. */
@@ -239,21 +241,22 @@ extern void ldlm_interval_free(struct ldlm_interval *node);
 static inline struct ldlm_extent *
 ldlm_interval_extent(struct ldlm_interval *node)
 {
-        struct ldlm_lock *lock;
+       struct ldlm_lock *lock;
+
        LASSERT(!list_empty(&node->li_group));
 
        lock = list_first_entry(&node->li_group, struct ldlm_lock,
                                l_sl_policy);
-        return &lock->l_policy_data.l_extent;
+       return &lock->l_policy_data.l_extent;
 }
 
 int ldlm_init(void);
 void ldlm_exit(void);
 
 enum ldlm_policy_res {
-        LDLM_POLICY_CANCEL_LOCK,
-        LDLM_POLICY_KEEP_LOCK,
-        LDLM_POLICY_SKIP_LOCK
+       LDLM_POLICY_CANCEL_LOCK,
+       LDLM_POLICY_KEEP_LOCK,
+       LDLM_POLICY_SKIP_LOCK
 };
 
 #define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
@@ -278,7 +281,7 @@ enum ldlm_policy_res {
                                                                           \
                return LDLM_POOL_SYSFS_PRINT_##type(tmp);                  \
        }                                                                  \
-       struct __##var##__dummy_read {;} /* semicolon catcher */
+       struct __##var##__dummy_read { ; } /* semicolon catcher */
 
 #define LDLM_POOL_SYSFS_WRITER_STORE(var, type)                                   \
        static ssize_t var##_store(struct kobject *kobj,                   \
@@ -395,7 +398,7 @@ void ldlm_ibits_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
                                     union ldlm_policy_data *lpolicy);
 void ldlm_ibits_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
                                     union ldlm_wire_policy_data *wpolicy);
-void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpolicy,
+void ldlm_extent_policy_wire_to_local(const union ldlm_wire_policy_data *wpol,
                                      union ldlm_policy_data *lpolicy);
 void ldlm_extent_policy_local_to_wire(const union ldlm_policy_data *lpolicy,
                                      union ldlm_wire_policy_data *wpolicy);
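
The header changes name previously anonymous prototype arguments and re-align
continuation lines, addressing checkpatch's "function definition argument ...
should also have an identifier name" warning and its open-parenthesis
alignment check. A sketch with hypothetical declarations:

        void fn(struct ldlm_lock *, enum ldlm_mode);       /* unnamed argument */
        void fn(struct ldlm_lock *l, enum ldlm_mode mode); /* named */
        int fn2(struct ldlm_lock *l,
                enum ldlm_mode mode);                      /* aligned with '(' */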
lustre/ldlm/ldlm_lib.c
index e64e79b..c0793b5 100644
@@ -846,7 +846,8 @@ static inline int target_check_recovery_timer(struct obd_device *target)
 
        /* the recovery timer should expire, but it isn't triggered,
         * it's better to abort the recovery of this target to speed up
-        * the recovery of the whole cluster. */
+        * the recovery of the whole cluster.
+        */
        spin_lock(&target->obd_dev_lock);
        if (target->obd_recovering) {
                CERROR("%s: Aborting recovery\n", target->obd_name);
@@ -1797,9 +1798,9 @@ static void target_finish_recovery(struct lu_target *lut)
                CERROR("%s: Recovery queues ( %s%s%s) are not empty\n",
                       obd->obd_name,
                       list_empty(&obd->obd_req_replay_queue) ? "" : "req ",
-                      list_empty(&obd->obd_lock_replay_queue) ? \
+                      list_empty(&obd->obd_lock_replay_queue) ?
                                  "" : "lock ",
-                      list_empty(&obd->obd_final_req_queue) ? \
+                      list_empty(&obd->obd_final_req_queue) ?
                                  "" : "final ");
                spin_unlock(&obd->obd_recovery_task_lock);
                LBUG();
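
The dropped backslashes reflect checkpatch's "Avoid unnecessary line
continuations": a trailing '\' is only meaningful inside a #define body,
while an ordinary C expression may simply wrap. Illustratively:

        #define P(s) \
                printk(s)               /* continuation required in a macro */

        str = list_empty(q) ?
                "" : "lock ";           /* no '\' needed in plain code */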
lustre/ldlm/ldlm_lockd.c
index 20985bd..3a89924 100644
@@ -242,7 +242,7 @@ static int expired_lock_main(void *arg)
 
                        /* Check if we need to prolong timeout */
                        if (!CFS_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_TIMEOUT) &&
-                           lock->l_callback_timestamp != 0 && /* not AST error */
+                           lock->l_callback_timestamp != 0 && /* not AST err */
                            ldlm_lock_busy(lock)) {
                                LDLM_DEBUG(lock, "prolong the busy lock");
                                lock_res_and_lock(lock);
@@ -699,7 +699,8 @@ timeout_t ldlm_bl_timeout_by_rpc(struct ptlrpc_request *req)
        timeout = at_timeout + INITIAL_CONNECT_TIMEOUT + netl + req_timeout;
 
        /* Client's timeout is calculated as at_est2timeout(), let's be a bit
-        * more conservative than client */
+        * more conservative than client
+        */
        return max(timeout + (timeout >> 4),
                   (timeout_t)obd_get_ldlm_enqueue_min(obd));
 }
@@ -1009,7 +1010,8 @@ int ldlm_server_blocking_ast(struct ldlm_lock *lock,
        body->lock_handle[0] = lock->l_remote_handle;
        body->lock_handle[1].cookie = lock->l_handle.h_cookie;
        body->lock_desc = *desc;
-       body->lock_flags |= ldlm_flags_to_wire(lock->l_flags & LDLM_FL_AST_MASK);
+       body->lock_flags |= ldlm_flags_to_wire(lock->l_flags &
+                                              LDLM_FL_AST_MASK);
 
        LDLM_DEBUG(lock, "server preparing blocking AST");
 
@@ -1110,6 +1112,7 @@ int ldlm_server_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
        ldlm_lock2desc(lock, &body->lock_desc);
        if (lvb_len > 0) {
                void *lvb = req_capsule_client_get(&req->rq_pill, &RMF_DLM_LVB);
+
                lvb_len = ldlm_lvbo_fill(lock, lvb, &lvb_len);
                if (lvb_len < 0) {
                        /*
@@ -1445,7 +1448,8 @@ existing_lock:
        if (!(flags & LDLM_FL_HAS_INTENT)) {
                /* based on the assumption that lvb size never changes during
                 * resource life time otherwise it need resource->lr_lock's
-                * protection */
+                * protection
+                */
                req_capsule_set_size(pill, &RMF_DLM_LVB,
                                     RCL_SERVER, ldlm_lvbo_size(lock));
 
@@ -1539,6 +1543,7 @@ out:
                req->rq_status = rc ?: err; /* return either error - b=11190 */
                if (!req->rq_packed_final) {
                        int rc1 = lustre_pack_reply(req, 1, NULL, NULL);
+
                        if (rc == 0)
                                rc = rc1;
                }
@@ -1616,7 +1621,8 @@ retry:
                                ldlm_lock_destroy_nolock(lock);
                                unlock_res_and_lock(lock);
                        }
-                       ldlm_reprocess_all(lock->l_resource, lock->l_policy_data.l_inodebits.bits);
+                       ldlm_reprocess_all(lock->l_resource,
+                                          lock->l_policy_data.l_inodebits.bits);
                }
 
                if (!err && !ldlm_is_cbpending(lock) &&
@@ -1660,9 +1666,7 @@ void ldlm_clear_blocking_data(struct ldlm_lock *lock)
        ldlm_clear_blocking_lock(lock);
 }
 
-/**
- * Main LDLM entry point for server code to process lock conversion requests.
- */
+/* Main LDLM entry point for server code to process lock conversion requests */
 int ldlm_handle_convert0(struct ptlrpc_request *req,
                         const struct ldlm_request *dlm_req)
 {
@@ -1731,7 +1735,7 @@ int ldlm_handle_convert0(struct ptlrpc_request *req,
 
                /* All old bits should be reprocessed to send new BL AST if
                 * it wasn't sent earlier due to LDLM_FL_AST_SENT bit set.
-                * */
+                */
                ldlm_reprocess_all(lock->l_resource, bits);
        }
 
@@ -1989,9 +1993,9 @@ static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
  * This only can happen on client side.
  */
 static int ldlm_handle_cp_callback(struct ptlrpc_request *req,
-                                    struct ldlm_namespace *ns,
-                                    struct ldlm_request *dlm_req,
-                                    struct ldlm_lock *lock)
+                                  struct ldlm_namespace *ns,
+                                  struct ldlm_request *dlm_req,
+                                  struct ldlm_lock *lock)
 {
        LIST_HEAD(ast_list);
        int lvb_len;
@@ -2788,6 +2792,7 @@ void ldlm_revoke_export_locks(struct obd_export *exp)
 {
        int rc;
        LIST_HEAD(rpc_list);
+
        ENTRY;
 
        cfs_hash_for_each_nolock(exp->exp_lock_hash,
@@ -3116,9 +3121,7 @@ void ldlm_put_ref(void)
        EXIT;
 }
 
-/*
- * Export handle<->lock hash operations.
- */
+/* Export handle<->lock hash operations. */
 static unsigned
 ldlm_export_lock_hash(struct cfs_hash *hs, const void *key,
                      const unsigned int bits)
@@ -3329,9 +3332,9 @@ static int ldlm_setup(void)
                        .so_req_handler         = ldlm_callback_handler,
                },
        };
-       ldlm_state->ldlm_cb_service = \
-                       ptlrpc_register_service(&conf, ldlm_svc_kset,
-                                               ldlm_svc_debugfs_dir);
+       ldlm_state->ldlm_cb_service = ptlrpc_register_service(&conf,
+                                                             ldlm_svc_kset,
+                                                             ldlm_svc_debugfs_dir);
        if (IS_ERR(ldlm_state->ldlm_cb_service)) {
                CERROR("failed to start service\n");
                rc = PTR_ERR(ldlm_state->ldlm_cb_service);
@@ -3361,8 +3364,8 @@ static int ldlm_setup(void)
                        .tc_nthrs_max           = LDLM_NTHRS_MAX,
                        .tc_nthrs_user          = ldlm_num_threads,
                        .tc_cpu_bind            = ldlm_cpu_bind,
-                       .tc_ctx_tags            = LCT_MD_THREAD | \
-                                                 LCT_DT_THREAD | \
+                       .tc_ctx_tags            = LCT_MD_THREAD |
+                                                 LCT_DT_THREAD |
                                                  LCT_CL_THREAD,
                },
                .psc_cpt                = {
@@ -3374,7 +3377,7 @@ static int ldlm_setup(void)
                        .so_hpreq_handler       = ldlm_hpreq_handler,
                },
        };
-       ldlm_state->ldlm_cancel_service = \
+       ldlm_state->ldlm_cancel_service =
                        ptlrpc_register_service(&conf, ldlm_svc_kset,
                                                ldlm_svc_debugfs_dir);
        if (IS_ERR(ldlm_state->ldlm_cancel_service)) {
@@ -3403,7 +3406,7 @@ static int ldlm_setup(void)
                blp->blp_min_threads = LDLM_NTHRS_INIT;
                blp->blp_max_threads = LDLM_NTHRS_MAX;
        } else {
-               blp->blp_min_threads = blp->blp_max_threads = \
+               blp->blp_min_threads = blp->blp_max_threads =
                        min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
                                                         ldlm_num_threads));
        }
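
Several hunks in this file add the blank line checkpatch expects between the
last local declaration and the first statement ("Missing a blank line after
declarations"); for a hypothetical function body:

        {
                int rc = 0;

                ENTRY;          /* first statement, after the blank line */
        }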
@@ -3574,7 +3577,7 @@ out_resource:
 void ldlm_exit(void)
 {
        if (ldlm_refcount)
-               CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
+               CERROR("ldlm_refcount is %d in %s\n", ldlm_refcount, __func__);
        synchronize_rcu();
        kmem_cache_destroy(ldlm_resource_slab);
        /*
lustre/ldlm/ldlm_pool.c
index 83ca764..2e1db64 100644
@@ -753,9 +753,8 @@ static ssize_t lock_volume_factor_store(struct kobject *kobj,
        int rc;
 
        rc = kstrtoul(buffer, 10, &tmp);
-       if (rc < 0) {
+       if (rc < 0)
                return rc;
-       }
 
        tmp = (tmp << 8) / 100;
        atomic_set(&pl->pl_lock_volume_factor, tmp);
@@ -1517,17 +1516,14 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 
 void ldlm_pool_fini(struct ldlm_pool *pl)
 {
-       return;
 }
 
 void ldlm_pool_add(struct ldlm_pool *pl, struct ldlm_lock *lock)
 {
-       return;
 }
 
 void ldlm_pool_del(struct ldlm_pool *pl, struct ldlm_lock *lock)
 {
-       return;
 }
 
 __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
@@ -1537,7 +1533,6 @@ __u64 ldlm_pool_get_slv(struct ldlm_pool *pl)
 
 void ldlm_pool_set_slv(struct ldlm_pool *pl, __u64 slv)
 {
-       return;
 }
 
 __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
@@ -1547,7 +1542,6 @@ __u64 ldlm_pool_get_clv(struct ldlm_pool *pl)
 
 void ldlm_pool_set_clv(struct ldlm_pool *pl, __u64 clv)
 {
-       return;
 }
 
 __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
@@ -1557,7 +1551,6 @@ __u32 ldlm_pool_get_limit(struct ldlm_pool *pl)
 
 void ldlm_pool_set_limit(struct ldlm_pool *pl, __u32 limit)
 {
-       return;
 }
 
 __u32 ldlm_pool_get_lvf(struct ldlm_pool *pl)
@@ -1572,7 +1565,6 @@ int ldlm_pools_init(void)
 
 void ldlm_pools_fini(void)
 {
-       return;
 }
 
 #endif /* HAVE_LRU_RESIZE_SUPPORT */
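
The empty bodies above are the stubs compiled when LRU resizing is disabled;
checkpatch only asks that the redundant trailing statement be dropped ("void
function return statements are not generally useful"). Before/after:

        void stub(void)
        {
                return;         /* redundant */
        }

        void stub(void)
        {
        }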
lustre/ldlm/ldlm_resource.c
index 09cdaab..3ee5f81 100644
@@ -45,16 +45,16 @@ struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
 struct kmem_cache *ldlm_interval_tree_slab;
 struct kmem_cache *ldlm_inodebits_slab;
 
-int ldlm_srv_namespace_nr = 0;
-int ldlm_cli_namespace_nr = 0;
+int ldlm_srv_namespace_nr;
+int ldlm_cli_namespace_nr;
 
 DEFINE_MUTEX(ldlm_srv_namespace_lock);
 LIST_HEAD(ldlm_srv_namespace_list);
 
 DEFINE_MUTEX(ldlm_cli_namespace_lock);
-/* Client Namespaces that have active resources in them.
- * Once all resources go away, ldlm_poold moves such namespaces to the
- * inactive list */
+/* Client Namespaces that have active resources in them. Once all resources go
+ * away, ldlm_poold moves such namespaces to the inactive list
+ */
 LIST_HEAD(ldlm_cli_active_namespace_list);
 /* Client namespaces that don't have any locks in them */
 LIST_HEAD(ldlm_cli_inactive_namespace_list);
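
Dropping the "= 0" initializers follows checkpatch's "do not initialise
globals to 0": globals and statics are placed in BSS and zeroed by the
loader, so an explicit zero initializer is redundant:

        int ldlm_srv_namespace_nr = 0;  /* checkpatch ERROR */
        int ldlm_srv_namespace_nr;      /* identical runtime value */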
@@ -63,8 +63,7 @@ static struct dentry *ldlm_debugfs_dir;
 static struct dentry *ldlm_ns_debugfs_dir;
 struct dentry *ldlm_svc_debugfs_dir;
 
-/* during debug dump certain amount of granted locks for one resource to avoid
- * DDOS. */
+/* For debug dump, amount of granted locks for one resource to avoid DDOS. */
 static unsigned int ldlm_dump_granted_max = 256;
 
 static ssize_t ldebugfs_dump_ns_seq_write(struct file *file,
@@ -141,8 +140,7 @@ static ssize_t seq_watermark_write(struct file *file,
 
        if (wm_low) {
                if (ldlm_lock_limit_mb != 0 && watermark > ldlm_lock_limit_mb) {
-                       CERROR("lock_reclaim_threshold_mb must be smaller than "
-                              "lock_limit_mb.\n");
+                       CERROR("lock_reclaim_threshold_mb must be smaller than lock_limit_mb.\n");
                        return -EINVAL;
                }
 
@@ -768,7 +766,7 @@ static int ldlm_namespace_debugfs_register(struct ldlm_namespace *ns)
 }
 #undef MAX_STRING_SIZE
 
-static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
+static unsigned int ldlm_res_hop_hash(struct cfs_hash *hs,
                                  const void *key, unsigned int mask)
 {
        const struct ldlm_res_id *id = key;
@@ -780,7 +778,8 @@ static unsigned ldlm_res_hop_hash(struct cfs_hash *hs,
        return val & mask;
 }
 
-static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned int bits)
+static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id,
+                                         unsigned int bits)
 {
        struct lu_fid       fid;
        __u32               hash;
@@ -792,11 +791,12 @@ static unsigned int ldlm_res_hop_fid_hash(const struct ldlm_res_id *id, unsigned
 
        hash = fid_flatten32(&fid);
        hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
-       if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0) {
+
+       if (id->name[LUSTRE_RES_ID_HSH_OFF] != 0)
                val = id->name[LUSTRE_RES_ID_HSH_OFF];
-       } else {
+       else
                val = fid_oid(&fid);
-       }
+
        hash += (val >> 5) + (val << 11);
        return cfs_hash_32(hash, bits);
 }
@@ -852,9 +852,9 @@ static struct cfs_hash_ops ldlm_ns_hash_ops = {
 
 static struct {
        /** hash bucket bits */
-       unsigned                nsd_bkt_bits;
+       unsigned int            nsd_bkt_bits;
        /** hash bits */
-       unsigned                nsd_all_bits;
+       unsigned int            nsd_all_bits;
 } ldlm_ns_hash_defs[] = {
        [LDLM_NS_TYPE_MDC] = {
                .nsd_bkt_bits   = 11,
@@ -1010,7 +1010,7 @@ out_hash:
        kfree(ns->ns_name);
        cfs_hash_putref(ns->ns_rs_hash);
 out_ns:
-        OBD_FREE_PTR(ns);
+       OBD_FREE_PTR(ns);
 out_ref:
        ldlm_put_ref();
        RETURN(ERR_PTR(rc));
@@ -1034,8 +1034,9 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
        do {
                struct ldlm_lock *lock = NULL, *tmp;
 
-               /* First, we look for non-cleaned-yet lock
-                * all cleaned locks are marked by CLEANED flag. */
+               /* First, we look for non-cleaned-yet lock. All cleaned locks
+                * are marked by CLEANED flag.
+                */
                lock_res(res);
                list_for_each_entry(tmp, q, l_res_link) {
                        if (ldlm_is_cleaned(tmp))
@@ -1052,8 +1053,9 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
                        break;
                }
 
-               /* Set CBPENDING so nothing in the cancellation path
-                * can match this lock. */
+               /* Set CBPENDING so nothing in the cancellation path
+                * can match this lock.
+                */
                ldlm_set_cbpending(lock);
                ldlm_set_failed(lock);
                ldlm_clear_converting(lock);
@@ -1166,6 +1168,7 @@ static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
 
        if (atomic_read(&ns->ns_bref) > 0) {
                int rc;
+
                CDEBUG(D_DLMTRACE,
                       "dlm namespace %s free waiting on refcount %d\n",
                       ldlm_ns_name(ns), atomic_read(&ns->ns_bref));
@@ -1180,7 +1183,8 @@ force_wait:
                                ns->ns_waitq, atomic_read(&ns->ns_bref) == 0);
 
                /* Forced cleanups should be able to reclaim all references,
-                * so it's safe to wait forever... we can't leak locks... */
+                * so it's safe to wait forever... we can't leak locks...
+                */
                if (force && rc == 0) {
                        rc = -ETIMEDOUT;
                        LCONSOLE_ERROR("Forced cleanup waiting for %s "
@@ -1214,8 +1218,7 @@ force_wait:
  * (1) Clear all locks in \a ns.
  */
 void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
-                               struct obd_import *imp,
-                               int force)
+                              struct obd_import *imp, int force)
 {
        int rc;
 
@@ -1229,9 +1232,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
        ns->ns_stopping = 1;
        spin_unlock(&ns->ns_lock);
 
-       /*
-        * Can fail with -EINTR when force == 0 in which case try harder.
-        */
+       /* Can fail with -EINTR when force == 0 in which case try harder. */
        rc = __ldlm_namespace_free(ns, force);
        if (rc != ELDLM_OK) {
                if (imp) {
@@ -1267,7 +1268,8 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns)
        ldlm_namespace_unregister(ns, ns->ns_client);
        /* Fini pool _before_ parent proc dir is removed. This is important as
         * ldlm_pool_fini() removes own proc dir which is child to @dir.
-        * Removing it after @dir may cause oops. */
+        * Removing it after @dir may cause oops.
+        */
        ldlm_pool_fini(&ns->ns_pool);
 
        ldlm_namespace_debugfs_unregister(ns);
@@ -1349,7 +1351,8 @@ void ldlm_namespace_unregister(struct ldlm_namespace *ns, enum ldlm_side client)
        LASSERT(!list_empty(&ns->ns_list_chain));
        /* Some asserts and possibly other parts of the code are still
         * using list_empty(&ns->ns_list_chain). This is why it is
-        * important to use list_del_init() here. */
+        * important to use list_del_init() here.
+        */
        list_del_init(&ns->ns_list_chain);
        ldlm_namespace_nr_dec(client);
        mutex_unlock(ldlm_namespace_lock(client));
@@ -1458,7 +1461,8 @@ static struct ldlm_resource *ldlm_resource_new(enum ldlm_type ldlm_type)
        lu_ref_init(&res->lr_reference);
 
        /* Since LVB init can be delayed now, there is no longer need to
-        * immediatelly acquire mutex here. */
+        * immediatelly acquire mutex here.
+        */
        mutex_init(&res->lr_lvb_mutex);
        res->lr_lvb_initialized = false;
 
@@ -1558,7 +1562,8 @@ found:
        /* Let's see if we happened to be the very first resource in this
         * namespace. If so, and this is a client namespace, we need to move
         * the namespace into the active namespaces list to be patrolled by
-        * the ldlm_poold. */
+        * the ldlm_poold.
+        */
        if (ns_is_client(ns) && ns_refcount == 1) {
                mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
                ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
@@ -1652,9 +1657,7 @@ static void __ldlm_resource_add_lock(struct ldlm_resource *res,
        ldlm_resource_dump(D_INFO, res);
 }
 
-/**
- * Add a lock into a given resource into specified lock list.
- */
+/* Add a lock into a given resource into specified lock list. */
 void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
                            struct ldlm_lock *lock)
 {
@@ -1663,9 +1666,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
        __ldlm_resource_add_lock(res, head, lock, true);
 }
 
-/**
- * Insert a lock into resource after specified lock.
- */
+/* Insert a lock into resource after specified lock. */
 void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
                                     struct ldlm_lock *new)
 {
@@ -1686,7 +1687,7 @@ void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
  * lock and insert after.
  */
 void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
-                                      struct ldlm_lock *new)
+                                     struct ldlm_lock *new)
 {
        LASSERT(!list_empty(&original->l_res_link));
 
@@ -1724,10 +1725,7 @@ void ldlm_res2desc(struct ldlm_resource *res, struct ldlm_resource_desc *desc)
        desc->lr_name = res->lr_name;
 }
 
-/**
- * Print information about all locks in all namespaces on this node to debug
- * log.
- */
+/* Print info about all locks in all namespaces on this node to debug log. */
 void ldlm_dump_all_namespaces(enum ldlm_side client, int level)
 {
        struct ldlm_namespace *ns;
@@ -1756,10 +1754,7 @@ static int ldlm_res_hash_dump(struct cfs_hash *hs, struct cfs_hash_bd *bd,
        return 0;
 }
 
-/**
- * Print information about all locks in this namespace on this node to debug
- * log.
- */
+/* Print info about all locks in this namespace on this node to debug log. */
 void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
 {
        if (!((libcfs_debug | D_ERROR) & level))
@@ -1780,9 +1775,7 @@ void ldlm_namespace_dump(int level, struct ldlm_namespace *ns)
        spin_unlock(&ns->ns_lock);
 }
 
-/**
- * Print information about all locks in this resource to debug log.
- */
+/* Print information about all locks in this resource to debug log. */
 void ldlm_resource_dump(int level, struct ldlm_resource *res)
 {
        struct ldlm_lock *lock;