/* XXX - if ldlm_lock_new() can sleep we should
* release the lr_lock, allocate the new lock,
* and restart processing this lock. */
- if (!new2) {
- unlock_res_and_lock(req);
+ if (new2 == NULL) {
+ unlock_res_and_lock(req);
new2 = ldlm_lock_create(ns, &res->lr_name, LDLM_FLOCK,
lock->l_granted_mode, &null_cbs,
NULL, 0, LVB_T_NONE);
- lock_res_and_lock(req);
- if (!new2) {
- ldlm_flock_destroy(req, lock->l_granted_mode,
- *flags);
- *err = -ENOLCK;
- RETURN(LDLM_ITER_STOP);
- }
- goto reprocess;
- }
+ lock_res_and_lock(req);
+ if (IS_ERR(new2)) {
+ ldlm_flock_destroy(req, lock->l_granted_mode,
+ *flags);
+ *err = PTR_ERR(new2);
+ RETURN(LDLM_ITER_STOP);
+ }
+ goto reprocess;
+ }
splitted = 1;
type = oldres->lr_type;
unlock_res_and_lock(lock);
- newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
- if (newres == NULL)
- RETURN(-ENOMEM);
+ newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
+ if (IS_ERR(newres))
+ RETURN(PTR_ERR(newres));
lu_ref_add(&newres->lr_reference, "lock", lock);
/*
mode = old_lock->l_req_mode;
}
- res = ldlm_resource_get(ns, NULL, res_id, type, 0);
- if (res == NULL) {
- LASSERT(old_lock == NULL);
- RETURN(0);
- }
+ res = ldlm_resource_get(ns, NULL, res_id, type, 0);
+ if (IS_ERR(res)) {
+ LASSERT(old_lock == NULL);
+ RETURN(0);
+ }
LDLM_RESOURCE_ADDREF(res);
lock_res(res);
* Returns a referenced lock
*/
struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_type_t type,
- ldlm_mode_t mode,
- const struct ldlm_callback_suite *cbs,
+ const struct ldlm_res_id *res_id,
+ ldlm_type_t type,
+ ldlm_mode_t mode,
+ const struct ldlm_callback_suite *cbs,
void *data, __u32 lvb_len,
enum lvb_type lvb_type)
{
- struct ldlm_lock *lock;
- struct ldlm_resource *res;
- ENTRY;
-
- res = ldlm_resource_get(ns, NULL, res_id, type, 1);
- if (res == NULL)
- RETURN(NULL);
+ struct ldlm_lock *lock;
+ struct ldlm_resource *res;
+ int rc;
+ ENTRY;
- lock = ldlm_lock_new(res);
+ res = ldlm_resource_get(ns, NULL, res_id, type, 1);
+ if (IS_ERR(res))
+ RETURN(ERR_CAST(res));
- if (lock == NULL)
- RETURN(NULL);
+ lock = ldlm_lock_new(res);
+ if (lock == NULL)
+ RETURN(ERR_PTR(-ENOMEM));
lock->l_req_mode = mode;
lock->l_ast_data = data;
lock->l_pid = current_pid();
if (ns_is_server(ns))
ldlm_set_ns_srv(lock);
- if (cbs) {
- lock->l_blocking_ast = cbs->lcs_blocking;
- lock->l_completion_ast = cbs->lcs_completion;
- lock->l_glimpse_ast = cbs->lcs_glimpse;
- }
-
- lock->l_tree_node = NULL;
- /* if this is the extent lock, allocate the interval tree node */
- if (type == LDLM_EXTENT) {
- if (ldlm_interval_alloc(lock) == NULL)
- GOTO(out, 0);
- }
+ if (cbs) {
+ lock->l_blocking_ast = cbs->lcs_blocking;
+ lock->l_completion_ast = cbs->lcs_completion;
+ lock->l_glimpse_ast = cbs->lcs_glimpse;
+ }
- if (lvb_len) {
- lock->l_lvb_len = lvb_len;
- OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
- if (lock->l_lvb_data == NULL)
- GOTO(out, 0);
- }
+ lock->l_tree_node = NULL;
+ /* if this is the extent lock, allocate the interval tree node */
+ if (type == LDLM_EXTENT)
+ if (ldlm_interval_alloc(lock) == NULL)
+ GOTO(out, rc = -ENOMEM);
+
+ if (lvb_len) {
+ lock->l_lvb_len = lvb_len;
+ OBD_ALLOC_LARGE(lock->l_lvb_data, lvb_len);
+ if (lock->l_lvb_data == NULL)
+ GOTO(out, rc = -ENOMEM);
+ }
lock->l_lvb_type = lvb_type;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
- GOTO(out, 0);
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
+ GOTO(out, rc = -ENOENT);
- RETURN(lock);
+ RETURN(lock);
out:
- ldlm_lock_destroy(lock);
- LDLM_LOCK_RELEASE(lock);
- return NULL;
+ ldlm_lock_destroy(lock);
+ LDLM_LOCK_RELEASE(lock);
+ RETURN(ERR_PTR(rc));
}
/**
}
}
- /* The lock's callback data might be set in the policy function */
- lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
- dlm_req->lock_desc.l_resource.lr_type,
- dlm_req->lock_desc.l_req_mode,
+ /* The lock's callback data might be set in the policy function */
+ lock = ldlm_lock_create(ns, &dlm_req->lock_desc.l_resource.lr_name,
+ dlm_req->lock_desc.l_resource.lr_type,
+ dlm_req->lock_desc.l_req_mode,
cbs, NULL, 0, LVB_T_NONE);
- if (!lock)
- GOTO(out, rc = -ENOMEM);
+ if (IS_ERR(lock))
+ GOTO(out, rc = PTR_ERR(lock));
lock->l_last_activity = cfs_time_current_sec();
lock->l_remote_handle = dlm_req->lock_handle[0];
/* The LOCK_CHANGED code in ldlm_lock_enqueue depends on this
* ldlm_reprocess_all. If this moves, revisit that code. -phil */
- if (lock) {
- LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
- "(err=%d, rc=%d)", err, rc);
+ if (!IS_ERR(lock)) {
+ LDLM_DEBUG(lock, "server-side enqueue handler, sending reply"
+ "(err=%d, rc=%d)", err, rc);
- if (rc == 0) {
+ if (rc == 0) {
if (req_capsule_has_field(&req->rq_pill, &RMF_DLM_LVB,
RCL_SERVER) &&
ldlm_lvbo_size(lock) > 0) {
lock = ldlm_lock_create(ns, res_id, type, mode, &cbs, data, lvb_len,
lvb_type);
- if (unlikely(!lock))
- GOTO(out_nolock, err = -ENOMEM);
+ if (IS_ERR(lock))
+ GOTO(out_nolock, err = PTR_ERR(lock));
ldlm_lock2handle(lock, lockh);
* that needs to be performed.
*/
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
- int version, int opc, int canceloff,
- cfs_list_t *cancels, int count)
-{
- struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- struct req_capsule *pill = &req->rq_pill;
- struct ldlm_request *dlm = NULL;
- int flags, avail, to_free, pack = 0;
- CFS_LIST_HEAD(head);
- int rc;
- ENTRY;
+ int version, int opc, int canceloff,
+ struct list_head *cancels, int count)
+ {
+ struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
+ struct req_capsule *pill = &req->rq_pill;
+ struct ldlm_request *dlm = NULL;
+ struct list_head head = LIST_HEAD_INIT(head);
+ int flags, avail, to_free, pack = 0;
+ int rc;
+ ENTRY;
- if (cancels == NULL)
- cancels = &head;
+ if (cancels == NULL)
+ cancels = &head;
if (ns_connect_cancelset(ns)) {
/* Estimate the amount of available space in the request. */
req_capsule_filled_sizes(pill, RCL_CLIENT);
lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
einfo->ei_mode, &cbs, einfo->ei_cbdata,
lvb_len, lvb_type);
- if (lock == NULL)
- RETURN(-ENOMEM);
+ if (IS_ERR(lock))
+ RETURN(PTR_ERR(lock));
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
int ldlm_cli_cancel(struct lustre_handle *lockh,
ldlm_cancel_flags_t cancel_flags)
{
- struct obd_export *exp;
+ struct obd_export *exp;
int avail, flags, count = 1;
__u64 rc = 0;
- struct ldlm_namespace *ns;
- struct ldlm_lock *lock;
- CFS_LIST_HEAD(cancels);
- ENTRY;
+ struct ldlm_namespace *ns;
+ struct ldlm_lock *lock;
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
+ ENTRY;
/* concurrent cancels on the same handle can happen */
lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
* Return the number of cancelled locks.
*/
int ldlm_cli_cancel_list_local(cfs_list_t *cancels, int count,
- ldlm_cancel_flags_t flags)
+ ldlm_cancel_flags_t flags)
{
- CFS_LIST_HEAD(head);
- struct ldlm_lock *lock, *next;
+ struct list_head head = LIST_HEAD_INIT(head);
+ struct ldlm_lock *lock, *next;
int left = 0, bl_ast = 0;
__u64 rc;
ldlm_cancel_flags_t cancel_flags,
int flags)
{
- CFS_LIST_HEAD(cancels);
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
int count, rc;
ENTRY;
* If flags & LDLM_FL_LOCAL_ONLY, throw the locks away without trying
* to notify the server. */
int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_policy_data_t *policy,
- ldlm_mode_t mode,
- ldlm_cancel_flags_t flags,
- void *opaque)
+ const struct ldlm_res_id *res_id,
+ ldlm_policy_data_t *policy,
+ ldlm_mode_t mode,
+ ldlm_cancel_flags_t flags,
+ void *opaque)
{
- struct ldlm_resource *res;
- CFS_LIST_HEAD(cancels);
- int count;
- int rc;
- ENTRY;
+ struct ldlm_resource *res;
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
+ int count;
+ int rc;
+ ENTRY;
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL) {
- /* This is not a problem. */
- CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
- RETURN(0);
- }
+ res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
+ if (IS_ERR(res)) {
+ /* This is not a problem. */
+ CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
+ RETURN(0);
+ }
- LDLM_RESOURCE_ADDREF(res);
- count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
- 0, flags | LCF_BL_AST, opaque);
- rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
- if (rc != ELDLM_OK)
+ LDLM_RESOURCE_ADDREF(res);
+ count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
+ 0, flags | LCF_BL_AST, opaque);
+ rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
+ if (rc != ELDLM_OK)
CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
PLDLMRES(res), rc);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- RETURN(0);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ RETURN(0);
}
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
* < 0: errors
*/
int ldlm_resource_iterate(struct ldlm_namespace *ns,
- const struct ldlm_res_id *res_id,
- ldlm_iterator_t iter, void *data)
+ const struct ldlm_res_id *res_id,
+ ldlm_iterator_t iter, void *data)
{
- struct ldlm_resource *res;
- int rc;
- ENTRY;
+ struct ldlm_resource *res;
+ int rc;
+ ENTRY;
- if (ns == NULL) {
- CERROR("must pass in namespace\n");
- LBUG();
- }
+ LASSERTF(ns != NULL, "must pass in namespace\n");
- res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
- if (res == NULL)
- RETURN(0);
+ res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
+ if (IS_ERR(res))
+ RETURN(0);
- LDLM_RESOURCE_ADDREF(res);
- rc = ldlm_resource_foreach(res, iter, data);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- RETURN(rc);
+ LDLM_RESOURCE_ADDREF(res);
+ rc = ldlm_resource_foreach(res, iter, data);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ RETURN(rc);
}
EXPORT_SYMBOL(ldlm_resource_iterate);
*/
static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
{
- int canceled;
- CFS_LIST_HEAD(cancels);
+ int canceled;
+ struct list_head cancels = LIST_HEAD_INIT(cancels);
- CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
- "replay for namespace %s (%d)\n",
- ldlm_ns_name(ns), ns->ns_nr_unused);
+ CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before"
+ "replay for namespace %s (%d)\n",
+ ldlm_ns_name(ns), ns->ns_nr_unused);
- /* We don't need to care whether or not LRU resize is enabled
- * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
- * count parameter */
- canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
- LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
+ /* We don't need to care whether or not LRU resize is enabled
+ * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
+ * count parameter */
+ canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
+ LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
- CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
- canceled, ldlm_ns_name(ns));
+ CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
+ canceled, ldlm_ns_name(ns));
}
int ldlm_replay_locks(struct obd_import *imp)
{
struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
- CFS_LIST_HEAD(list);
+ struct list_head list = LIST_HEAD_INIT(list);
struct ldlm_lock *lock, *next;
int rc = 0;
/* NOTE(review): this span is a unified-diff hunk set ('-'/'+' lines) for
 * ldlm_resource_get(); unchanged context between hunks is elided, so the
 * function cannot be reconstructed in full here.  Review notes inline. */
ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
                  const struct ldlm_res_id *name, ldlm_type_t type, int create)
{
-	cfs_hlist_node_t	*hnode;
-	struct ldlm_resource	*res;
-	cfs_hash_bd_t		bd;
-	__u64			version;
-	int			ns_refcount = 0;
+	struct hlist_node	*hnode;
+	struct ldlm_resource	*res = NULL;
+	cfs_hash_bd_t		bd;
+	__u64			version;
+	int			ns_refcount = 0;
	LASSERT(ns != NULL);
	LASSERT(parent == NULL);
	hnode = cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
	if (hnode != NULL) {
		cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
-		res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
-		/* Synchronize with regard to resource creation. */
-		if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
-			mutex_lock(&res->lr_lvb_mutex);
-			mutex_unlock(&res->lr_lvb_mutex);
-		}
-
-		if (unlikely(res->lr_lvb_len < 0)) {
-			ldlm_resource_putref(res);
-			res = NULL;
-		}
-		return res;
-	}
		/* Found-in-hash path now shares the LVB-sync/error code
		 * with the lost-the-race path via the lvbo_init label. */
+		GOTO(lvbo_init, res);
+	}
-	version = cfs_hash_bd_version_get(&bd);
-	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
+	version = cfs_hash_bd_version_get(&bd);
+	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 0);
-	if (create == 0)
-		return NULL;
		/* Lookup-only miss is now a distinguishable error code. */
+	if (create == 0)
+		return ERR_PTR(-ENOENT);
-	LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
-		 "type: %d\n", type);
-	res = ldlm_resource_new();
-	if (!res)
-		return NULL;
+	LASSERTF(type >= LDLM_MIN_TYPE && type < LDLM_MAX_TYPE,
+		 "type: %d\n", type);
+	res = ldlm_resource_new();
+	if (res == NULL)
+		return ERR_PTR(-ENOMEM);
-	res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
-	res->lr_name = *name;
-	res->lr_type = type;
-	res->lr_most_restr = LCK_NL;
+	res->lr_ns_bucket = cfs_hash_bd_extra_get(ns->ns_rs_hash, &bd);
+	res->lr_name = *name;
+	res->lr_type = type;
+	res->lr_most_restr = LCK_NL;
-	cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
-	hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
-		cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
+	cfs_hash_bd_lock(ns->ns_rs_hash, &bd, 1);
+	hnode = (version == cfs_hash_bd_version_get(&bd)) ? NULL :
+		cfs_hash_bd_lookup_locked(ns->ns_rs_hash, &bd, (void *)name);
-	if (hnode != NULL) {
+	if (hnode != NULL) {
	/* Someone won the race and already added the resource. */
	cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
	/* Clean lu_ref for failed resource. */
	/* We have taken lr_lvb_mutex. Drop it. */
	mutex_unlock(&res->lr_lvb_mutex);
	OBD_SLAB_FREE(res, ldlm_resource_slab, sizeof *res);
-
-	res = cfs_hlist_entry(hnode, struct ldlm_resource, lr_hash);
+lvbo_init:
+	res = hlist_entry(hnode, struct ldlm_resource, lr_hash);
	/* Synchronize with regard to resource creation. */
	if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
		mutex_lock(&res->lr_lvb_mutex);
	if (unlikely(res->lr_lvb_len < 0)) {
		ldlm_resource_putref(res);
		/* NOTE(review): use-after-free.  res->lr_lvb_len is read
		 * AFTER ldlm_resource_putref(res), which may drop the last
		 * reference and free res.  Save the value first:
		 *   rc = res->lr_lvb_len;
		 *   ldlm_resource_putref(res);
		 *   res = ERR_PTR(rc);
		 */
-		res = NULL;
+		res = ERR_PTR(res->lr_lvb_len);
	}
	return res;
}
	/* lvbo_init() failure path: rc is saved before putref here,
	 * unlike the lr_lvb_len path flagged above — this one is safe. */
	res->lr_lvb_len = rc;
	mutex_unlock(&res->lr_lvb_mutex);
	ldlm_resource_putref(res);
-	return NULL;
+	return ERR_PTR(rc);
}
}
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if(res == NULL)
+ if (IS_ERR(res))
RETURN(0);
lock_res(res);
__u64 bits)
{
struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
- ldlm_policy_data_t policy = {{0}};
- struct ldlm_res_id res_id;
- struct ldlm_resource *res;
- int count;
- ENTRY;
+ ldlm_policy_data_t policy = { {0} };
+ struct ldlm_res_id res_id;
+ struct ldlm_resource *res;
+ int count;
+ ENTRY;
/* Return, i.e. cancel nothing, only if ELC is supported (flag in
* export) but disabled through procfs (flag in NS).
if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
RETURN(0);
- fid_build_reg_res_name(fid, &res_id);
- res = ldlm_resource_get(exp->exp_obd->obd_namespace,
- NULL, &res_id, 0, 0);
- if (res == NULL)
- RETURN(0);
- LDLM_RESOURCE_ADDREF(res);
- /* Initialize ibits lock policy. */
- policy.l_inodebits.bits = bits;
- count = ldlm_cancel_resource_local(res, cancels, &policy,
- mode, 0, 0, NULL);
- LDLM_RESOURCE_DELREF(res);
- ldlm_resource_putref(res);
- RETURN(count);
+ fid_build_reg_res_name(fid, &res_id);
+ res = ldlm_resource_get(exp->exp_obd->obd_namespace,
+ NULL, &res_id, 0, 0);
+ if (IS_ERR(res))
+ RETURN(0);
+ LDLM_RESOURCE_ADDREF(res);
+ /* Initialize ibits lock policy. */
+ policy.l_inodebits.bits = bits;
+ count = ldlm_cancel_resource_local(res, cancels, &policy,
+ mode, 0, 0, NULL);
+ LDLM_RESOURCE_DELREF(res);
+ ldlm_resource_putref(res);
+ RETURN(count);
}
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
* to go... deadlock! */
res = ldlm_resource_get(ofd->ofd_namespace, NULL,
&tsi->tsi_resid, LDLM_EXTENT, 0);
- if (res != NULL) {
+ if (!IS_ERR(res)) {
ldlm_res_lvbo_update(res, NULL, 0);
ldlm_resource_putref(res);
}
* to go... deadlock! */
res = ldlm_resource_get(ns, NULL, &tsi->tsi_resid,
LDLM_EXTENT, 0);
- if (res != NULL) {
+ if (!IS_ERR(res)) {
ldlm_res_lvbo_update(res, NULL, 0);
ldlm_resource_putref(res);
}
ost_fid_build_resid(fid, &info->fti_resid);
rs = ldlm_resource_get(ns, NULL, &info->fti_resid,
LDLM_EXTENT, 0);
- if (rs != NULL) {
+ if (!IS_ERR(rs)) {
ns->ns_lvbo->lvbo_update(rs, NULL, 1);
ldlm_resource_putref(rs);
}
* for 2nd lu_object_find() waiting for the first reference
* to go... deadlock! */
res = ldlm_resource_get(ns, NULL, &info->fti_resid, LDLM_EXTENT, 0);
- if (res != NULL) {
+ if (!IS_ERR(res)) {
ldlm_res_lvbo_update(res, NULL, 0);
ldlm_resource_putref(res);
}
ostid_build_res_name(&oa->o_oi, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
- if (res == NULL)
+ if (IS_ERR(res))
RETURN(0);
LDLM_RESOURCE_ADDREF(res);
fid_build_reg_res_name(&qti->qti_fid, &qti->qti_resid);
res = ldlm_resource_get(pool->qpi_qmt->qmt_ns, NULL, &qti->qti_resid,
LDLM_PLAIN, 0);
- if (res == NULL) {
+ if (IS_ERR(res)) {
/* this might happen if no slaves have enqueued global quota
* locks yet */
LQUOTA_DEBUG(lqe, "failed to lookup ldlm resource associated "
fid_build_quota_res_name(&qti->qti_fid, &lqe->lqe_id, &qti->qti_resid);
res = ldlm_resource_get(qmt->qmt_ns, NULL, &qti->qti_resid, LDLM_PLAIN,
0);
- if (res == NULL) {
+ if (IS_ERR(res)) {
/* this might legitimately happens if slaves haven't had the
* opportunity to enqueue quota lock yet. */
LQUOTA_DEBUG(lqe, "failed to lookup ldlm resource for per-ID "