4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * lustre/osp/osp_object.c
31 * Lustre OST Proxy Device (OSP) is the agent on the local MDT for the OST
34 * OSP object attributes cache
35 * ---------------------------
36 * OSP object is the stub of the remote OST-object or MDT-object. Both the
37 * attribute and the extended attributes are stored on the peer side remotely.
38 * It is inefficient to send RPC to peer to fetch those attributes when every
39 * get_attr()/get_xattr() called. For a large system, the LFSCK synchronous
40 * mode scanning is prohibitively inefficient.
42 * So the OSP maintains the OSP object attributes cache to cache some
43 * attributes on the local MDT. The cache is organized against the OSP
46 * struct osp_xattr_entry {
47 * struct list_head oxe_list;
53 * unsigned int oxe_exist:1,
60 * struct lu_attr opo_attr;
61 * struct list_head opo_xattr_list;
62 * spinlock_t opo_lock;
66 * The basic attributes, such as owner/mode/flags, are stored in the
67 * osp_object::opo_attr. The extended attributes will be stored
68 * as osp_xattr_entry. Every extended attribute has an independent
69 * osp_xattr_entry, and all the osp_xattr_entry are linked into the
70 * osp_object::opo_xattr_list. The OSP object attributes cache
71 * is protected by the osp_object::opo_lock.
73 * Not all OSP objects have an attributes cache because maintaining
74 * the cache requires some resources. Currently, the OSP object
75 * attributes cache will be initialized when the attributes or the
76 * extended attributes are pre-fetched via osp_declare_attr_get()
77 * or osp_declare_xattr_get(). That is usually for LFSCK purpose,
78 * but it also can be shared by others.
81 * XXX: Do NOT prepare the OUT RPC for a remote transaction. (Please refer to
82 * the comment of osp_trans_create() for remote transactions.)
84 * According to our current transaction/dt_object_lock framework (to make
85 * the cross-MDTs modification for DNE1 to be workable), the transaction
86 * sponsor will start the transaction firstly, then try to acquire related
87 * dt_object_lock if needed. Under such rules, if we want to prepare the
88 * OUT RPC in the transaction declare phase, then related attr/xattr
89 * should be known without dt_object_lock. But such condition maybe not
90 * true for some remote transaction case. For example:
92 * For linkEA repairing (by LFSCK) case, before the LFSCK thread obtained
93 * the dt_object_lock on the target MDT-object, it cannot know whether
94 * the MDT-object has linkEA or not, nor whether it is valid or not.
96 * Since the LFSCK thread cannot hold dt_object_lock before the remote
97 * transaction start (otherwise there will be some potential deadlock),
98 * it cannot prepare related OUT RPC for repairing during the declare
99 * phase as other normal transactions do.
101 * To resolve the trouble, we will make OSP to prepare related OUT RPC
102 * after remote transaction started, and trigger the remote updating
103 * (send RPC) when trans_stop. Then the upper layer users, such as LFSCK,
104 * can follow the general rule to handle trans_start/dt_object_lock
105 * for repairing linkEA inconsistency without distinguishing remote
108 * In fact, above solution for remote transaction should be the normal
109 * model without considering DNE1. The trouble brought by DNE1 will be
110 * resolved in DNE2. At that time, this patch can be removed.
113 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
114 * Author: Mikhail Pershin <mike.tappro@intel.com>
117 #define DEBUG_SUBSYSTEM S_MDS
119 #include <lustre_obdo.h>
120 #include <lustre_swab.h>
122 #include "osp_internal.h"
/* Return the node id (ss_node_id) of the sequence site of the OSP's
 * local storage device. */
124 static inline __u32 osp_dev2node(struct osp_device *osp)
126 return osp->opd_storage->dd_lu_dev.ld_site->ld_seq_site->ss_node_id;
/* Return the obd device name backing this OSP object, for log messages. */
129 static inline const char *osp_dto2name(struct osp_object *obj)
131 return obj->opo_obj.do_lu.lo_dev->ld_obd->obd_name;
/* True if @lo stubs a remote OST object: the OSP device is not an
 * MDT-to-MDT connection (opd_connect_mdt is clear). */
134 static inline bool is_ost_obj(struct lu_object *lo)
136 return !lu2osp_dev(lo->lo_dev)->opd_connect_mdt;
/*
 * Copy the xattr value from @buf into the cached entry @oxe and record
 * its length.
 * NOTE(review): callers in this file invoke this while holding
 * obj->opo_lock and only after verifying @oxe has room for
 * buf->lb_len — confirm both before adding new call sites.
 */
139 static inline void __osp_oac_xattr_assignment(struct osp_object *obj,
140 struct osp_xattr_entry *oxe,
141 const struct lu_buf *buf)
144 memcpy(oxe->oxe_value, buf->lb_buf, buf->lb_len);
146 oxe->oxe_vallen = buf->lb_len;
152 * Assign FID to the OST object.
154 * This function will assign the FID to the OST object of a striped file.
156 * \param[in] env pointer to the thread context
157 * \param[in] d pointer to the OSP device
158 * \param[in] o pointer to the OSP object that the FID will be
161 static void osp_object_assign_fid(const struct lu_env *env,
162 struct osp_device *d, struct osp_object *o)
164 struct osp_thread_info *osi = osp_env_info(env);
/* Only a reserved object whose FID is still zero may be assigned one. */
166 LASSERT(fid_is_zero(lu_object_fid(&o->opo_obj.do_lu)));
167 LASSERT(o->opo_reserved);
/* Fetch the next precreated FID and attach it to the lu_object. */
170 osp_precreate_get_fid(env, d, &osi->osi_fid);
172 lu_object_assign_fid(env, &o->opo_obj.do_lu, &osi->osi_fid);
175 #define OXE_DEFAULT_LEN 16
178 * Release reference from the OSP object extended attribute entry.
180 * If it is the last reference, then free the entry.
182 * \param[in] oxe pointer to the OSP object extended attribute entry.
184 static inline void osp_oac_xattr_put(struct osp_xattr_entry *oxe)
186 if (atomic_dec_and_test(&oxe->oxe_ref)) {
/* The entry must already be detached from obj->opo_xattr_list
 * before the last reference is dropped. */
187 LASSERT(list_empty(&oxe->oxe_list));
189 OBD_FREE_LARGE(oxe, oxe->oxe_buflen);
194 * Find the named extended attribute in the OSP object attributes cache.
196 * The caller should take the osp_object::opo_lock before calling
199 * \param[in] obj pointer to the OSP object
200 * \param[in] name the name of the extended attribute
201 * \param[in] namelen the name length of the extended attribute
203 * \retval pointer to the found extended attribute entry
204 * \retval NULL if the specified extended attribute is not
207 static struct osp_xattr_entry *
208 osp_oac_xattr_find_locked(struct osp_object *obj, const char *name,
211 struct osp_xattr_entry *oxe;
213 list_for_each_entry(oxe, &obj->opo_xattr_list, oxe_list) {
/* Match on both length and bytes; oxe_buf holds the entry's name. */
214 if (namelen == oxe->oxe_namelen &&
215 strncmp(name, oxe->oxe_buf, namelen) == 0)
223 * Find the named extended attribute in the OSP object attributes cache.
225 * Call osp_oac_xattr_find_locked() with the osp_object::opo_lock held.
227 * \param[in] obj pointer to the OSP object
228 * \param[in] name the name of the extended attribute
229 * \param[in] unlink true if the extended attribute entry is to be removed
232 * \retval pointer to the found extended attribute entry
233 * \retval NULL if the specified extended attribute is not
236 static struct osp_xattr_entry *osp_oac_xattr_find(struct osp_object *obj,
237 const char *name, bool unlink)
239 struct osp_xattr_entry *oxe = NULL;
241 spin_lock(&obj->opo_lock);
242 oxe = osp_oac_xattr_find_locked(obj, name, strlen(name));
/* On "unlink" the found entry is detached from the cache list
 * (the caller inherits the list's reference)... */
245 list_del_init(&oxe->oxe_list);
/* ...otherwise a fresh reference is taken for the caller. */
247 atomic_inc(&oxe->oxe_ref);
249 spin_unlock(&obj->opo_lock);
255 * Find the named extended attribute in the OSP object attributes cache.
257 * If it is not in the cache, then add an empty entry (that will be
258 * filled later) to cache with the given name.
260 * \param[in] obj pointer to the OSP object
261 * \param[in] name the name of the extended attribute
262 * \param[in] len the length of the extended attribute value
264 * \retval pointer to the found or new-created extended
266 * \retval NULL if the specified extended attribute is not in the
267 * cache or fail to add new empty entry to the cache.
269 static struct osp_xattr_entry *
270 osp_oac_xattr_find_or_add(struct osp_object *obj, const char *name, size_t len)
272 struct osp_xattr_entry *oxe;
273 struct osp_xattr_entry *tmp = NULL;
274 size_t namelen = strlen(name);
/* Buffer layout: entry header, name + NUL, then the value area
 * (OXE_DEFAULT_LEN bytes when no value length is given). */
275 size_t size = sizeof(*oxe) + namelen + 1 +
276 (len ? len : OXE_DEFAULT_LEN);
278 oxe = osp_oac_xattr_find(obj, name, false);
282 OBD_ALLOC_LARGE(oxe, size);
286 INIT_LIST_HEAD(&oxe->oxe_list);
287 oxe->oxe_buflen = size;
288 oxe->oxe_namelen = namelen;
289 memcpy(oxe->oxe_buf, name, namelen);
290 oxe->oxe_value = oxe->oxe_buf + namelen + 1;
291 /* One ref is for the caller, the other is for the entry on the list. */
292 atomic_set(&oxe->oxe_ref, 2);
/* Re-check under the lock: another thread may have added the same
 * name while we allocated; if so, reuse its entry ("tmp") and free
 * the one we just built. */
294 spin_lock(&obj->opo_lock);
295 tmp = osp_oac_xattr_find_locked(obj, name, namelen);
297 list_add_tail(&oxe->oxe_list, &obj->opo_xattr_list);
299 atomic_inc(&tmp->oxe_ref);
300 spin_unlock(&obj->opo_lock);
/* Lost the race: discard the freshly allocated entry. */
303 OBD_FREE_LARGE(oxe, size);
311 * Assign the cached OST-object's EA with the given value.
313 * If the current EA entry in cache has not enough space to hold the new
314 * value, remove it, create a new one, then assign with the given value.
316 * \param[in] obj pointer to the OSP object
317 * \param[in] oxe pointer to the cached EA entry to be assigned
318 * \param[in] buf pointer to the buffer with new EA value
320 * \retval pointer to the new created EA entry in cache if
321 * current entry is not big enough; otherwise, the
322 * input 'oxe' will be returned.
324 static struct osp_xattr_entry *
325 osp_oac_xattr_assignment(struct osp_object *obj, struct osp_xattr_entry *oxe,
326 const struct lu_buf *buf)
328 struct osp_xattr_entry *new = NULL;
329 struct osp_xattr_entry *old = NULL;
330 int namelen = oxe->oxe_namelen;
331 size_t size = sizeof(*oxe) + namelen + 1 + buf->lb_len;
332 bool unlink_only = false;
/* Existing entry too small for the new value: build a replacement. */
334 if (oxe->oxe_buflen < size) {
335 OBD_ALLOC_LARGE(new, size);
337 INIT_LIST_HEAD(&new->oxe_list);
338 new->oxe_buflen = size;
339 new->oxe_namelen = namelen;
340 memcpy(new->oxe_buf, oxe->oxe_buf, namelen);
341 new->oxe_value = new->oxe_buf + namelen + 1;
342 /* One ref is for the caller,
343 * the other is for the entry on the list. */
344 atomic_set(&new->oxe_ref, 2);
345 __osp_oac_xattr_assignment(obj, new, buf);
348 CWARN("%s: cannot update cached xattr %.*s of "DFID"\n",
349 osp_dto2name(obj), namelen, oxe->oxe_buf,
350 PFID(lu_object_fid(&obj->opo_obj.do_lu)));
/* Re-look up under the lock: the list may have changed since the
 * caller obtained 'oxe'. */
354 spin_lock(&obj->opo_lock);
355 old = osp_oac_xattr_find_locked(obj, oxe->oxe_buf, namelen);
358 /* Unlink the 'old'. */
359 list_del_init(&old->oxe_list);
361 /* Drop the ref for 'old' on list. */
362 osp_oac_xattr_put(old);
364 /* Drop the ref for current using. */
365 osp_oac_xattr_put(oxe);
368 /* Insert 'new' into list. */
369 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
370 } else if (unlink_only) {
371 /* Unlink the 'old'. */
372 list_del_init(&old->oxe_list);
374 /* Drop the ref for 'old' on list. */
375 osp_oac_xattr_put(old);
/* In-place update: the existing entry has enough room. */
377 __osp_oac_xattr_assignment(obj, oxe, buf);
380 /* Drop the ref for current using. */
381 osp_oac_xattr_put(oxe);
384 /* Someone unlinked the 'old' by race,
385 * insert the 'new' one into list. */
386 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
388 spin_unlock(&obj->opo_lock);
394 * Parse the OSP object attribute from the RPC reply.
396 * If the attribute is valid, then it will be added to the OSP object
399 * \param[in] env pointer to the thread context
400 * \param[in] reply pointer to the RPC reply
401 * \param[in] req pointer to the RPC request
402 * \param[out] attr pointer to buffer to hold the output attribute
403 * \param[in] obj pointer to the OSP object
404 * \param[in] index the index of the attribute buffer in the reply
406 * \retval 0 for success
407 * \retval negative error number on failure
409 static int osp_get_attr_from_reply(const struct lu_env *env,
410 struct object_update_reply *reply,
411 struct ptlrpc_request *req,
412 struct lu_attr *attr,
413 struct osp_object *obj, int index)
415 struct osp_thread_info *osi = osp_env_info(env);
416 struct lu_buf *rbuf = &osi->osi_lb2;
417 struct obdo *lobdo = &osi->osi_obdo;
/* Extract the reply buffer holding the wire-format obdo at @index. */
421 rc = object_update_result_data_get(reply, rbuf, index);
425 wobdo = rbuf->lb_buf;
/* The reply payload must be exactly one wire obdo. */
426 if (rbuf->lb_len != sizeof(*wobdo))
429 LASSERT(req != NULL);
/* Byte-swap the wire obdo if peer endianness differs. */
430 if (ptlrpc_req_need_swab(req))
431 lustre_swab_obdo(wobdo);
/* Convert wire obdo to local obdo, then to lu_attr. */
433 lustre_get_wire_obdo(NULL, lobdo, wobdo);
/* Refresh the cached attributes under opo_lock. */
435 spin_lock(&obj->opo_lock);
436 la_from_obdo(&obj->opo_attr, lobdo, lobdo->o_valid);
437 spin_unlock(&obj->opo_lock);
440 la_from_obdo(attr, lobdo, lobdo->o_valid);
446 * Interpreter function for getting OSP object attribute asynchronously.
448 * Called to interpret the result of an async mode RPC for getting the
449 * OSP object attribute.
451 * \param[in] env pointer to the thread context
452 * \param[in] reply pointer to the RPC reply
453 * \param[in] req pointer to the RPC request
454 * \param[in] obj pointer to the OSP object
455 * \param[out] data pointer to buffer to hold the output attribute
456 * \param[in] index the index of the attribute buffer in the reply
457 * \param[in] rc the result for handling the RPC
459 * \retval 0 for success
460 * \retval negative error number on failure
462 static int osp_attr_get_interpterer(const struct lu_env *env,
463 struct object_update_reply *reply,
464 struct ptlrpc_request *req,
465 struct osp_object *obj,
466 void *data, int index, int rc)
468 struct lu_attr *attr = data;
/* Success: the remote object exists; parse the attrs from the reply. */
471 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
472 obj->opo_non_exist = 0;
474 return osp_get_attr_from_reply(env, reply, req, NULL, obj,
/* Failure path: mark the object as non-existent on the peer. */
478 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
479 obj->opo_non_exist = 1;
482 spin_lock(&obj->opo_lock);
484 spin_unlock(&obj->opo_lock);
491 * Implement OSP layer dt_object_operations::do_declare_attr_get() interface.
493 * Declare that the caller will get attribute from the specified OST object.
495 * This function adds an Object Unified Target (OUT) sub-request to the per-OSP
496 * based shared asynchronous request queue. The osp_attr_get_interpterer()
497 * is registered as the interpreter function to handle the result of this
500 * \param[in] env pointer to the thread context
501 * \param[in] dt pointer to the OSP layer dt_object
503 * \retval 0 for success
504 * \retval negative error number on failure
506 static int osp_declare_attr_get(const struct lu_env *env, struct dt_object *dt)
508 struct osp_object *obj = dt2osp_obj(dt);
509 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* The shared async request queue is serialized by this mutex. */
512 mutex_lock(&osp->opd_async_requests_mutex);
513 rc = osp_insert_async_request(env, OUT_ATTR_GET, obj, 0, NULL, NULL,
514 &obj->opo_attr, sizeof(struct obdo),
515 osp_attr_get_interpterer);
516 mutex_unlock(&osp->opd_async_requests_mutex);
522 * Implement OSP layer dt_object_operations::do_attr_get() interface.
524 * Get attribute from the specified MDT/OST object.
526 * If the attribute is in the OSP object attributes cache, then return
527 * the cached attribute directly. Otherwise it will trigger an OUT RPC
528 * to the peer to get the attribute synchronously, if successful, add it
529 * to the OSP attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
531 * \param[in] env pointer to the thread context
532 * \param[in] dt pointer to the OSP layer dt_object
533 * \param[out] attr pointer to the buffer to hold the output attribute
535 * \retval 0 for success
536 * \retval negative error number on failure
538 int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
539 struct lu_attr *attr)
541 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
542 struct osp_object *obj = dt2osp_obj(dt);
543 struct dt_device *dev = &osp->opd_dt_dev;
544 struct osp_update_request *update;
545 struct object_update_reply *reply;
546 struct ptlrpc_request *req = NULL;
547 int invalidated, cache = 0, rc = 0;
/* Known-nonexistent OST object: no point in asking the peer. */
550 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
/* Fast path: serve valid, non-stale attrs straight from the cache. */
553 spin_lock(&obj->opo_lock);
554 if (obj->opo_attr.la_valid != 0 && !obj->opo_stale) {
555 *attr = obj->opo_attr;
556 spin_unlock(&obj->opo_lock);
560 spin_unlock(&obj->opo_lock);
562 update = osp_update_request_create(dev);
564 RETURN(PTR_ERR(update));
566 rc = OSP_UPDATE_RPC_PACK(env, out_attr_get_pack, update,
567 lu_object_fid(&dt->do_lu));
569 CERROR("%s: Insert update error "DFID": rc = %d\n",
570 dev->dd_lu_dev.ld_obd->obd_name,
571 PFID(lu_object_fid(&dt->do_lu)), rc);
/* Snapshot the invalidation sequence before the RPC so we can tell
 * afterwards whether the reply is still safe to cache. */
576 invalidated = atomic_read(&obj->opo_invalidate_seq);
578 rc = osp_remote_sync(env, osp, update, &req);
580 down_read(&obj->opo_invalidate_sem);
581 if (invalidated == atomic_read(&obj->opo_invalidate_seq)) {
582 /* no invalidation has come so far, we can cache the attrs */
588 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
590 obj->opo_non_exist = 1;
592 CERROR("%s: osp_attr_get update error "DFID": rc = %d\n",
593 dev->dd_lu_dev.ld_obd->obd_name,
594 PFID(lu_object_fid(&dt->do_lu)), rc);
600 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
601 obj->opo_non_exist = 0;
602 reply = req_capsule_server_sized_get(&req->rq_pill,
603 &RMF_OUT_UPDATE_REPLY,
604 OUT_UPDATE_REPLY_SIZE);
605 if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC)
606 GOTO(out, rc = -EPROTO);
/* Parse the reply; pass obj only when caching is still allowed. */
608 rc = osp_get_attr_from_reply(env, reply, req, attr,
609 cache ? obj : NULL, 0);
613 spin_lock(&obj->opo_lock);
616 spin_unlock(&obj->opo_lock);
621 up_read(&obj->opo_invalidate_sem);
625 ptlrpc_req_finished(req);
627 osp_update_request_destroy(env, update);
633 * Implement OSP layer dt_object_operations::do_declare_attr_set() interface.
635 * If the transaction is not a remote one, then declare the credits that will
636 * be used for the subsequent llog record for the object's attributes.
638 * \param[in] env pointer to the thread context
639 * \param[in] dt pointer to the OSP layer dt_object
640 * \param[in] attr pointer to the attribute to be set
641 * \param[in] th pointer to the transaction handler
643 * \retval 0 for success
644 * \retval negative error number on failure
646 static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
647 const struct lu_attr *attr, struct thandle *th)
649 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
650 struct osp_object *o = dt2osp_obj(dt);
/* Remote-only transactions are delegated to the MDT-object path. */
653 if (is_only_remote_trans(th))
654 return osp_md_declare_attr_set(env, dt, attr, th);
656 * Usually we don't allow server stack to manipulate size
657 * but there is a special case when striping is created
658 * late, after stripeless file got truncated to non-zero.
660 * In this case we do the following:
662 * 1) grab id in declare - this can lead to leaked OST objects
663 * but we don't currently have proper mechanism and the only
664 * options we have are to do truncate RPC holding transaction
665 * open (very bad) or to grab id in declare at cost of leaked
666 * OST object in same very rare unfortunate case (just bad)
667 * notice 1.6-2.0 do assignment outside of running transaction
668 * all the time, meaning many more chances for leaked objects.
670 * 2) send synchronous truncate RPC with just assigned id
673 /* there are few places in MDD code still passing NULL
674 * XXX: to be fixed soon */
/* Late-striping special case: assign a FID and truncate synchronously. */
678 if (attr->la_valid & LA_SIZE && attr->la_size > 0 &&
679 fid_is_zero(lu_object_fid(&o->opo_obj.do_lu))) {
680 LASSERT(!dt_object_exists(dt));
681 osp_object_assign_fid(env, d, o);
682 rc = osp_object_truncate(env, dt, attr->la_size);
687 if (!(attr->la_valid & LA_REMOTE_ATTR_SET))
690 /* track all UID/GID, projid, and layout version changes via llog */
691 rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
697 * Implement OSP layer dt_object_operations::do_attr_set() interface.
699 * Set attribute to the specified OST object.
701 * If the transaction is a remote one, then add OUT_ATTR_SET sub-request
702 * in the OUT RPC that will be flushed when the remote transaction stop.
703 * Otherwise, it will generate a MDS_SETATTR64_REC record in the llog that
704 * will be handled by a dedicated thread asynchronously.
706 * If the attribute entry exists in the OSP object attributes cache,
707 * then update the cached attribute according to given attribute.
709 * \param[in] env pointer to the thread context
710 * \param[in] dt pointer to the OSP layer dt_object
711 * \param[in] attr pointer to the attribute to be set
712 * \param[in] th pointer to the transaction handler
714 * \retval 0 for success
715 * \retval negative error number on failure
717 static int osp_attr_set(const struct lu_env *env, struct dt_object *dt,
718 const struct lu_attr *attr, struct thandle *th)
720 struct osp_object *o = dt2osp_obj(dt);
724 /* we're interested in uid/gid/projid/layout version changes only */
725 if (!(attr->la_valid & LA_REMOTE_ATTR_SET))
728 if (!is_only_remote_trans(th)) {
/* Synchronous flag set: push the attr change to the peer right
 * away via a dedicated OUT RPC instead of the llog path. */
729 if (attr->la_flags & LUSTRE_SET_SYNC_FL) {
730 struct ptlrpc_request *req = NULL;
731 struct osp_update_request *update = NULL;
732 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
734 update = osp_update_request_create(&osp->opd_dt_dev);
736 RETURN(PTR_ERR(update));
738 rc = OSP_UPDATE_RPC_PACK(env, out_attr_set_pack, update,
739 lu_object_fid(&dt->do_lu),
742 CERROR("%s: update error "DFID": rc = %d\n",
743 osp->opd_obd->obd_name,
744 PFID(lu_object_fid(&dt->do_lu)), rc);
746 osp_update_request_destroy(env, update);
750 rc = osp_remote_sync(env, osp, update, &req);
752 ptlrpc_req_finished(req);
754 osp_update_request_destroy(env, update);
/* Default path: queue an llog setattr record for the sync thread. */
756 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
758 rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);
759 /* send layout version to OST ASAP */
760 if (attr->la_valid & LA_LAYOUT_VERSION)
761 wake_up(&osp->opd_sync_waitq);
762 /* XXX: send new uid/gid to OST ASAP? */
767 /* It is for OST-object attr_set directly without updating
768 * local MDT-object attribute. It is usually used by LFSCK. */
769 rc = osp_md_attr_set(env, dt, attr, th);
770 CDEBUG(D_INFO, "(1) set attr "DFID": rc = %d\n",
771 PFID(&dt->do_lu.lo_header->loh_fid), rc);
776 /* Update the OSP object attributes cache. */
778 spin_lock(&o->opo_lock);
779 if (attr->la_valid & LA_UID) {
780 la->la_uid = attr->la_uid;
781 la->la_valid |= LA_UID;
784 if (attr->la_valid & LA_GID) {
785 la->la_gid = attr->la_gid;
786 la->la_valid |= LA_GID;
788 if (attr->la_valid & LA_PROJID) {
789 la->la_projid = attr->la_projid;
790 la->la_valid |= LA_PROJID;
792 spin_unlock(&o->opo_lock);
799 * Interpreter function for getting OSP object extended attribute asynchronously
801 * Called to interpret the result of an async mode RPC for getting the
802 * OSP object extended attribute.
804 * \param[in] env pointer to the thread context
805 * \param[in] reply pointer to the RPC reply
806 * \param[in] req pointer to the RPC request
807 * \param[in] obj pointer to the OSP object
808 * \param[out] data pointer to OSP object attributes cache
809 * \param[in] index the index of the attribute buffer in the reply
810 * \param[in] rc the result for handling the RPC
812 * \retval 0 for success
813 * \retval negative error number on failure
815 static int osp_xattr_get_interpterer(const struct lu_env *env,
816 struct object_update_reply *reply,
817 struct ptlrpc_request *req,
818 struct osp_object *obj,
819 void *data, int index, int rc)
821 struct osp_xattr_entry *oxe = data;
823 spin_lock(&obj->opo_lock);
825 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
/* Bytes of the entry buffer used by header + name + NUL; the rest
 * is the value area. */
826 size_t len = sizeof(*oxe) + oxe->oxe_namelen + 1;
828 rc = object_update_result_data_get(reply, rbuf, index);
829 if (rc == -ENOENT || rc == -ENODATA || rc == 0) {
/* Reply invalid or value too large for the cached entry's buffer. */
835 if (unlikely(rc < 0) ||
836 rbuf->lb_len > (oxe->oxe_buflen - len)) {
841 __osp_oac_xattr_assignment(obj, oxe, rbuf);
842 } else if (rc == -ENOENT || rc == -ENODATA) {
850 spin_unlock(&obj->opo_lock);
852 /* Put the reference obtained in the osp_declare_xattr_get(). */
853 osp_oac_xattr_put(oxe);
859 * Implement OSP dt_object_operations::do_declare_xattr_get() interface.
861 * Declare that the caller will get extended attribute from the specified
864 * This function will add an OUT_XATTR_GET sub-request to the per OSP
865 * based shared asynchronous request queue with the interpreter function:
866 * osp_xattr_get_interpterer().
868 * \param[in] env pointer to the thread context
869 * \param[in] dt pointer to the OSP layer dt_object
870 * \param[out] buf pointer to the lu_buf to hold the extended attribute
871 * \param[in] name the name for the expected extended attribute
873 * \retval 0 for success
874 * \retval negative error number on failure
876 static int osp_declare_xattr_get(const struct lu_env *env, struct dt_object *dt,
877 struct lu_buf *buf, const char *name)
879 struct osp_object *obj = dt2osp_obj(dt);
880 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
881 struct osp_xattr_entry *oxe;
885 LASSERT(buf != NULL);
886 LASSERT(name != NULL);
888 if (unlikely(buf->lb_len == 0))
/* Reserve (or reuse) a cache entry that the interpreter will fill;
 * its reference is dropped in osp_xattr_get_interpterer(). */
891 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
895 len = strlen(name) + 1;
896 mutex_lock(&osp->opd_async_requests_mutex);
897 rc = osp_insert_async_request(env, OUT_XATTR_GET, obj, 1,
898 &len, (const void **)&name,
900 osp_xattr_get_interpterer);
902 mutex_unlock(&osp->opd_async_requests_mutex);
/* Insert failed: drop the reference reserved for the interpreter. */
903 osp_oac_xattr_put(oxe);
905 struct osp_update_request *our;
906 struct osp_update_request_sub *ours;
908 /* XXX: Currently, we trigger the batched async OUT
909 * RPC via dt_declare_xattr_get(). It is not
910 * a perfect solution, but works well now.
912 * We will improve it in the future. */
913 our = osp->opd_async_requests;
914 ours = osp_current_object_update_request(our);
915 if (ours != NULL && ours->ours_req != NULL &&
916 ours->ours_req->ourq_count > 0) {
/* Detach the queue before unplugging so new requests can
 * accumulate on a fresh one. */
917 osp->opd_async_requests = NULL;
918 mutex_unlock(&osp->opd_async_requests_mutex);
919 rc = osp_unplug_async_request(env, osp, our);
921 mutex_unlock(&osp->opd_async_requests_mutex);
929 * Implement OSP layer dt_object_operations::do_xattr_get() interface.
931 * Get extended attribute from the specified MDT/OST object.
933 * If the extended attribute is in the OSP object attributes cache, then
934 * return the cached extended attribute directly. Otherwise it will get
935 * the extended attribute synchronously, if successful, add it to the OSP
936 * attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
938 * There is a race condition: some other thread has added the named extended
939 * attributed entry to the OSP object attributes cache during the current
940 * OUT_XATTR_GET handling. If such case happens, the OSP will replace the
941 * (just) existing extended attribute entry with the new replied one.
943 * \param[in] env pointer to the thread context
944 * \param[in] dt pointer to the OSP layer dt_object
945 * \param[out] buf pointer to the lu_buf to hold the extended attribute
946 * \param[in] name the name for the expected extended attribute
948 * \retval 0 for success
949 * \retval negative error number on failure
951 int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
952 struct lu_buf *buf, const char *name)
954 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
955 struct osp_object *obj = dt2osp_obj(dt);
956 struct dt_device *dev = &osp->opd_dt_dev;
957 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
958 struct osp_update_request *update = NULL;
959 struct ptlrpc_request *req = NULL;
960 struct object_update_reply *reply;
961 struct osp_xattr_entry *oxe = NULL;
962 const char *dname = osp_dto2name(obj);
963 int invalidated, rc = 0;
966 LASSERT(buf != NULL);
967 LASSERT(name != NULL);
/* LFSCK fault-injection hook to simulate a bad network. */
969 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NETWORK) &&
970 osp->opd_index == cfs_fail_val) {
971 if (is_ost_obj(&dt->do_lu)) {
972 if (osp_dev2node(osp) == cfs_fail_val)
975 if (strcmp(name, XATTR_NAME_LINK) == 0)
980 if (unlikely(obj->opo_non_exist))
/* Snapshot the invalidation sequence before consulting the cache or
 * issuing the RPC, to detect concurrent invalidation afterwards. */
983 invalidated = atomic_read(&obj->opo_invalidate_seq);
/* Fast path: serve the value from the attributes cache if ready. */
985 oxe = osp_oac_xattr_find(obj, name, false);
987 spin_lock(&obj->opo_lock);
988 if (oxe->oxe_ready) {
990 GOTO(unlock, rc = -ENODATA);
/* NULL lb_buf means the caller only probes the value size. */
992 if (buf->lb_buf == NULL)
993 GOTO(unlock, rc = oxe->oxe_vallen);
995 if (buf->lb_len < oxe->oxe_vallen)
996 GOTO(unlock, rc = -ERANGE);
998 memcpy(buf->lb_buf, oxe->oxe_value,
1001 GOTO(unlock, rc = oxe->oxe_vallen);
1004 spin_unlock(&obj->opo_lock);
1005 osp_oac_xattr_put(oxe);
1009 spin_unlock(&obj->opo_lock);
/* Cache miss: fetch the xattr from the peer synchronously. */
1011 update = osp_update_request_create(dev);
1013 GOTO(out_req, rc = PTR_ERR(update));
1015 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_get_pack, update,
1016 lu_object_fid(&dt->do_lu), name, buf->lb_len);
1018 CERROR("%s: Insert update error "DFID": rc = %d\n",
1019 dname, PFID(lu_object_fid(&dt->do_lu)), rc);
1023 rc = osp_remote_sync(env, osp, update, &req);
1025 down_read(&obj->opo_invalidate_sem);
1026 if (invalidated != atomic_read(&obj->opo_invalidate_seq)) {
1027 /* invalidation has been requested, we can't cache the result */
1030 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
1033 reply = req_capsule_server_sized_get(&req->rq_pill,
1034 &RMF_OUT_UPDATE_REPLY,
1035 OUT_UPDATE_REPLY_SIZE);
1036 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
1037 CERROR("%s: Wrong version %x expected %x "DFID
1038 ": rc = %d\n", dname, reply->ourp_magic,
1040 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
1041 GOTO(out, rc = -EPROTO);
1043 rc = object_update_result_data_get(reply, rbuf, 0);
/* The whole object is gone on the peer: mark it non-existent. */
1048 if (rc == -ENOENT) {
1049 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
1050 obj->opo_non_exist = 1;
1054 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
1057 CWARN("%s: Fail to add xattr (%s) to cache for "
1058 DFID" (1): rc = %d\n", dname, name,
1059 PFID(lu_object_fid(&dt->do_lu)), rc);
1064 spin_lock(&obj->opo_lock);
1065 if (rc == -ENOENT || rc == -ENODATA) {
1071 spin_unlock(&obj->opo_lock);
1076 reply = req_capsule_server_sized_get(&req->rq_pill,
1077 &RMF_OUT_UPDATE_REPLY,
1078 OUT_UPDATE_REPLY_SIZE);
1079 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
1080 CERROR("%s: Wrong version %x expected %x "DFID": rc = %d\n",
1081 dname, reply->ourp_magic, UPDATE_REPLY_MAGIC,
1082 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
1084 GOTO(out, rc = -EPROTO);
1087 rc = object_update_result_data_get(reply, rbuf, 0);
/* Empty or negative result: remember the "no such xattr" state so
 * repeated lookups do not hit the wire again. */
1088 if (rc < 0 || rbuf->lb_len == 0) {
1089 if (oxe == NULL && rc == -ENODATA) {
1090 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
1093 CWARN("%s: Fail to add xattr (%s) to cache for "
1094 DFID" (1): rc = %d\n", dname, name,
1095 PFID(lu_object_fid(&dt->do_lu)), rc);
1101 spin_lock(&obj->opo_lock);
1102 if (unlikely(rc == -ENODATA)) {
1108 spin_unlock(&obj->opo_lock);
1114 /* For detecting EA size. */
/* Got a value: cache it (sized to the replied length). */
1119 oxe = osp_oac_xattr_find_or_add(obj, name, rbuf->lb_len);
1121 CWARN("%s: Fail to add xattr (%s) to "
1122 "cache for "DFID" (2): rc = %d\n",
1123 dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
1129 oxe = osp_oac_xattr_assignment(obj, oxe, rbuf);
1134 up_read(&obj->opo_invalidate_sem);
/* Copy the replied value out to the caller's buffer. */
1137 if (rc > 0 && buf->lb_buf) {
1138 if (unlikely(buf->lb_len < rbuf->lb_len))
1141 memcpy(buf->lb_buf, rbuf->lb_buf, rbuf->lb_len);
1145 ptlrpc_req_finished(req);
1147 if (update && !IS_ERR(update))
1148 osp_update_request_destroy(env, update);
1151 osp_oac_xattr_put(oxe);
1157 * Implement OSP layer dt_object_operations::do_declare_xattr_set() interface.
1159 * Declare that the caller will set extended attribute to the specified
1162 * If it is non-remote transaction, it will add an OUT_XATTR_SET sub-request
1163 * to the OUT RPC that will be flushed when the transaction start. And if the
1164 * OSP attributes cache is initialized, then check whether the name extended
1165 * attribute entry exists in the cache or not. If yes, replace it; otherwise,
1166 * add the extended attribute to the cache.
1168 * \param[in] env pointer to the thread context
1169 * \param[in] dt pointer to the OSP layer dt_object
1170 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1171 * \param[in] name the name of the extended attribute to be set
1172 * \param[in] flag to indicate the detailed set operation: LU_XATTR_CREATE
1173 * or LU_XATTR_REPLACE or others
1174 * \param[in] th pointer to the transaction handler
1176 * \retval 0 for success
1177 * \retval negative error number on failure
1179 int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
1180 const struct lu_buf *buf, const char *name,
1181 int flag, struct thandle *th)
/* Only ensures the transaction has an OSP update request attached;
 * the actual packing happens in osp_xattr_set(). */
1183 return osp_trans_update_request_create(th);
1187 * Implement OSP layer dt_object_operations::do_xattr_set() interface.
1189 * Set extended attribute to the specified MDT/OST object.
1191 * Add an OUT_XATTR_SET sub-request into the OUT RPC that will be flushed in
1192 * the transaction stop. And if the OSP attributes cache is initialized, then
1193 * check whether the name extended attribute entry exists in the cache or not.
1194 * If yes, replace it; otherwise, add the extended attribute to the cache.
1196 * \param[in] env	pointer to the thread context
1197 * \param[in] dt	pointer to the OSP layer dt_object
1198 * \param[in] buf	pointer to the lu_buf to hold the extended attribute
1199 * \param[in] name	the name of the extended attribute to be set
1200 * \param[in] fl	to indicate the detailed set operation: LU_XATTR_CREATE
1201 *			or LU_XATTR_REPLACE or others
1202 * \param[in] th	pointer to the transaction handler
1204 * \retval		0 for success
1205 * \retval		negative error number on failure
1207 int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
1208 const struct lu_buf *buf, const char *name, int fl,
1211 struct osp_object *o = dt2osp_obj(dt);
1212 struct osp_update_request *update;
1213 struct osp_xattr_entry *oxe;
/* The OUT update request must have been created by
 * osp_declare_xattr_set() earlier in this transaction. */
1217 update = thandle_to_osp_update_request(th);
1218 LASSERT(update != NULL);
1220 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
1221 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
/* Queue the OUT_XATTR_SET sub-request; sent at transaction stop. */
1223 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_set_pack, update,
1224 lu_object_fid(&dt->do_lu), buf, name, fl);
1228 /* Do not cache linkEA that may be self-adjusted by peers
1229 * under EA overflow case. */
1230 if (strcmp(name, XATTR_NAME_LINK) == 0) {
/* Drop any stale cached linkEA entry instead of refreshing it. */
1231 oxe = osp_oac_xattr_find(o, name, true);
1233 osp_oac_xattr_put(oxe);
/* Non-linkEA: refresh the local attributes cache with the new value. */
1238 oxe = osp_oac_xattr_find_or_add(o, name, buf->lb_len);
1240 CWARN("%s: cannot cache xattr '%s' of "DFID"\n",
1241 osp_dto2name(o), name, PFID(lu_object_fid(&dt->do_lu)));
1246 oxe = osp_oac_xattr_assignment(o, oxe, buf);
1248 osp_oac_xattr_put(oxe);
1254 * Implement OSP layer dt_object_operations::do_declare_xattr_del() interface.
1256 * Declare that the caller will delete extended attribute on the specified
1259 * If it is non-remote transaction, it will add an OUT_XATTR_DEL sub-request
1260 * to the OUT RPC that will be flushed when the transaction start. And if the
1261 * name extended attribute entry exists in the OSP attributes cache, then remove
1262 * it from the cache.
1264 * \param[in] env	pointer to the thread context
1265 * \param[in] dt	pointer to the OSP layer dt_object
1266 * \param[in] name	the name of the extended attribute to be set
1267 * \param[in] th	pointer to the transaction handler
1269 * \retval		0 for success
1270 * \retval		negative error number on failure
1272 int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
1273 const char *name, struct thandle *th)
/* Declare path only sets up the OUT update request; the OUT_XATTR_DEL
 * sub-request itself is packed by osp_xattr_del(). */
1275 return osp_trans_update_request_create(th);
1279 * Implement OSP layer dt_object_operations::do_xattr_del() interface.
1281 * Delete extended attribute on the specified MDT/OST object.
1283 * If it is remote transaction, it will add an OUT_XATTR_DEL sub-request into
1284 * the OUT RPC that will be flushed when the transaction stop. And if the name
1285 * extended attribute entry exists in the OSP attributes cache, then remove it
1288 * \param[in] env	pointer to the thread context
1289 * \param[in] dt	pointer to the OSP layer dt_object
1290 * \param[in] name	the name of the extended attribute to be deleted
1291 * \param[in] th	pointer to the transaction handler
1293 * \retval		0 for success
1294 * \retval		negative error number on failure
1296 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
1297 const char *name, struct thandle *th)
1299 struct osp_update_request *update;
1300 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1301 struct osp_object *o = dt2osp_obj(dt);
1302 struct osp_xattr_entry *oxe;
1305 update = thandle_to_osp_update_request(th);
1306 LASSERT(update != NULL);
/* Queue the OUT_XATTR_DEL sub-request for the remote peer. */
1308 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_del_pack, update, fid, name);
/* Remove the matching entry (if any) from the local xattr cache;
 * the 'true' argument unlinks it from the object's list. */
1312 oxe = osp_oac_xattr_find(o, name, true);
1314 /* Drop the ref for entry on list. */
1315 osp_oac_xattr_put(oxe);
/**
 * Drop all cached attributes of an OSP object.
 *
 * Empties the object's xattr cache list (dropping the list reference on
 * each entry) and clears the cached basic attributes by zeroing
 * opo_attr.la_valid, all under opo_lock.
 *
 * \param[in] obj	the OSP object whose cache is to be invalidated
 */
1320 void osp_obj_invalidate_cache(struct osp_object *obj)
1322 struct osp_xattr_entry *oxe;
1323 struct osp_xattr_entry *tmp;
1325 spin_lock(&obj->opo_lock);
1326 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
1328 list_del_init(&oxe->oxe_list);
1329 osp_oac_xattr_put(oxe);
1331 obj->opo_attr.la_valid = 0;
1332 spin_unlock(&obj->opo_lock);
1336 * Implement OSP layer dt_object_operations::do_invalidate() interface.
1338 * Invalidate attributes cached on the specified MDT/OST object.
1340 * \param[in] env	pointer to the thread context
1341 * \param[in] dt	pointer to the OSP layer dt_object
1343 * \retval		0 for success
1344 * \retval		negative error number on failure
1346 int osp_invalidate(const struct lu_env *env, struct dt_object *dt)
1348 struct osp_object *obj = dt2osp_obj(dt);
1351 CDEBUG(D_HA, "Invalidate osp_object "DFID"\n",
1352 PFID(lu_object_fid(&dt->do_lu)));
1354 /* serialize attr/EA set vs. invalidation */
1355 down_write(&obj->opo_invalidate_sem);
1357 /* this should invalidate all in-flights */
1358 atomic_inc(&obj->opo_invalidate_seq);
1360 spin_lock(&obj->opo_lock);
1361 /* do not mark new objects stale */
1362 if (obj->opo_attr.la_valid)
1364 obj->opo_non_exist = 0;
1365 spin_unlock(&obj->opo_lock);
/* Flush both basic attribute and xattr caches. */
1367 osp_obj_invalidate_cache(obj);
1369 up_write(&obj->opo_invalidate_sem);
/**
 * Check whether an OSP object should be treated as stale.
 *
 * An OST object known to not exist is also reported as stale;
 * otherwise the object's opo_stale flag is returned.
 *
 * \param[in] dt	pointer to the OSP layer dt_object
 *
 * \retval		true if the object is stale (or a non-existent
 *			OST object), false otherwise
 */
1374 bool osp_check_stale(struct dt_object *dt)
1376 struct osp_object *obj = dt2osp_obj(dt);
1378 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
1381 return obj->opo_stale;
1386 * Implement OSP layer dt_object_operations::do_declare_create() interface.
1388 * Declare that the caller will create the OST object.
1390 * If the transaction is a remote transaction and the FID for the OST-object
1391 * has been assigned already, then handle it as creating (remote) MDT object
1392 * via osp_md_declare_create(). This function is usually used for LFSCK
1393 * to re-create the lost OST object. Otherwise, if it is not replay case, the
1394 * OSP will reserve pre-created object for the subsequent create operation;
1395 * if the MDT side cached pre-created objects are less than some threshold,
1396 * then it will wakeup the pre-create thread.
1398 * \param[in] env	pointer to the thread context
1399 * \param[in] dt	pointer to the OSP layer dt_object
1400 * \param[in] attr	the attribute for the object to be created
1401 * \param[in] hint	pointer to the hint for creating the object, such as
1403 * \param[in] dof	pointer to the dt_object_format for help the creation
1404 * \param[in] th	pointer to the transaction handler
1406 * \retval		0 for success
1407 * \retval		negative error number on failure
1409 static int osp_declare_create(const struct lu_env *env, struct dt_object *dt,
1410 struct lu_attr *attr,
1411 struct dt_allocation_hint *hint,
1412 struct dt_object_format *dof, struct thandle *th)
1414 struct osp_thread_info *osi = osp_env_info(env);
1415 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1416 struct osp_object *o = dt2osp_obj(dt);
1417 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1418 struct thandle *local_th;
/* Remote transaction with a pre-assigned FID: treat as MDT-object
 * creation (LFSCK re-creating a lost OST object). */
1423 if (is_only_remote_trans(th) && !fid_is_zero(fid)) {
1424 LASSERT(fid_is_sane(fid));
1426 rc = osp_md_declare_create(env, dt, attr, hint, dof, th);
1431 /* should happen to non-0 OSP only so that at least one object
1432 * has been already declared in the scenario and LOD should
1434 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_CREATE_FAIL) && d->opd_index == 1)
1437 LASSERT(d->opd_last_used_oid_file);
1440 * There can be gaps in precreated ids and record to unlink llog
1441 * XXX: we do not handle gaps yet, implemented before solution
1442 * was found to be racy, so we disabled that. there is no
1443 * point in making useless but expensive llog declaration.
1445 /* rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th); */
/* Declarations for the last-used-OID file go through the local
 * storage transaction handle. */
1447 local_th = osp_get_storage_thandle(env, th, d);
1448 if (IS_ERR(local_th))
1449 RETURN(PTR_ERR(local_th));
1451 if (unlikely(!fid_is_zero(fid))) {
1452 /* replay case: caller knows fid */
1453 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off, NULL,
1455 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1456 &osi->osi_lb, osi->osi_off,
1462 * in declaration we need to reserve object so that we don't block
1463 * awaiting precreation RPC to complete
1465 rc = osp_precreate_reserve(env, d, !hint || hint->dah_can_block);
1467 * we also need to declare update to local "last used id" file for
1468 * recovery if object isn't used for a reason, we need to release
1469 * reservation, this can be made in osd_object_release()
1472 /* mark id is reserved: in create we don't want to talk
1474 LASSERT(o->opo_reserved == 0);
1475 o->opo_reserved = 1;
1477 /* common for all OSPs file historically */
1478 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off, NULL,
1480 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1481 &osi->osi_lb, osi->osi_off,
1484 /* not needed in the cache anymore */
1485 set_bit(LU_OBJECT_HEARD_BANSHEE,
1486 &dt->do_lu.lo_header->loh_flags);
1492 * Implement OSP layer dt_object_operations::do_create() interface.
1494 * Create the OST object.
1496 * If the transaction is a remote transaction and the FID for the OST-object
1497 * has been assigned already, then handle it as handling MDT object via the
1498 * osp_md_create(). For other cases, the OSP will assign FID to the
1499 * object to be created, and update last_used Object ID (OID) file.
1501 * \param[in] env	pointer to the thread context
1502 * \param[in] dt	pointer to the OSP layer dt_object
1503 * \param[in] attr	the attribute for the object to be created
1504 * \param[in] hint	pointer to the hint for creating the object, such as
1506 * \param[in] dof	pointer to the dt_object_format for help the creation
1507 * \param[in] th	pointer to the transaction handler
1509 * \retval		0 for success
1510 * \retval		negative error number on failure
1512 static int osp_create(const struct lu_env *env, struct dt_object *dt,
1513 struct lu_attr *attr, struct dt_allocation_hint *hint,
1514 struct dt_object_format *dof, struct thandle *th)
1516 struct osp_thread_info *osi = osp_env_info(env);
1517 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1518 struct osp_object *o = dt2osp_obj(dt);
1520 struct lu_fid *fid = &osi->osi_fid;
1521 struct thandle *local_th;
/* Remote transaction with pre-assigned FID: delegate to the
 * MDT-object create path (LFSCK re-creation). */
1524 if (is_only_remote_trans(th) &&
1525 !fid_is_zero(lu_object_fid(&dt->do_lu))) {
1526 LASSERT(fid_is_sane(lu_object_fid(&dt->do_lu)));
1528 rc = osp_md_create(env, dt, attr, hint, dof, th);
1530 o->opo_non_exist = 0;
1535 o->opo_non_exist = 0;
1536 if (o->opo_reserved) {
1537 /* regular case, fid is assigned holding transaction open */
1538 osp_object_assign_fid(env, d, o);
1541 memcpy(fid, lu_object_fid(&dt->do_lu), sizeof(*fid));
1543 LASSERTF(fid_is_sane(fid), "fid for osp_object %p is insane"DFID"!\n",
1546 if (!o->opo_reserved) {
1547 /* special case, id was assigned outside of transaction
1548 * see comments in osp_declare_attr_set */
1549 LASSERT(d->opd_pre != NULL);
1550 spin_lock(&d->opd_pre_lock);
1551 osp_update_last_fid(d, fid);
1552 spin_unlock(&d->opd_pre_lock);
1555 CDEBUG(D_INODE, "fid for osp_object %p is "DFID"\n", o, PFID(fid));
1557 /* If the precreate ends, it means it will be ready to rollover to
1558 * the new sequence soon, all the creation should be synchronized,
1559 * otherwise during replay, the replay fid will be inconsistent with
1560 * last_used/create fid */
1561 if (osp_precreate_end_seq(env, d) && osp_is_fid_client(d))
1564 local_th = osp_get_storage_thandle(env, th, d);
1565 if (IS_ERR(local_th))
1566 RETURN(PTR_ERR(local_th));
1568 * it's OK if the import is inactive by this moment - id was created
1569 * by OST earlier, we just need to maintain it consistently on the disk
1570 * once import is reconnected, OSP will claim this and other objects
1571 * used and OST either keep them, if they exist or recreate
1574 /* we might have lost precreated objects */
/* NOTE(review): parenthesis looks misplaced - should probably be
 * unlikely(d->opd_gap_count > 0); harmless since unlikely() preserves
 * truth value, but worth fixing upstream. */
1575 if (unlikely(d->opd_gap_count) > 0) {
1576 LASSERT(d->opd_pre != NULL);
/* Re-check the gap count under opd_pre_lock to avoid racing with
 * the precreate thread. */
1577 spin_lock(&d->opd_pre_lock);
1578 if (d->opd_gap_count > 0) {
1579 int count = d->opd_gap_count;
1581 rc = ostid_set_id(&osi->osi_oi,
1582 fid_oid(&d->opd_gap_start_fid));
1584 spin_unlock(&d->opd_pre_lock);
1587 d->opd_gap_count = 0;
1588 spin_unlock(&d->opd_pre_lock);
1590 CDEBUG(D_HA, "Writing gap "DFID"+%d in llog\n",
1591 PFID(&d->opd_gap_start_fid), count);
1592 /* real gap handling is disabled until ORI-692 will be
1593 * fixed, now we only report gaps */
1595 spin_unlock(&d->opd_pre_lock);
1599 /* Only need update last_used oid file, seq file will only be update
1600 * during seq rollover */
1601 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off,
1602 &d->opd_last_id, d->opd_index);
1604 rc = dt_record_write(env, d->opd_last_used_oid_file, &osi->osi_lb,
1605 &osi->osi_off, local_th);
1607 CDEBUG(D_HA, "%s: Wrote last used FID: "DFID", index %d: %d\n",
1608 d->opd_obd->obd_name, PFID(fid), d->opd_index, rc);
1614 * Implement OSP layer dt_object_operations::do_declare_destroy() interface.
1616 * Declare that the caller will destroy the specified OST object.
1618 * The OST object destroy will be handled via llog asynchronously. This
1619 * function will declare the credits for generating MDS_UNLINK64_REC llog.
1621 * \param[in] env	pointer to the thread context
1622 * \param[in] dt	pointer to the OSP layer dt_object to be destroyed
1623 * \param[in] th	pointer to the transaction handler
1625 * \retval		0 for success
1626 * \retval		negative error number on failure
1628 int osp_declare_destroy(const struct lu_env *env, struct dt_object *dt,
1631 struct osp_object *o = dt2osp_obj(dt);
1632 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Destroy via llog applies only to OST-facing OSP devices. */
1637 LASSERT(!osp->opd_connect_mdt);
/* OBD_FAIL_LFSCK_LOST_MDTOBJ fault injection skips the declaration
 * to simulate a lost object for LFSCK testing. */
1639 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ))
1640 rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
1646 * Implement OSP layer dt_object_operations::do_destroy() interface.
1648 * Destroy the specified OST object.
1650 * The OSP generates a MDS_UNLINK64_REC record in the llog. There
1651 * will be some dedicated thread to handle the llog asynchronously.
1653 * It also marks the object as non-cached.
1655 * \param[in] env	pointer to the thread context
1656 * \param[in] dt	pointer to the OSP layer dt_object to be destroyed
1657 * \param[in] th	pointer to the transaction handler
1659 * \retval		0 for success
1660 * \retval		negative error number on failure
1662 static int osp_destroy(const struct lu_env *env, struct dt_object *dt,
1665 struct osp_object *o = dt2osp_obj(dt);
1666 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Mark non-existent immediately; the remote unlink happens later
 * via the llog sync machinery. */
1671 o->opo_non_exist = 1;
1673 LASSERT(!osp->opd_connect_mdt);
1675 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ)) {
1676 /* once transaction is committed put proper command on
1677 * the queue going to our OST. */
1678 rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
1683 /* not needed in cache any more */
1684 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
/* Orphan index modification operations. The orphan index is produced by
 * the OST for LFSCK iteration; direct lookup/insert/delete are not
 * meaningful through this interface (bodies elided in this view -
 * presumably they return an error/unsupported code; confirm upstream). */
1689 static int osp_orphan_index_lookup(const struct lu_env *env,
1690 struct dt_object *dt,
1692 const struct dt_key *key)
1697 static int osp_orphan_index_declare_insert(const struct lu_env *env,
1698 struct dt_object *dt,
1699 const struct dt_rec *rec,
1700 const struct dt_key *key,
1701 struct thandle *handle)
1706 static int osp_orphan_index_insert(const struct lu_env *env,
1707 struct dt_object *dt,
1708 const struct dt_rec *rec,
1709 const struct dt_key *key,
1710 struct thandle *handle)
1715 static int osp_orphan_index_declare_delete(const struct lu_env *env,
1716 struct dt_object *dt,
1717 const struct dt_key *key,
1718 struct thandle *handle)
1723 static int osp_orphan_index_delete(const struct lu_env *env,
1724 struct dt_object *dt,
1725 const struct dt_key *key,
1726 struct thandle *handle)
1732 * Initialize the OSP layer index iteration.
1734 * \param[in] env	pointer to the thread context
1735 * \param[in] dt	pointer to the index object to be iterated
1736 * \param[in] attr	iteration attributes, forwarded to the peer
1738 * \retval		pointer to the iteration structure
1739 * \retval		negative error number on failure
1741 struct dt_it *osp_it_init(const struct lu_env *env, struct dt_object *dt,
1748 return ERR_PTR(-ENOMEM);
/* -1 means "before the first entry"; advanced by osp_it_next_page(). */
1750 it->ooi_pos_ent = -1;
1752 it->ooi_attr = attr;
1754 return (struct dt_it *)it;
1758 * Finalize the OSP layer index iteration.
 *
 * Releases any bulk pages still cached on the iterator, unmapping the
 * currently-mapped page first.
1760 * \param[in] env	pointer to the thread context
1761 * \param[in] di	pointer to the iteration structure
1763 void osp_it_fini(const struct lu_env *env, struct dt_it *di)
1765 struct osp_it *it = (struct osp_it *)di;
1766 struct page **pages = it->ooi_pages;
1767 int npages = it->ooi_total_npages;
1770 if (pages != NULL) {
1771 for (i = 0; i < npages; i++) {
1772 if (pages[i] != NULL) {
1773 if (pages[i] == it->ooi_cur_page) {
1775 it->ooi_cur_page = NULL;
1777 __free_page(pages[i]);
1780 OBD_FREE_PTR_ARRAY(pages, npages);
1786 * Get more records for the iteration from peer.
1788 * The new records will be filled in an array of pages. The OSP side
1789 * allows 1MB bulk data to be transferred.
1791 * \param[in] env	pointer to the thread context
1792 * \param[in] it	pointer to the iteration structure
1794 * \retval		0 for success
1795 * \retval		negative error number on failure
1797 static int osp_it_fetch(const struct lu_env *env, struct osp_it *it)
1799 struct lu_device *dev = it->ooi_obj->do_lu.lo_dev;
1800 struct osp_device *osp = lu2osp_dev(dev);
1801 struct page **pages;
1802 struct ptlrpc_request *req = NULL;
1803 struct ptlrpc_bulk_desc *desc;
1804 struct idx_info *ii;
/* Cap the bulk transfer at min(OFD_MAX_BRW_SIZE, 1MB) worth of pages. */
1811 npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
1812 npages /= PAGE_SIZE;
1814 OBD_ALLOC_PTR_ARRAY(pages, npages);
1818 it->ooi_pages = pages;
1819 it->ooi_total_npages = npages;
1820 for (i = 0; i < npages; i++) {
1821 pages[i] = alloc_page(GFP_NOFS);
1822 if (pages[i] == NULL)
/* Build the OBD_IDX_READ request carrying the idx_info descriptor. */
1826 req = ptlrpc_request_alloc(osp->opd_obd->u.cli.cl_import,
1831 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
1833 ptlrpc_request_free(req);
1837 osp_set_req_replay(osp, req);
1838 req->rq_request_portal = OUT_PORTAL;
1839 ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
1840 memset(ii, 0, sizeof(*ii));
1841 if (fid_is_last_id(lu_object_fid(&it->ooi_obj->do_lu))) {
1842 /* LFSCK will iterate orphan object[FID_SEQ_LAYOUT_BTREE,
1843 * ost_index, 0] with LAST_ID FID, so it needs to replace
1844 * the FID with orphan FID here */
1845 ii->ii_fid.f_seq = FID_SEQ_LAYOUT_RBTREE;
1846 ii->ii_fid.f_oid = osp->opd_index;
1847 ii->ii_fid.f_ver = 0;
1848 ii->ii_flags = II_FL_NOHASH;
1849 ii->ii_attrs = osp_dev2node(osp);
1851 ii->ii_fid = *lu_object_fid(&it->ooi_obj->do_lu);
1852 ii->ii_flags = II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
1854 ii->ii_attrs = it->ooi_attr;
1856 ii->ii_magic = IDX_INFO_MAGIC;
1857 ii->ii_count = npages * LU_PAGE_COUNT;
/* Resume the iteration from where the previous fetch stopped. */
1858 ii->ii_hash_start = it->ooi_next;
1860 ptlrpc_at_set_req_timeout(req);
1862 desc = ptlrpc_prep_bulk_imp(req, npages, 1,
1863 PTLRPC_BULK_PUT_SINK,
1865 &ptlrpc_bulk_kiov_pin_ops);
1867 GOTO(out, rc = -ENOMEM);
1869 for (i = 0; i < npages; i++)
1870 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
1873 ptlrpc_request_set_replen(req);
1874 rc = ptlrpc_queue_wait(req);
1878 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
1879 req->rq_bulk->bd_nob_transferred);
/* Validate the reply and record how many pages actually carry data. */
1884 ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
1885 if (ii->ii_magic != IDX_INFO_MAGIC)
1886 GOTO(out, rc = -EPROTO);
1888 npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
1889 (PAGE_SHIFT - LU_PAGE_SHIFT);
1890 if (npages > it->ooi_total_npages) {
1891 CERROR("%s: returned more pages than expected, %u > %u\n",
1892 osp->opd_obd->obd_name, npages, it->ooi_total_npages);
1893 GOTO(out, rc = -EINVAL);
1896 it->ooi_rec_size = ii->ii_recsize;
1897 it->ooi_valid_npages = npages;
1898 if (ptlrpc_rep_need_swab(req))
1901 it->ooi_next = ii->ii_hash_end;
1904 ptlrpc_req_finished(req);
1910 * Move the iteration cursor to the next lu_page.
1912 * One system page (PAGE_SIZE) may contain multiple lu_page (4KB),
1913 * that depends on the LU_PAGE_COUNT. If it is not the last lu_page
1914 * in current system page, then move the iteration cursor to the next
1915 * lu_page in current system page. Otherwise, if there are more system
1916 * pages in the cache, then move the iteration cursor to the next system
1917 * page. If all the cached records (pages) have been iterated, then fetch
1918 * more records via osp_it_fetch().
1920 * \param[in] env	pointer to the thread context
1921 * \param[in] di	pointer to the iteration structure
1923 * \retval		positive for end of the directory
1924 * \retval		0 for success
1925 * \retval		negative error number on failure
1927 int osp_it_next_page(const struct lu_env *env, struct dt_it *di)
1929 struct osp_it *it = (struct osp_it *)di;
1930 struct lu_idxpage *idxpage;
1931 struct page **pages;
1937 idxpage = it->ooi_cur_idxpage;
1938 if (idxpage != NULL) {
1939 if (idxpage->lip_nr == 0)
1942 if (it->ooi_pos_ent < idxpage->lip_nr) {
1943 CDEBUG(D_INFO, "ooi_pos %d nr %d\n",
1944 (int)it->ooi_pos_ent, (int)idxpage->lip_nr);
/* Current lu_page exhausted: advance to the next one. */
1947 it->ooi_cur_idxpage = NULL;
1948 it->ooi_pos_lu_page++;
1951 if (it->ooi_pos_lu_page < LU_PAGE_COUNT) {
1952 it->ooi_cur_idxpage = (void *)it->ooi_cur_page +
1953 LU_PAGE_SIZE * it->ooi_pos_lu_page;
1955 lustre_swab_lip_header(it->ooi_cur_idxpage);
1956 if (it->ooi_cur_idxpage->lip_magic != LIP_MAGIC) {
1957 struct osp_device *osp =
1958 lu2osp_dev(it->ooi_obj->do_lu.lo_dev);
1960 CERROR("%s: invalid magic (%x != %x) for page "
1961 "%d/%d while read layout orphan index\n",
1962 osp->opd_obd->obd_name,
1963 it->ooi_cur_idxpage->lip_magic,
1964 LIP_MAGIC, it->ooi_pos_page,
1965 it->ooi_pos_lu_page);
1966 /* Skip this lu_page next time. */
1967 it->ooi_pos_ent = idxpage->lip_nr - 1;
1970 it->ooi_pos_ent = -1;
/* System page fully consumed: unmap it before moving on. */
1974 kunmap(it->ooi_cur_page);
1975 it->ooi_cur_page = NULL;
1979 pages = it->ooi_pages;
1980 if (it->ooi_pos_page < it->ooi_valid_npages) {
1981 it->ooi_cur_page = kmap(pages[it->ooi_pos_page]);
1982 it->ooi_pos_lu_page = 0;
/* All cached pages iterated: free the batch and reset state
 * before fetching more records from the peer. */
1986 for (i = 0; i < it->ooi_total_npages; i++) {
1987 if (pages[i] != NULL)
1988 __free_page(pages[i]);
1990 OBD_FREE_PTR_ARRAY(pages, it->ooi_total_npages);
1992 it->ooi_pos_page = 0;
1993 it->ooi_total_npages = 0;
1994 it->ooi_valid_npages = 0;
1997 it->ooi_cur_page = NULL;
1998 it->ooi_cur_idxpage = NULL;
1999 it->ooi_pages = NULL;
2002 if (it->ooi_next == II_END_OFF)
2005 rc = osp_it_fetch(env, it);
2013 * Move the iteration cursor to the next record.
2015 * If there are more records in the lu_page, then move the iteration
2016 * cursor to the next record directly. Otherwise, move the iteration
2017 * cursor to the record in the next lu_page via osp_it_next_page()
2019 * \param[in] env	pointer to the thread context
2020 * \param[in] di	pointer to the iteration structure
2022 * \retval		positive for end of the directory
2023 * \retval		0 for success
2024 * \retval		negative error number on failure
2026 static int osp_orphan_it_next(const struct lu_env *env, struct dt_it *di)
2028 struct osp_it *it = (struct osp_it *)di;
2029 struct lu_idxpage *idxpage;
2034 idxpage = it->ooi_cur_idxpage;
2035 if (idxpage != NULL) {
2036 if (idxpage->lip_nr == 0)
2040 if (it->ooi_pos_ent < idxpage->lip_nr) {
/* Record layout depends on the record size negotiated with the
 * peer: v3, v2, or the original lu_orphan_ent. Entries may need
 * byte-swapping when the reply came from a different endianness. */
2041 if (it->ooi_rec_size ==
2042 sizeof(struct lu_orphan_rec_v3)) {
2044 (struct lu_orphan_ent_v3 *)idxpage->lip_entries+
2047 lustre_swab_orphan_ent_v3(it->ooi_ent);
2048 } else if (it->ooi_rec_size ==
2049 sizeof(struct lu_orphan_rec_v2)) {
2051 (struct lu_orphan_ent_v2 *)idxpage->lip_entries+
2054 lustre_swab_orphan_ent_v2(it->ooi_ent);
2057 (struct lu_orphan_ent *)idxpage->lip_entries +
2060 lustre_swab_orphan_ent(it->ooi_ent);
2066 rc = osp_it_next_page(env, di);
/* Positioning get/put are no-ops for this iterator type
 * (bodies elided in this view). */
2073 int osp_it_get(const struct lu_env *env, struct dt_it *di,
2074 const struct dt_key *key)
2079 void osp_it_put(const struct lu_env *env, struct dt_it *di)
/**
 * Return the key (FID) of the current orphan iteration entry,
 * or NULL when the iterator has no current entry.
 */
2083 static struct dt_key *osp_orphan_it_key(const struct lu_env *env,
2084 const struct dt_it *di)
2086 struct osp_it *it = (struct osp_it *)di;
2087 struct lu_orphan_ent *ent = (struct lu_orphan_ent *)it->ooi_ent;
2089 if (likely(ent != NULL))
2090 return (struct dt_key *)(&ent->loe_key);
/* Orphan index keys are always FIDs, so the key size is fixed. */
2095 static int osp_orphan_it_key_size(const struct lu_env *env,
2096 const struct dt_it *di)
2098 return sizeof(struct lu_fid);
/**
 * Copy the current orphan record into \a rec.
 *
 * The copy width is chosen by the record size reported by the peer
 * (lu_orphan_rec_v3, _v2, or the original lu_orphan_rec), mirroring
 * the entry decoding in osp_orphan_it_next().
 */
2101 static int osp_orphan_it_rec(const struct lu_env *env, const struct dt_it *di,
2102 struct dt_rec *rec, __u32 attr)
2104 struct osp_it *it = (struct osp_it *)di;
2106 if (likely(it->ooi_ent)) {
2107 if (it->ooi_rec_size == sizeof(struct lu_orphan_rec_v3)) {
2108 struct lu_orphan_ent_v3 *ent =
2109 (struct lu_orphan_ent_v3 *)it->ooi_ent;
2111 *(struct lu_orphan_rec_v3 *)rec = ent->loe_rec;
2112 } else if (it->ooi_rec_size ==
2113 sizeof(struct lu_orphan_rec_v2)) {
2114 struct lu_orphan_ent_v2 *ent =
2115 (struct lu_orphan_ent_v2 *)it->ooi_ent;
2117 *(struct lu_orphan_rec_v2 *)rec = ent->loe_rec;
2119 struct lu_orphan_ent *ent =
2120 (struct lu_orphan_ent *)it->ooi_ent;
2122 *(struct lu_orphan_rec *)rec = ent->loe_rec;
/* Return the iteration cookie (hash of the next entry to fetch),
 * usable later with osp_orphan_it_load() to resume. */
2130 __u64 osp_it_store(const struct lu_env *env, const struct dt_it *di)
2132 struct osp_it *it = (struct osp_it *)di;
2134 return it->ooi_next;
2138 * Locate the iteration cursor to the specified position (cookie).
2140 * \param[in] env	pointer to the thread context
2141 * \param[in] di	pointer to the iteration structure
2142 * \param[in] hash	the specified position
2144 * \retval		positive number for locating to the exactly position
2146 * \retval		0 for arriving at the end of the iteration
2147 * \retval		negative error number on failure
2149 int osp_orphan_it_load(const struct lu_env *env, const struct dt_it *di,
2152 struct osp_it *it = (struct osp_it *)di;
/* Seed the cookie, then advance once so the cursor sits on the first
 * entry at or after the requested position. */
2155 it->ooi_next = hash;
2156 rc = osp_orphan_it_next(env, (struct dt_it *)di);
/* Combined key+rec accessor for the iterator (body elided in this view). */
2166 int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
/* Index operations vector used for the OST orphan index (LFSCK);
 * selected by osp_index_try() for IDIF LAST_ID objects. */
2172 static const struct dt_index_operations osp_orphan_index_ops = {
2173 .dio_lookup = osp_orphan_index_lookup,
2174 .dio_declare_insert = osp_orphan_index_declare_insert,
2175 .dio_insert = osp_orphan_index_insert,
2176 .dio_declare_delete = osp_orphan_index_declare_delete,
2177 .dio_delete = osp_orphan_index_delete,
2179 .init = osp_it_init,
2180 .fini = osp_it_fini,
2181 .next = osp_orphan_it_next,
2184 .key = osp_orphan_it_key,
2185 .key_size = osp_orphan_it_key_size,
2186 .rec = osp_orphan_it_rec,
2187 .store = osp_it_store,
2188 .load = osp_orphan_it_load,
2189 .key_rec = osp_it_key_rec,
2194 * Implement OSP layer dt_object_operations::do_index_try() interface.
2196 * Negotiate the index type.
2198 * If the target index is an IDIF object, then use osp_orphan_index_ops.
2199 * Otherwise, assign osp_md_index_ops to the dt_object::do_index_ops.
2200 * (\see lustre/include/lustre_fid.h for IDIF.)
2202 * \param[in] env	pointer to the thread context
2203 * \param[in] dt	pointer to the OSP layer dt_object
2204 * \param[in] feat	unused
2206 * \retval		0 for success
2208 static int osp_index_try(const struct lu_env *env,
2209 struct dt_object *dt,
2210 const struct dt_index_features *feat)
2212 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
/* LAST_ID object in IDIF namespace => LFSCK orphan index iteration;
 * everything else goes through the generic MDT index operations. */
2214 if (fid_is_last_id(fid) && fid_is_idif(fid))
2215 dt->do_index_ops = &osp_orphan_index_ops;
2217 dt->do_index_ops = &osp_md_index_ops;
/* dt_object operations vector for OST objects seen through OSP
 * (MDT objects use osp_md_obj_ops instead; see osp_object_init()). */
2221 static const struct dt_object_operations osp_obj_ops = {
2222 .do_declare_attr_get = osp_declare_attr_get,
2223 .do_attr_get = osp_attr_get,
2224 .do_declare_attr_set = osp_declare_attr_set,
2225 .do_attr_set = osp_attr_set,
2226 .do_declare_xattr_get = osp_declare_xattr_get,
2227 .do_xattr_get = osp_xattr_get,
2228 .do_declare_xattr_set = osp_declare_xattr_set,
2229 .do_xattr_set = osp_xattr_set,
2230 .do_declare_create = osp_declare_create,
2231 .do_create = osp_create,
2232 .do_declare_destroy = osp_declare_destroy,
2233 .do_destroy = osp_destroy,
2234 .do_index_try = osp_index_try,
2238 * Implement OSP layer lu_object_operations::loo_object_init() interface.
2240 * Initialize the object.
2242 * If it is a remote MDT object, then call do_attr_get() to fetch
2243 * the attribute from the peer.
2245 * \param[in] env	pointer to the thread context
2246 * \param[in] o	pointer to the OSP layer lu_object
2247 * \param[in] conf	unused
2249 * \retval		0 for success
2250 * \retval		negative error number on failure
2252 static int osp_object_init(const struct lu_env *env, struct lu_object *o,
2253 const struct lu_object_conf *conf)
2255 struct osp_object *po = lu2osp_obj(o);
2259 spin_lock_init(&po->opo_lock);
/* All OSP objects are stubs for objects living on a peer. */
2260 o->lo_header->loh_attr |= LOHA_REMOTE;
2261 INIT_LIST_HEAD(&po->opo_xattr_list);
2262 INIT_LIST_HEAD(&po->opo_invalidate_cb_list);
2263 init_rwsem(&po->opo_invalidate_sem);
2265 if (is_ost_obj(o)) {
2266 po->opo_obj.do_ops = &osp_obj_ops;
2268 struct lu_attr *la = &osp_env_info(env)->osi_attr;
/* Remote MDT object: use the MD operation vectors and fetch the
 * attributes from the peer up front (unless the object is new). */
2270 po->opo_obj.do_ops = &osp_md_obj_ops;
2271 po->opo_obj.do_body_ops = &osp_md_body_ops;
2273 if (conf != NULL && conf->loc_flags & LOC_F_NEW) {
2274 po->opo_non_exist = 1;
2276 rc = po->opo_obj.do_ops->do_attr_get(env, lu2dt_obj(o),
2279 o->lo_header->loh_attr |=
2280 LOHA_EXISTS | (la->la_mode & S_IFMT);
2281 if (rc == -ENOENT) {
2282 po->opo_non_exist = 1;
2286 init_rwsem(&po->opo_sem);
/* RCU callback: return the osp_object to its slab after the grace
 * period, so lockless readers never see freed memory. */
2291 static void osp_object_free_rcu(struct rcu_head *head)
2293 struct osp_object *obj = container_of(head, struct osp_object,
2294 opo_header.loh_rcu);
2296 kmem_cache_free(osp_object_kmem, obj);
2300 * Implement OSP layer lu_object_operations::loo_object_free() interface.
2302 * Finalize the object.
2304 * If the OSP object has attributes cache, then destroy the cache.
2305 * Free the object finally.
2307 * \param[in] env	pointer to the thread context
2308 * \param[in] o	pointer to the OSP layer lu_object
2310 static void osp_object_free(const struct lu_env *env, struct lu_object *o)
2312 struct osp_object *obj = lu2osp_obj(o);
2313 struct lu_object_header *h = o->lo_header;
2314 struct osp_xattr_entry *oxe;
2315 struct osp_xattr_entry *tmp;
2318 dt_object_fini(&obj->opo_obj);
2319 lu_object_header_fini(h);
/* At free time only the list itself may still hold a reference on
 * each cached xattr entry; anything else is a refcount leak. */
2320 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
2321 list_del(&oxe->oxe_list);
2322 count = atomic_read(&oxe->oxe_ref);
2323 LASSERTF(count == 1,
2324 "Still has %d users on the xattr entry %.*s\n",
2325 count-1, (int)oxe->oxe_namelen, oxe->oxe_buf);
2327 OBD_FREE_LARGE(oxe, oxe->oxe_buflen);
/* Defer the actual slab free past an RCU grace period. */
2329 OBD_FREE_PRE(obj, sizeof(*obj), "slab-freed");
2330 call_rcu(&obj->opo_header.loh_rcu, osp_object_free_rcu);
2334 * Implement OSP layer lu_object_operations::loo_object_release() interface.
2336 * Cleanup (not free) the object.
2338 * If it is a reserved object but failed to be created, or it is an OST
2339 * object, then mark the object as non-cached.
2341 * \param[in] env	pointer to the thread context
2342 * \param[in] o	pointer to the OSP layer lu_object
2344 static void osp_object_release(const struct lu_env *env, struct lu_object *o)
2346 struct osp_object *po = lu2osp_obj(o);
2347 struct osp_device *d = lu2osp_dev(o->lo_dev);
2352 * release reservation if object was declared but not created
2353 * this may require lu_object_put() in LOD
2355 if (unlikely(po->opo_reserved)) {
2356 LASSERT(d->opd_pre != NULL);
2357 LASSERT(d->opd_pre_reserved > 0);
2358 spin_lock(&d->opd_pre_lock);
2359 d->opd_pre_reserved--;
2360 spin_unlock(&d->opd_pre_lock);
2363 * Check that osp_precreate_cleanup_orphans is not blocked
2364 * due to opd_pre_reserved > 0.
2366 if (unlikely(d->opd_pre_reserved == 0 &&
2367 (d->opd_pre_recovering || d->opd_pre_status)))
2368 wake_up(&d->opd_pre_waitq);
2370 /* not needed in cache any more */
2371 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
2375 /* XXX: Currently, NOT cache OST-object on MDT because:
2376 * 1. it is not often accessed on MDT.
2377 * 2. avoid up layer (such as LFSCK) to load too many
2378 * once-used OST-objects. */
2379 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
/* Print a one-line debugging representation of the OSP object. */
2384 static int osp_object_print(const struct lu_env *env, void *cookie,
2385 lu_printer_t p, const struct lu_object *l)
2387 const struct osp_object *o = lu2osp_obj((struct lu_object *)l);
2389 return (*p)(env, cookie, LUSTRE_OSP_NAME"-object@%p", o);
/* Object invariant check hook (body elided in this view). */
2392 static int osp_object_invariant(const struct lu_object *o)
2397 const struct lu_object_operations osp_lu_obj_ops = {
2398 .loo_object_init = osp_object_init,
2399 .loo_object_free = osp_object_free,
2400 .loo_object_release = osp_object_release,
2401 .loo_object_print = osp_object_print,
2402 .loo_object_invariant = osp_object_invariant