4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * lustre/osp/osp_object.c
31 * Lustre OST Proxy Device (OSP) is the agent on the local MDT for the OST
34 * OSP object attributes cache
35 * ---------------------------
36 * OSP object is the stub of the remote OST-object or MDT-object. Both the
37 * attribute and the extended attributes are stored on the peer side remotely.
38 * It is inefficient to send RPC to peer to fetch those attributes when every
39 * get_attr()/get_xattr() called. For a large system, the LFSCK synchronous
40 * mode scanning is prohibitively inefficient.
42 * The OSP maintains the OSP object attributes cache to cache some
43 * attributes on the local MDT.
45 * The basic attributes, such as owner/mode/flags, are stored in the
46 * osp_object::opo_attr. The extended attributes will be stored
47 * as osp_xattr_entry. Every extended attribute has an independent
48 * osp_xattr_entry, and all the osp_xattr_entry are linked into the
49 * osp_object::opo_xattr_list. The OSP object attributes cache
50 * is protected by the osp_object::opo_lock.
52 * Not all OSP objects have an attributes cache because maintaining
53 * the cache requires some resources. Currently, the OSP object
54 * attributes cache will be initialized when the attributes or the
55 * extended attributes are pre-fetched via osp_declare_attr_get()
56 * or osp_declare_xattr_get(). That is usually for LFSCK purpose,
57 * but it also can be shared by others.
60 * XXX: Do NOT prepare the OUT RPC during transaction declare for a remote
61 * transaction (please refer to the comment of osp_trans_create() for
62 * remote transactions).
63 * According to our current transaction/dt_object_lock framework (to make
64 * the cross-MDTs modification for DNE1 to be workable), the transaction
65 * sponsor will start the transaction firstly, then try to acquire related
66 * dt_object_lock if needed. Under such rules, if we want to prepare the
67 * OUT RPC in the transaction declare phase, then related attr/xattr
68 * should be known without dt_object_lock. But such condition maybe not
69 * true for some remote transaction case. For example:
71 * For linkEA repairing (by LFSCK) case, before the LFSCK thread obtained
72 * the dt_object_lock on the target MDT-object, it cannot know whether
73 * the MDT-object has linkEA or not, nor whether that linkEA is valid.
75 * Since the LFSCK thread cannot hold dt_object_lock before the remote
76 * transaction start (otherwise there will be some potential deadlock),
77 * it cannot prepare related OUT RPC for repairing during the declare
78 * phase as other normal transactions do.
80 * To resolve the trouble, we will make OSP to prepare related OUT RPC
81 * after remote transaction started, and trigger the remote updating
82 * (send RPC) when trans_stop. Then the up layer users, such as LFSCK,
83 * can follow the general rule to handle trans_start/dt_object_lock
84 * for repairing linkEA inconsistency without distinguishing remote
87 * In fact, above solution for remote transaction should be the normal
88 * model without considering DNE1. The trouble brought by DNE1 will be
89 * resolved in DNE2. At that time, this patch can be removed.
92 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
93 * Author: Mikhail Pershin <mike.tappro@intel.com>
96 #define DEBUG_SUBSYSTEM S_MDS
98 #include <lustre_obdo.h>
99 #include <lustre_swab.h>
101 #include "osp_internal.h"
103 static inline __u32 osp_dev2node(struct osp_device *osp)
105 return osp->opd_storage->dd_lu_dev.ld_site->ld_seq_site->ss_node_id;
108 static inline const char *osp_dto2name(struct osp_object *obj)
110 return obj->opo_obj.do_lu.lo_dev->ld_obd->obd_name;
113 static inline bool is_ost_obj(struct lu_object *lo)
115 return !lu2osp_dev(lo->lo_dev)->opd_connect_mdt;
118 static inline void __osp_oac_xattr_assignment(struct osp_object *obj,
119 struct osp_xattr_entry *oxe,
120 const struct lu_buf *buf)
123 memcpy(oxe->oxe_value, buf->lb_buf, buf->lb_len);
125 oxe->oxe_vallen = buf->lb_len;
131 * Assign FID to the OST object.
133 * This function will assign the FID to the OST object of a striped file.
135 * \param[in] env pointer to the thread context
136 * \param[in] d pointer to the OSP device
137 * \param[in] o pointer to the OSP object that the FID will be
140 static void osp_object_assign_fid(const struct lu_env *env,
141 struct osp_device *d, struct osp_object *o)
143 struct osp_thread_info *osi = osp_env_info(env);
145 LASSERT(fid_is_zero(lu_object_fid(&o->opo_obj.do_lu)));
146 LASSERT(o->opo_reserved);
149 osp_precreate_get_fid(env, d, &osi->osi_fid);
151 lu_object_assign_fid(env, &o->opo_obj.do_lu, &osi->osi_fid);
154 #define OXE_DEFAULT_LEN 16
157 * Allocate osp_xattr_entry.
159 * If total size exceeds PAGE_SIZE, name and value will allocated in a
160 * separate buf, otherwise it's allocated inline.
162 * \param[in] name pointer to XATTR name
163 * \param[in] namelen XATTR name len
164 * \param[in] vallen XATTR value len
165 * \retval oxe pointer on success
166 * \retval NULL on failure
168 static struct osp_xattr_entry *osp_oac_xattr_alloc(const char *name,
172 struct osp_xattr_entry *oxe;
176 vallen = OXE_DEFAULT_LEN;
177 size = sizeof(*oxe) + namelen + 1 + vallen;
178 if (likely(size <= PAGE_SIZE)) {
179 OBD_ALLOC(oxe, size);
182 oxe->oxe_buflen = size;
183 oxe->oxe_value = oxe->oxe_name + namelen + 1;
187 OBD_ALLOC_LARGE(buf, vallen);
192 OBD_ALLOC(oxe, size);
193 if (unlikely(!oxe)) {
197 oxe->oxe_buflen = vallen;
198 oxe->oxe_value = buf;
199 oxe->oxe_largebuf = 1;
202 INIT_LIST_HEAD(&oxe->oxe_list);
204 oxe->oxe_namelen = namelen;
205 memcpy(oxe->oxe_name, name, namelen);
206 /* One ref is for the caller, the other is for the entry on the list. */
207 atomic_set(&oxe->oxe_ref, 2);
212 static void osp_oac_xattr_free(struct osp_xattr_entry *oxe)
214 LASSERT(list_empty(&oxe->oxe_list));
215 if (unlikely(oxe->oxe_largebuf)) {
216 OBD_FREE_LARGE(oxe->oxe_value, oxe->oxe_buflen);
217 OBD_FREE(oxe, sizeof(*oxe) + oxe->oxe_namelen + 1);
219 OBD_FREE(oxe, oxe->oxe_buflen);
224 * Release reference from the OSP object extended attribute entry.
226 * If it is the last reference, then free the entry.
228 * \param[in] oxe pointer to the OSP object extended attribute entry.
230 static inline void osp_oac_xattr_put(struct osp_xattr_entry *oxe)
232 if (atomic_dec_and_test(&oxe->oxe_ref))
233 osp_oac_xattr_free(oxe);
237 * Find the named extended attribute in the OSP object attributes cache.
239 * The caller should take the osp_object::opo_lock before calling
242 * \param[in] obj pointer to the OSP object
243 * \param[in] name the name of the extended attribute
244 * \param[in] namelen the name length of the extended attribute
246 * \retval pointer to the found extended attribute entry
247 * \retval NULL if the specified extended attribute is not
250 static struct osp_xattr_entry *
251 osp_oac_xattr_find_locked(struct osp_object *obj, const char *name,
254 struct osp_xattr_entry *oxe;
256 list_for_each_entry(oxe, &obj->opo_xattr_list, oxe_list) {
257 if (namelen == oxe->oxe_namelen &&
258 strncmp(name, oxe->oxe_name, namelen) == 0)
266 * Find the named extended attribute in the OSP object attributes cache.
268 * Call osp_oac_xattr_find_locked() with the osp_object::opo_lock held.
270 * \param[in] obj pointer to the OSP object
271 * \param[in] name the name of the extended attribute
272 * \param[in] unlink true if the extended attribute entry is to be removed
275 * \retval pointer to the found extended attribute entry
276 * \retval NULL if the specified extended attribute is not
279 static struct osp_xattr_entry *osp_oac_xattr_find(struct osp_object *obj,
280 const char *name, bool unlink)
282 struct osp_xattr_entry *oxe = NULL;
284 spin_lock(&obj->opo_lock);
285 oxe = osp_oac_xattr_find_locked(obj, name, strlen(name));
288 list_del_init(&oxe->oxe_list);
290 atomic_inc(&oxe->oxe_ref);
292 spin_unlock(&obj->opo_lock);
298 * Find the named extended attribute in the OSP object attributes cache.
300 * If it is not in the cache, then add an empty entry (that will be
301 * filled later) to cache with the given name.
303 * \param[in] obj pointer to the OSP object
304 * \param[in] name the name of the extended attribute
305 * \param[in] len the length of the extended attribute value
307 * \retval pointer to the found or new-created extended
309 * \retval NULL if the specified extended attribute is not in the
310 * cache or fail to add new empty entry to the cache.
312 static struct osp_xattr_entry *
313 osp_oac_xattr_find_or_add(struct osp_object *obj, const char *name, size_t len)
315 struct osp_xattr_entry *oxe;
316 struct osp_xattr_entry *tmp = NULL;
317 size_t namelen = strlen(name);
319 oxe = osp_oac_xattr_find(obj, name, false);
323 oxe = osp_oac_xattr_alloc(name, namelen, len);
327 spin_lock(&obj->opo_lock);
328 tmp = osp_oac_xattr_find_locked(obj, name, namelen);
330 list_add_tail(&oxe->oxe_list, &obj->opo_xattr_list);
332 atomic_inc(&tmp->oxe_ref);
333 spin_unlock(&obj->opo_lock);
336 osp_oac_xattr_free(oxe);
344 * Check whether \a oxe is large enough to hold the xattr value
346 * \param[in] oxe pointer to the OSP object attributes cache xattr entry
347 * \param[in] len xattr value size in bytes
349 * \retval true if xattr can fit in \a oxe
350 * \retval false if xattr can not fit in \a oxe
352 static inline bool oxe_can_hold(struct osp_xattr_entry *oxe, size_t len)
354 if (unlikely(oxe->oxe_largebuf))
355 return oxe->oxe_buflen >= len;
357 return oxe->oxe_buflen - oxe->oxe_namelen - 1 - sizeof(*oxe) >= len;
361 * Assign the cached OST-object's EA with the given value.
363 * If the current EA entry in cache has not enough space to hold the new
364 * value, remove it, create a new one, then assign with the given value.
366 * \param[in] obj pointer to the OSP object
367 * \param[in] oxe pointer to the cached EA entry to be assigned
368 * \param[in] buf pointer to the buffer with new EA value
370 * \retval pointer to the new created EA entry in cache if
371 * current entry is not big enough; otherwise, the
372 * input 'oxe' will be returned.
/*
 * Replace the cached value of xattr entry @oxe with the data in @buf.
 * If @oxe cannot hold the new value, a bigger replacement entry is
 * allocated and swapped into the list; the returned pointer is the entry
 * (old or new) that now owns the caller's reference.
 *
 * NOTE(review): this extraction is missing some original lines (braces,
 * blank lines, a few branch headers), so the control flow shown here is
 * incomplete.
 */
374 static struct osp_xattr_entry *
375 osp_oac_xattr_assignment(struct osp_object *obj, struct osp_xattr_entry *oxe,
376 const struct lu_buf *buf)
378 struct osp_xattr_entry *new = NULL;
379 struct osp_xattr_entry *old = NULL;
380 int namelen = oxe->oxe_namelen;
381 bool unlink_only = false;
/* Entry too small: allocate a replacement big enough for the new value. */
383 if (!oxe_can_hold(oxe, buf->lb_len)) {
384 new = osp_oac_xattr_alloc(oxe->oxe_name, namelen, buf->lb_len);
386 __osp_oac_xattr_assignment(obj, new, buf);
/* Allocation failed: the stale entry can only be unlinked, not updated. */
389 CWARN("%s: cannot update cached xattr %.*s of "DFID"\n",
390 osp_dto2name(obj), namelen, oxe->oxe_name,
391 PFID(lu_object_fid(&obj->opo_obj.do_lu)));
/* Re-find under the lock: the list may have changed since the caller
 * looked the entry up. */
395 spin_lock(&obj->opo_lock);
396 old = osp_oac_xattr_find_locked(obj, oxe->oxe_name, namelen);
399 /* Unlink the 'old'. */
400 list_del_init(&old->oxe_list);
402 /* Drop the ref for 'old' on list. */
403 osp_oac_xattr_put(old);
405 /* Drop the ref for current using. */
406 osp_oac_xattr_put(oxe);
409 /* Insert 'new' into list. */
410 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
411 } else if (unlink_only) {
412 /* Unlink the 'old'. */
413 list_del_init(&old->oxe_list);
415 /* Drop the ref for 'old' on list. */
416 osp_oac_xattr_put(old);
/* Entry was big enough: update it in place under the lock. */
418 __osp_oac_xattr_assignment(obj, oxe, buf);
421 /* Drop the ref for current using. */
422 osp_oac_xattr_put(oxe);
425 /* Someone unlinked the 'old' by race,
426 * insert the 'new' one into list. */
427 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
429 spin_unlock(&obj->opo_lock);
435 * Parse the OSP object attribute from the RPC reply.
437 * If the attribute is valid, then it will be added to the OSP object
440 * \param[in] env pointer to the thread context
441 * \param[in] reply pointer to the RPC reply
442 * \param[in] req pointer to the RPC request
443 * \param[out] attr pointer to buffer to hold the output attribute
444 * \param[in] obj pointer to the OSP object
445 * \param[in] index the index of the attribute buffer in the reply
447 * \retval 0 for success
448 * \retval negative error number on failure
/*
 * Decode an obdo from slot @index of the OUT RPC reply, swab it if the
 * request needs swabbing, and store the result into @attr (if non-NULL)
 * and into the object's attribute cache (if @obj is non-NULL).
 *
 * NOTE(review): error-handling lines are missing from this extraction.
 */
450 static int osp_get_attr_from_reply(const struct lu_env *env,
451 struct object_update_reply *reply,
452 struct ptlrpc_request *req,
453 struct lu_attr *attr,
454 struct osp_object *obj, int index)
456 struct osp_thread_info *osi = osp_env_info(env);
457 struct lu_buf *rbuf = &osi->osi_lb2;
458 struct obdo *lobdo = &osi->osi_obdo;
/* Pull the raw wire obdo out of the reply buffer at slot @index. */
462 rc = object_update_result_data_get(reply, rbuf, index);
466 wobdo = rbuf->lb_buf;
/* The reply payload must be exactly one wire obdo. */
467 if (rbuf->lb_len != sizeof(*wobdo))
470 LASSERT(req != NULL);
/* Byte-swap the wire obdo when the peer has a different endianness. */
471 if (req_capsule_req_need_swab(&req->rq_pill))
472 lustre_swab_obdo(wobdo);
474 lustre_get_wire_obdo(NULL, lobdo, wobdo);
/* Cache the decoded attributes on the object under its spinlock. */
476 spin_lock(&obj->opo_lock);
477 la_from_obdo(&obj->opo_attr, lobdo, lobdo->o_valid);
478 spin_unlock(&obj->opo_lock);
/* Also hand the attributes back to the caller's buffer. */
481 la_from_obdo(attr, lobdo, lobdo->o_valid);
487 * Interpreter function for getting OSP object attribute asynchronously.
489 * Called to interpret the result of an async mode RPC for getting the
490 * OSP object attribute.
492 * \param[in] env pointer to the thread context
493 * \param[in] reply pointer to the RPC reply
494 * \param[in] req pointer to the RPC request
495 * \param[in] obj pointer to the OSP object
496 * \param[out] data pointer to buffer to hold the output attribute
497 * \param[in] index the index of the attribute buffer in the reply
498 * \param[in] rc the result for handling the RPC
500 * \retval 0 for success
501 * \retval negative error number on failure
/*
 * Async OUT_ATTR_GET interpreter: on success mark the object as existing
 * and fill its attribute cache from the reply; on failure mark it as
 * non-existent or stale.  (Name keeps the historical "interpterer"
 * spelling used by all callers.)
 */
503 static int osp_attr_get_interpterer(const struct lu_env *env,
504 struct object_update_reply *reply,
505 struct ptlrpc_request *req,
506 struct osp_object *obj,
507 void *data, int index, int rc)
509 struct lu_attr *attr = data;
/* RPC succeeded: the remote object exists; decode and cache its attrs. */
512 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
513 obj->opo_non_exist = 0;
515 return osp_get_attr_from_reply(env, reply, req, NULL, obj,
/* Remote object is gone: clear EXISTS and remember non-existence. */
519 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
520 obj->opo_non_exist = 1;
/* Other failure: update cached state under the object spinlock. */
523 spin_lock(&obj->opo_lock);
525 spin_unlock(&obj->opo_lock);
532 * Implement OSP layer dt_object_operations::do_declare_attr_get() interface.
534 * Declare that the caller will get attribute from the specified OST object.
536 * This function adds an Object Unified Target (OUT) sub-request to the per-OSP
537 * based shared asynchronous request queue. The osp_attr_get_interpterer()
538 * is registered as the interpreter function to handle the result of this
541 * \param[in] env pointer to the thread context
542 * \param[in] dt pointer to the OSP layer dt_object
544 * \retval 0 for success
545 * \retval negative error number on failure
547 static int osp_declare_attr_get(const struct lu_env *env, struct dt_object *dt)
549 struct osp_object *obj = dt2osp_obj(dt);
550 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
553 mutex_lock(&osp->opd_async_requests_mutex);
554 rc = osp_insert_async_request(env, OUT_ATTR_GET, obj, 0, NULL, NULL,
555 &obj->opo_attr, sizeof(struct obdo),
556 osp_attr_get_interpterer);
557 mutex_unlock(&osp->opd_async_requests_mutex);
563 * Implement OSP layer dt_object_operations::do_attr_get() interface.
565 * Get attribute from the specified MDT/OST object.
567 * If the attribute is in the OSP object attributes cache, then return
568 * the cached attribute directly. Otherwise it will trigger an OUT RPC
569 * to the peer to get the attribute synchronously, if successful, add it
570 * to the OSP attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
572 * \param[in] env pointer to the thread context
573 * \param[in] dt pointer to the OSP layer dt_object
574 * \param[out] attr pointer to the buffer to hold the output attribute
576 * \retval 0 for success
577 * \retval negative error number on failure
/*
 * Fetch attributes of the remote MDT/OST object: serve from the local
 * attribute cache when valid, otherwise send a synchronous OUT_ATTR_GET
 * RPC and (absent a racing invalidation) refresh the cache.
 *
 * NOTE(review): RETURN/GOTO lines and braces are missing from this
 * extraction; the visible flow is incomplete.
 */
579 int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
580 struct lu_attr *attr)
582 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
583 struct osp_object *obj = dt2osp_obj(dt);
584 struct dt_device *dev = &osp->opd_dt_dev;
585 struct osp_update_request *update;
586 struct object_update_reply *reply;
587 struct ptlrpc_request *req = NULL;
588 int invalidated, cache = 0, rc = 0;
/* Known-nonexistent or destroyed object: no RPC required. */
591 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
593 if (obj->opo_destroyed)
/* Fast path: valid, non-stale cached attributes. */
596 spin_lock(&obj->opo_lock);
597 if (obj->opo_attr.la_valid != 0 && !obj->opo_stale) {
598 *attr = obj->opo_attr;
599 spin_unlock(&obj->opo_lock);
603 spin_unlock(&obj->opo_lock);
/* Slow path: pack a synchronous OUT_ATTR_GET update request. */
605 update = osp_update_request_create(dev);
607 RETURN(PTR_ERR(update));
609 rc = OSP_UPDATE_RPC_PACK(env, out_attr_get_pack, update,
610 lu_object_fid(&dt->do_lu));
612 CERROR("%s: Insert update error "DFID": rc = %d\n",
613 dev->dd_lu_dev.ld_obd->obd_name,
614 PFID(lu_object_fid(&dt->do_lu)), rc);
/* Sample the invalidation sequence before the RPC so a racing
 * invalidation can be detected afterwards. */
619 invalidated = atomic_read(&obj->opo_invalidate_seq);
621 rc = osp_remote_sync(env, osp, update, &req);
623 down_read(&obj->opo_invalidate_sem);
624 if (invalidated == atomic_read(&obj->opo_invalidate_seq)) {
625 /* no invalidation has come so far, we can cache the attrs */
/* RPC failure path: remember non-existence where applicable. */
631 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
633 obj->opo_non_exist = 1;
635 CERROR("%s: osp_attr_get update error "DFID": rc = %d\n",
636 dev->dd_lu_dev.ld_obd->obd_name,
637 PFID(lu_object_fid(&dt->do_lu)), rc);
/* Success: the object exists; decode the reply. */
643 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
644 obj->opo_non_exist = 0;
645 reply = req_capsule_server_sized_get(&req->rq_pill,
646 &RMF_OUT_UPDATE_REPLY,
647 OUT_UPDATE_REPLY_SIZE);
648 if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC)
649 GOTO(out, rc = -EPROTO);
/* Only cache the attrs when no invalidation raced with the RPC. */
651 rc = osp_get_attr_from_reply(env, reply, req, attr,
652 cache ? obj : NULL, 0);
656 spin_lock(&obj->opo_lock);
659 spin_unlock(&obj->opo_lock);
664 up_read(&obj->opo_invalidate_sem);
/* Cleanup: release the ptlrpc request and the OUT update request. */
668 ptlrpc_req_finished(req);
670 osp_update_request_destroy(env, update);
676 * Implement OSP layer dt_object_operations::do_declare_attr_set() interface.
678 * If the transaction is not remote one, then declare the credits that will
679 * be used for the subsequent llog record for the object's attributes.
681 * \param[in] env pointer to the thread context
682 * \param[in] dt pointer to the OSP layer dt_object
683 * \param[in] attr pointer to the attribute to be set
684 * \param[in] th pointer to the transaction handler
686 * \retval 0 for success
687 * \retval negative error number on failure
/*
 * Declare an attr_set on the remote object.  For a purely remote
 * transaction this delegates to osp_md_declare_attr_set(); otherwise it
 * declares the llog credits used to record the change asynchronously.
 */
689 static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
690 const struct lu_attr *attr, struct thandle *th)
692 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
693 struct osp_object *o = dt2osp_obj(dt);
696 if (is_only_remote_trans(th))
697 return osp_md_declare_attr_set(env, dt, attr, th);
699 * Usually we don't allow server stack to manipulate size
700 * but there is a special case when striping is created
701 * late, after stripeless file got truncated to non-zero.
703 * In this case we do the following:
705 * 1) grab id in declare - this can lead to leaked OST objects
706 * but we don't currently have proper mechanism and the only
707 * options we have are to do truncate RPC holding transaction
708 * open (very bad) or to grab id in declare at cost of leaked
709 * OST object in same very rare unfortunate case (just bad)
710 * notice 1.6-2.0 do assignment outside of running transaction
711 * all the time, meaning many more chances for leaked objects.
713 * 2) send synchronous truncate RPC with just assigned id
716 /* there are few places in MDD code still passing NULL
717 * XXX: to be fixed soon */
/* Late striping after truncate: assign a FID now and truncate the fresh
 * OST object synchronously (see the comment block above). */
721 if (attr->la_valid & LA_SIZE && attr->la_size > 0 &&
722 fid_is_zero(lu_object_fid(&o->opo_obj.do_lu))) {
723 LASSERT(!dt_object_exists(dt));
724 osp_object_assign_fid(env, d, o);
725 rc = osp_object_truncate(env, dt, attr->la_size);
/* Nothing to declare unless a remotely-tracked attribute changes. */
730 if (!(attr->la_valid & LA_REMOTE_ATTR_SET))
733 /* track all UID/GID, projid, and layout version changes via llog */
734 rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
740 * Implement OSP layer dt_object_operations::do_attr_set() interface.
742 * Set attribute to the specified OST object.
744 * If the transaction is a remote one, then add OUT_ATTR_SET sub-request
745 * in the OUT RPC that will be flushed when the remote transaction stop.
746 * Otherwise, it will generate a MDS_SETATTR64_REC record in the llog that
747 * will be handled by a dedicated thread asynchronously.
749 * If the attribute entry exists in the OSP object attributes cache,
750 * then update the cached attribute according to given attribute.
752 * \param[in] env pointer to the thread context
753 * \param[in] dt pointer to the OSP layer dt_object
754 * \param[in] attr pointer to the attribute to be set
755 * \param[in] th pointer to the transaction handler
757 * \retval 0 for success
758 * \retval negative error number on failure
/*
 * Set attributes on the remote object.  Remote transactions pack an
 * OUT_ATTR_SET sub-request (via osp_md_attr_set()); local ones record a
 * MDS_SETATTR64_REC llog entry handled asynchronously, except for
 * LUSTRE_SET_SYNC_FL which is pushed synchronously.  The local attribute
 * cache is refreshed at the end.
 *
 * NOTE(review): braces/RETURN lines are missing from this extraction.
 */
760 static int osp_attr_set(const struct lu_env *env, struct dt_object *dt,
761 const struct lu_attr *attr, struct thandle *th)
763 struct osp_object *o = dt2osp_obj(dt);
767 /* we're interested in uid/gid/projid/layout version changes,
768 * and also specific setting of enc flag
770 if (!(attr->la_valid & LA_REMOTE_ATTR_SET) &&
771 !(attr->la_valid == LA_FLAGS &&
772 attr->la_flags == LUSTRE_ENCRYPT_FL))
773 if (!is_only_remote_trans(th)) {
/* SYNC flag must reach the OST immediately: do a synchronous OUT RPC
 * instead of the usual async llog path. */
776 if (attr->la_flags & LUSTRE_SET_SYNC_FL) {
777 struct ptlrpc_request *req = NULL;
778 struct osp_update_request *update = NULL;
779 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
781 update = osp_update_request_create(&osp->opd_dt_dev);
783 RETURN(PTR_ERR(update));
785 rc = OSP_UPDATE_RPC_PACK(env, out_attr_set_pack, update,
786 lu_object_fid(&dt->do_lu),
789 CERROR("%s: update error "DFID": rc = %d\n",
790 osp->opd_obd->obd_name,
791 PFID(lu_object_fid(&dt->do_lu)), rc);
793 osp_update_request_destroy(env, update);
797 rc = osp_remote_sync(env, osp, update, &req);
799 ptlrpc_req_finished(req);
801 osp_update_request_destroy(env, update);
803 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Default local path: record the change in the sync llog. */
805 rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);
806 /* send layout version to OST ASAP */
807 if (attr->la_valid & LA_LAYOUT_VERSION)
808 wake_up(&osp->opd_sync_waitq);
809 /* XXX: send new uid/gid to OST ASAP? */
814 /* It is for OST-object attr_set directly without updating
815 * local MDT-object attribute. It is usually used by LFSCK. */
816 rc = osp_md_attr_set(env, dt, attr, th);
817 CDEBUG(D_INFO, "(1) set attr "DFID": rc = %d\n",
818 PFID(&dt->do_lu.lo_header->loh_fid), rc);
823 /* Update the OSP object attributes cache. */
825 spin_lock(&o->opo_lock);
826 if (attr->la_valid & LA_UID) {
827 la->la_uid = attr->la_uid;
828 la->la_valid |= LA_UID;
831 if (attr->la_valid & LA_GID) {
832 la->la_gid = attr->la_gid;
833 la->la_valid |= LA_GID;
835 if (attr->la_valid & LA_PROJID) {
836 la->la_projid = attr->la_projid;
837 la->la_valid |= LA_PROJID;
839 spin_unlock(&o->opo_lock);
846 * Interpreter function for getting OSP object extended attribute asynchronously
848 * Called to interpret the result of an async mode RPC for getting the
849 * OSP object extended attribute.
851 * \param[in] env pointer to the thread context
852 * \param[in] reply pointer to the RPC reply
853 * \param[in] req pointer to the RPC request
854 * \param[in] obj pointer to the OSP object
855 * \param[out] data pointer to OSP object attributes cache
856 * \param[in] index the index of the attribute buffer in the reply
857 * \param[in] rc the result for handling the RPC
859 * \retval 0 for success
860 * \retval negative error number on failure
/*
 * Async OUT_XATTR_GET interpreter: fill the pre-allocated cache entry
 * @data with the replied value, or mark it non-existent on
 * -ENOENT/-ENODATA, then drop the reference taken at declare time.
 */
862 static int osp_xattr_get_interpterer(const struct lu_env *env,
863 struct object_update_reply *reply,
864 struct ptlrpc_request *req,
865 struct osp_object *obj,
866 void *data, int index, int rc)
868 struct osp_xattr_entry *oxe = data;
870 spin_lock(&obj->opo_lock);
872 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
/* Extract the xattr value (or its absence) from reply slot @index. */
874 rc = object_update_result_data_get(reply, rbuf, index);
875 if (rc == -ENOENT || rc == -ENODATA || rc == 0) {
/* Unexpected error or a value too large for the entry: give up on
 * caching this xattr. */
881 if (unlikely(rc < 0) || !oxe_can_hold(oxe, rbuf->lb_len)) {
/* Store the replied value into the cache entry (lock already held). */
886 __osp_oac_xattr_assignment(obj, oxe, rbuf);
887 } else if (rc == -ENOENT || rc == -ENODATA) {
895 spin_unlock(&obj->opo_lock);
897 /* Put the reference obtained in the osp_declare_xattr_get(). */
898 osp_oac_xattr_put(oxe);
904 * Implement OSP dt_object_operations::do_declare_xattr_get() interface.
906 * Declare that the caller will get extended attribute from the specified
909 * This function will add an OUT_XATTR_GET sub-request to the per OSP
910 * based shared asynchronous request queue with the interpreter function:
911 * osp_xattr_get_interpterer().
913 * \param[in] env pointer to the thread context
914 * \param[in] dt pointer to the OSP layer dt_object
915 * \param[out] buf pointer to the lu_buf to hold the extended attribute
916 * \param[in] name the name for the expected extended attribute
918 * \retval 0 for success
919 * \retval negative error number on failure
/*
 * Queue an async OUT_XATTR_GET sub-request for @name on the OSP's shared
 * request queue (creating an empty cache entry to be filled by
 * osp_xattr_get_interpterer()), and unplug the batched queue when it
 * already holds pending sub-requests.
 *
 * NOTE(review): braces/returns are missing from this extraction.
 */
921 static int osp_declare_xattr_get(const struct lu_env *env, struct dt_object *dt,
922 struct lu_buf *buf, const char *name)
924 struct osp_object *obj = dt2osp_obj(dt);
925 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
926 struct osp_xattr_entry *oxe;
930 LASSERT(buf != NULL);
931 LASSERT(name != NULL);
933 if (unlikely(buf->lb_len == 0))
/* Reserve (or reuse) a cache entry the interpreter will fill in. */
936 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
940 len = strlen(name) + 1;
941 mutex_lock(&osp->opd_async_requests_mutex);
942 rc = osp_insert_async_request(env, OUT_XATTR_GET, obj, 1,
943 &len, (const void **)&name,
945 osp_xattr_get_interpterer);
/* Insert failed: drop the entry reference meant for the interpreter. */
947 mutex_unlock(&osp->opd_async_requests_mutex);
948 osp_oac_xattr_put(oxe);
950 struct osp_update_request *our;
951 struct osp_update_request_sub *ours;
953 /* XXX: Currently, we trigger the batched async OUT
954 * RPC via dt_declare_xattr_get(). It is not
955 * perfect solution, but works well now.
957 * We will improve it in the future. */
958 our = osp->opd_async_requests;
959 ours = osp_current_object_update_request(our);
/* Detach and send the batch if it already carries sub-requests. */
960 if (ours != NULL && ours->ours_req != NULL &&
961 ours->ours_req->ourq_count > 0) {
962 osp->opd_async_requests = NULL;
963 mutex_unlock(&osp->opd_async_requests_mutex);
964 rc = osp_unplug_async_request(env, osp, our);
966 mutex_unlock(&osp->opd_async_requests_mutex);
974 * Implement OSP layer dt_object_operations::do_xattr_get() interface.
976 * Get extended attribute from the specified MDT/OST object.
978 * If the extended attribute is in the OSP object attributes cache, then
979 * return the cached extended attribute directly. Otherwise it will get
980 * the extended attribute synchronously, if successful, add it to the OSP
981 * attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
983 * There is a race condition: some other thread has added the named extended
984 * attributed entry to the OSP object attributes cache during the current
985 * OUT_XATTR_GET handling. If such case happens, the OSP will replace the
986 * (just) existing extended attribute entry with the new replied one.
988 * \param[in] env pointer to the thread context
989 * \param[in] dt pointer to the OSP layer dt_object
990 * \param[out] buf pointer to the lu_buf to hold the extended attribute
991 * \param[in] name the name for the expected extended attribute
993 * \retval 0 for success
994 * \retval negative error number on failure
/*
 * Get an extended attribute of the remote MDT/OST object: serve from the
 * xattr cache when the entry is ready, otherwise send a synchronous
 * OUT_XATTR_GET RPC, copy the value to @buf and (absent a racing
 * invalidation) refresh the cache.
 *
 * NOTE(review): many original lines (braces, GOTO/RETURN paths, labels)
 * are missing from this extraction; the visible flow is incomplete.
 */
996 int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
997 struct lu_buf *buf, const char *name)
999 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
1000 struct osp_object *obj = dt2osp_obj(dt);
1001 struct dt_device *dev = &osp->opd_dt_dev;
1002 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
1003 struct osp_update_request *update = NULL;
1004 struct ptlrpc_request *req = NULL;
1005 struct object_update_reply *reply;
1006 struct osp_xattr_entry *oxe = NULL;
1007 const char *dname = osp_dto2name(obj);
1008 int invalidated, rc = 0;
1011 LASSERT(buf != NULL);
1012 LASSERT(name != NULL);
/* LFSCK fault-injection: simulate a bad network on the selected node. */
1014 if (CFS_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NETWORK) &&
1015 osp->opd_index == cfs_fail_val) {
1016 if (is_ost_obj(&dt->do_lu)) {
1017 if (osp_dev2node(osp) == cfs_fail_val)
1020 if (strcmp(name, XATTR_NAME_LINK) == 0)
1025 if (unlikely(obj->opo_non_exist))
/* Sample the invalidation sequence before consulting cache/RPC. */
1028 invalidated = atomic_read(&obj->opo_invalidate_seq);
1030 oxe = osp_oac_xattr_find(obj, name, false);
1032 spin_lock(&obj->opo_lock);
/* Fast path: a ready cache entry answers without any RPC. */
1033 if (oxe->oxe_ready) {
1034 if (!oxe->oxe_exist)
1035 GOTO(unlock, rc = -ENODATA);
/* NULL output buffer means the caller only probes the value size. */
1037 if (buf->lb_buf == NULL)
1038 GOTO(unlock, rc = oxe->oxe_vallen);
1040 if (buf->lb_len < oxe->oxe_vallen)
1041 GOTO(unlock, rc = -ERANGE);
1043 memcpy(buf->lb_buf, oxe->oxe_value,
1046 GOTO(unlock, rc = oxe->oxe_vallen);
1049 spin_unlock(&obj->opo_lock);
1050 osp_oac_xattr_put(oxe);
1054 spin_unlock(&obj->opo_lock);
/* Slow path: pack and send a synchronous OUT_XATTR_GET request. */
1056 update = osp_update_request_create(dev);
1058 GOTO(out_req, rc = PTR_ERR(update));
1060 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_get_pack, update,
1061 lu_object_fid(&dt->do_lu), name, buf->lb_len);
1063 CERROR("%s: Insert update error "DFID": rc = %d\n",
1064 dname, PFID(lu_object_fid(&dt->do_lu)), rc);
1068 rc = osp_remote_sync(env, osp, update, &req);
1070 down_read(&obj->opo_invalidate_sem);
1071 if (invalidated != atomic_read(&obj->opo_invalidate_seq)) {
1072 /* an invalidation has been requested, we can't cache the result */
1075 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
1078 reply = req_capsule_server_sized_get(&req->rq_pill,
1079 &RMF_OUT_UPDATE_REPLY,
1080 OUT_UPDATE_REPLY_SIZE);
1081 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
1082 CERROR("%s: Wrong version %x expected %x "DFID
1083 ": rc = %d\n", dname, reply->ourp_magic,
1085 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
1086 GOTO(out, rc = -EPROTO);
1088 rc = object_update_result_data_get(reply, rbuf, 0);
/* Remote object is gone entirely: remember non-existence. */
1093 if (rc == -ENOENT) {
1094 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
1095 obj->opo_non_exist = 1;
/* Cache the negative result (object exists, xattr does not). */
1099 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
1102 CWARN("%s: Fail to add xattr (%s) to cache for "
1103 DFID" (1): rc = %d\n", dname, name,
1104 PFID(lu_object_fid(&dt->do_lu)), rc);
1109 spin_lock(&obj->opo_lock);
1110 if (rc == -ENOENT || rc == -ENODATA) {
1116 spin_unlock(&obj->opo_lock);
1121 reply = req_capsule_server_sized_get(&req->rq_pill,
1122 &RMF_OUT_UPDATE_REPLY,
1123 OUT_UPDATE_REPLY_SIZE);
1124 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
1125 CERROR("%s: Wrong version %x expected %x "DFID": rc = %d\n",
1126 dname, reply->ourp_magic, UPDATE_REPLY_MAGIC,
1127 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
1129 GOTO(out, rc = -EPROTO);
1132 rc = object_update_result_data_get(reply, rbuf, 0);
1133 if (rc < 0 || rbuf->lb_len == 0) {
/* Cache "no such xattr" so later lookups avoid an RPC. */
1134 if (oxe == NULL && rc == -ENODATA) {
1135 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
1138 CWARN("%s: Fail to add xattr (%s) to cache for "
1139 DFID" (1): rc = %d\n", dname, name,
1140 PFID(lu_object_fid(&dt->do_lu)), rc);
1146 spin_lock(&obj->opo_lock);
1147 if (unlikely(rc == -ENODATA)) {
1153 spin_unlock(&obj->opo_lock);
1159 /* For detecting EA size. */
/* Positive result: ensure a cache entry exists and store the value. */
1164 oxe = osp_oac_xattr_find_or_add(obj, name, rbuf->lb_len);
1166 CWARN("%s: Fail to add xattr (%s) to "
1167 "cache for "DFID" (2): rc = %d\n",
1168 dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
/* May swap in a bigger replacement entry; use the returned pointer. */
1174 oxe = osp_oac_xattr_assignment(obj, oxe, rbuf);
1179 up_read(&obj->opo_invalidate_sem);
/* Copy the replied value out to the caller's buffer. */
1182 if (rc > 0 && buf->lb_buf) {
1183 if (unlikely(buf->lb_len < rbuf->lb_len))
1186 memcpy(buf->lb_buf, rbuf->lb_buf, rbuf->lb_len);
1190 ptlrpc_req_finished(req);
1192 if (update && !IS_ERR(update))
1193 osp_update_request_destroy(env, update);
1196 osp_oac_xattr_put(oxe);
1202 * Implement OSP layer dt_object_operations::do_declare_xattr_set() interface.
1204 * Declare that the caller will set extended attribute to the specified
1207 * If it is non-remote transaction, it will add an OUT_XATTR_SET sub-request
1208 * to the OUT RPC that will be flushed when the transaction start. And if the
1209 * OSP attributes cache is initialized, then check whether the name extended
1210 * attribute entry exists in the cache or not. If yes, replace it; otherwise,
1211 * add the extended attribute to the cache.
1213 * \param[in] env pointer to the thread context
1214 * \param[in] dt pointer to the OSP layer dt_object
1215 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1216 * \param[in] name the name of the extended attribute to be set
1217 * \param[in] flag to indicate the detailed set operation: LU_XATTR_CREATE
1218 * or LU_XATTR_REPLACE or others
1219 * \param[in] th pointer to the transaction handler
1221 * \retval 0 for success
1222 * \retval negative error number on failure
1224 int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
1225 const struct lu_buf *buf, const char *name,
1226 int flag, struct thandle *th)
/* Nothing is reserved locally for a remote xattr set; only make sure the
 * transaction handle carries an OSP update request that the later
 * osp_xattr_set() can pack its OUT_XATTR_SET sub-request into. */
1228 return osp_trans_update_request_create(th);
1232 * Implement OSP layer dt_object_operations::do_xattr_set() interface.
1234 * Set extended attribute to the specified MDT/OST object.
1236 * Add an OUT_XATTR_SET sub-request into the OUT RPC that will be flushed in
1237 * the transaction stop. And if the OSP attributes cache is initialized, then
1238 * check whether the name extended attribute entry exists in the cache or not.
1239 * If yes, replace it; otherwise, add the extended attribute to the cache.
1241 * \param[in] env pointer to the thread context
1242 * \param[in] dt pointer to the OSP layer dt_object
1243 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1244 * \param[in] name the name of the extended attribute to be set
1245 * \param[in] fl to indicate the detailed set operation: LU_XATTR_CREATE
1246 * or LU_XATTR_REPLACE or others
1247 * \param[in] th pointer to the transaction handler
1249 * \retval 0 for success
1250 * \retval negative error number on failure
1252 int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
1253 const struct lu_buf *buf, const char *name, int fl,
1256 struct osp_object *o = dt2osp_obj(dt);
1257 struct osp_update_request *update;
1258 struct osp_xattr_entry *oxe;
/* The update request must have been created in osp_declare_xattr_set(). */
1262 update = thandle_to_osp_update_request(th);
1263 LASSERT(update != NULL);
1265 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
1266 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
/* Pack the OUT_XATTR_SET sub-request; it is flushed with the OUT RPC at
 * transaction stop (see the function's kernel-doc above). */
1268 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_set_pack, update,
1269 lu_object_fid(&dt->do_lu), buf, name, fl);
1273 /* Do not cache linkEA that may be self-adjusted by peers
1274 * under EA overflow case. */
1275 if (strcmp(name, XATTR_NAME_LINK) == 0) {
/* Drop any previously cached linkEA entry instead of refreshing it. */
1276 oxe = osp_oac_xattr_find(o, name, true);
1278 osp_oac_xattr_put(oxe);
/* Look up (or allocate) the cache slot for this name, sized for buf. */
1283 oxe = osp_oac_xattr_find_or_add(o, name, buf->lb_len);
1285 CWARN("%s: cannot cache xattr '%s' of "DFID"\n",
1286 osp_dto2name(o), name, PFID(lu_object_fid(&dt->do_lu)));
/* Publish the new value into the cache entry, then release our ref. */
1291 oxe = osp_oac_xattr_assignment(o, oxe, buf);
1293 osp_oac_xattr_put(oxe);
1299 * Implement OSP layer dt_object_operations::do_declare_xattr_del() interface.
1301 * Declare that the caller will delete extended attribute on the specified
1304 * If it is non-remote transaction, it will add an OUT_XATTR_DEL sub-request
1305 * to the OUT RPC that will be flushed when the transaction start. And if the
1306 * name extended attribute entry exists in the OSP attributes cache, then remove
1307 * it from the cache.
1309 * \param[in] env pointer to the thread context
1310 * \param[in] dt pointer to the OSP layer dt_object
1311 * \param[in] name	the name of the extended attribute to be deleted
1312 * \param[in] th pointer to the transaction handler
1314 * \retval 0 for success
1315 * \retval negative error number on failure
1317 int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
1318 const char *name, struct thandle *th)
/* Same as osp_declare_xattr_set(): only ensure the OUT update request
 * exists on this transaction for the later OUT_XATTR_DEL packing. */
1320 return osp_trans_update_request_create(th);
1324 * Implement OSP layer dt_object_operations::do_xattr_del() interface.
1326 * Delete extended attribute on the specified MDT/OST object.
1328 * If it is remote transaction, it will add an OUT_XATTR_DEL sub-request into
1329 * the OUT RPC that will be flushed when the transaction stop. And if the name
1330 * extended attribute entry exists in the OSP attributes cache, then remove it
1333 * \param[in] env pointer to the thread context
1334 * \param[in] dt pointer to the OSP layer dt_object
1335 * \param[in] name	the name of the extended attribute to be deleted
1336 * \param[in] th pointer to the transaction handler
1338 * \retval 0 for success
1339 * \retval negative error number on failure
1341 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
1342 const char *name, struct thandle *th)
1344 struct osp_update_request *update;
1345 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1346 struct osp_object *o = dt2osp_obj(dt);
1347 struct osp_xattr_entry *oxe;
/* Created by osp_declare_xattr_del(). */
1350 update = thandle_to_osp_update_request(th);
1351 LASSERT(update != NULL);
/* Queue the OUT_XATTR_DEL sub-request for the remote peer. */
1353 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_del_pack, update, fid, name);
/* Remove the now-stale entry from the local attributes cache, if any. */
1357 oxe = osp_oac_xattr_find(o, name, true);
1359 /* Drop the ref for entry on list. */
1360 osp_oac_xattr_put(oxe);
/* Drop every cached xattr entry and invalidate the cached basic
 * attributes of @obj; all cache state is covered by opo_lock. */
1365 void osp_obj_invalidate_cache(struct osp_object *obj)
1367 struct osp_xattr_entry *oxe;
1368 struct osp_xattr_entry *tmp;
1370 spin_lock(&obj->opo_lock);
1371 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
/* Unlink from the cache list and drop the list's reference. */
1373 list_del_init(&oxe->oxe_list);
1374 osp_oac_xattr_put(oxe);
/* la_valid == 0 means "no cached attributes". */
1376 obj->opo_attr.la_valid = 0;
1377 spin_unlock(&obj->opo_lock);
1381 * Implement OSP layer dt_object_operations::do_invalidate() interface.
1383 * Invalidate attributes cached on the specified MDT/OST object.
1385 * \param[in] env pointer to the thread context
1386 * \param[in] dt pointer to the OSP layer dt_object
1388 * \retval 0 for success
1389 * \retval negative error number on failure
1391 int osp_invalidate(const struct lu_env *env, struct dt_object *dt)
1393 struct osp_object *obj = dt2osp_obj(dt);
1396 CDEBUG(D_HA, "Invalidate osp_object "DFID"\n",
1397 PFID(lu_object_fid(&dt->do_lu)));
1399 /* serialize attr/EA set vs. invalidation */
1400 down_write(&obj->opo_invalidate_sem);
1402 /* this should invalidate all in-flights */
/* Bumping the sequence lets concurrent fetchers detect that their reply
 * predates this invalidation and must be discarded. */
1403 atomic_inc(&obj->opo_invalidate_seq);
1405 spin_lock(&obj->opo_lock);
1406 /* do not mark new objects stale */
1407 if (obj->opo_attr.la_valid)
/* Existence is unknown again until the next attr fetch. */
1409 obj->opo_non_exist = 0;
1410 spin_unlock(&obj->opo_lock);
/* Finally purge the attribute/xattr cache itself. */
1412 osp_obj_invalidate_cache(obj);
1414 up_write(&obj->opo_invalidate_sem);
/* Return true when the object must not be trusted any more: an OST
 * object known not to exist (branch body elided in this excerpt), or an
 * object explicitly flagged stale. */
1419 bool osp_check_stale(struct dt_object *dt)
1421 struct osp_object *obj = dt2osp_obj(dt);
1423 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
1426 return obj->opo_stale;
1431 * Implement OSP layer dt_object_operations::do_declare_create() interface.
1433 * Declare that the caller will create the OST object.
1435 * If the transaction is a remote transaction and the FID for the OST-object
1436 * has been assigned already, then handle it as creating (remote) MDT object
1437 * via osp_md_declare_create(). This function is usually used for LFSCK
1438 * to re-create the lost OST object. Otherwise, if it is not replay case, the
1439 * OSP will reserve pre-created object for the subsequent create operation;
1440 * if the MDT side cached pre-created objects are less than some threshold,
1441 * then it will wakeup the pre-create thread.
1443 * \param[in] env pointer to the thread context
1444 * \param[in] dt pointer to the OSP layer dt_object
1445 * \param[in] attr the attribute for the object to be created
1446 * \param[in] hint pointer to the hint for creating the object, such as
1448 * \param[in] dof pointer to the dt_object_format for help the creation
1449 * \param[in] th pointer to the transaction handler
1451 * \retval 0 for success
1452 * \retval negative error number on failure
1454 static int osp_declare_create(const struct lu_env *env, struct dt_object *dt,
1455 struct lu_attr *attr,
1456 struct dt_allocation_hint *hint,
1457 struct dt_object_format *dof, struct thandle *th)
1459 struct osp_thread_info *osi = osp_env_info(env);
1460 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1461 struct osp_object *o = dt2osp_obj(dt);
1462 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1463 struct thandle *local_th;
/* Remote transaction with a pre-assigned FID: treat as (remote) MDT
 * object creation, used by LFSCK to re-create lost OST objects. */
1468 if (is_only_remote_trans(th) && !fid_is_zero(fid)) {
1469 LASSERT(fid_is_sane(fid));
1471 rc = osp_md_declare_create(env, dt, attr, hint, dof, th);
1476 /* should happen to non-0 OSP only so that at least one object
1477 * has been already declared in the scenario and LOD should
1479 if (CFS_FAIL_CHECK(OBD_FAIL_MDS_OSC_CREATE_FAIL) && d->opd_index == 1)
1482 LASSERT(d->opd_last_used_oid_file);
1485 * There can be gaps in precreated ids and record to unlink llog
1486 * XXX: we do not handle gaps yet, implemented before solution
1487 * was found to be racy, so we disabled that. there is no
1488 * point in making useless but expensive llog declaration.
1490 /* rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th); */
/* The last-used-OID file lives on local storage, so a local sub-handle
 * of the (possibly remote) transaction is needed. */
1492 local_th = osp_get_storage_thandle(env, th, d);
1493 if (IS_ERR(local_th))
1494 RETURN(PTR_ERR(local_th));
1496 if (unlikely(!fid_is_zero(fid))) {
1497 /* replay case: caller knows fid */
1498 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off, NULL,
1500 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1501 &osi->osi_lb, osi->osi_off,
1507 * in declaration we need to reserve object so that we don't block
1508 * awaiting precreation RPC to complete
1510 rc = osp_precreate_reserve(env, d, !hint || hint->dah_can_block);
1512 * we also need to declare update to local "last used id" file for
1513 * recovery if object isn't used for a reason, we need to release
1514 * reservation, this can be made in osd_object_release()
1517 /* mark id is reserved: in create we don't want to talk
1519 LASSERT(o->opo_reserved == 0);
1520 o->opo_reserved = 1;
1522 /* common for all OSPs file historically */
1523 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off, NULL,
1525 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1526 &osi->osi_lb, osi->osi_off,
1529 /* not needed in the cache anymore */
1530 set_bit(LU_OBJECT_HEARD_BANSHEE,
1531 &dt->do_lu.lo_header->loh_flags);
1537 * Implement OSP layer dt_object_operations::do_create() interface.
1539 * Create the OST object.
1541 * If the transaction is a remote transaction and the FID for the OST-object
1542 * has been assigned already, then handle it as handling MDT object via the
1543 * osp_md_create(). For other cases, the OSP will assign FID to the
1544 * object to be created, and update last_used Object ID (OID) file.
1546 * \param[in] env pointer to the thread context
1547 * \param[in] dt pointer to the OSP layer dt_object
1548 * \param[in] attr the attribute for the object to be created
1549 * \param[in] hint pointer to the hint for creating the object, such as
1551 * \param[in] dof pointer to the dt_object_format for help the creation
1552 * \param[in] th pointer to the transaction handler
1554 * \retval 0 for success
1555 * \retval negative error number on failure
1557 static int osp_create(const struct lu_env *env, struct dt_object *dt,
1558 struct lu_attr *attr, struct dt_allocation_hint *hint,
1559 struct dt_object_format *dof, struct thandle *th)
1561 struct osp_thread_info *osi = osp_env_info(env);
1562 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1563 struct osp_object *o = dt2osp_obj(dt);
1565 struct lu_fid *fid = &osi->osi_fid;
1566 struct thandle *local_th;
/* Remote transaction with a known FID: delegate to the MDT-object
 * creation path (mirrors osp_declare_create()). */
1569 if (is_only_remote_trans(th) &&
1570 !fid_is_zero(lu_object_fid(&dt->do_lu))) {
1571 LASSERT(fid_is_sane(lu_object_fid(&dt->do_lu)));
1573 rc = osp_md_create(env, dt, attr, hint, dof, th);
1575 o->opo_non_exist = 0;
1580 o->opo_non_exist = 0;
1581 if (o->opo_reserved) {
1582 /* regular case, fid is assigned holding transaction open */
1583 osp_object_assign_fid(env, d, o);
/* Work on a thread-local copy of the FID from here on. */
1586 memcpy(fid, lu_object_fid(&dt->do_lu), sizeof(*fid));
1588 LASSERTF(fid_is_sane(fid), "fid for osp_object %px is insane"DFID"!\n",
1591 if (!o->opo_reserved) {
1592 /* special case, id was assigned outside of transaction
1593 * see comments in osp_declare_attr_set */
1594 LASSERT(d->opd_pre != NULL);
1595 spin_lock(&d->opd_pre_lock);
1596 osp_update_last_fid(d, fid);
1597 spin_unlock(&d->opd_pre_lock);
1600 CDEBUG(D_INODE, "fid for osp_object %p is "DFID"\n", o, PFID(fid));
1602 /* If the precreate ends, it means it will be ready to rollover to
1603 * the new sequence soon, all the creation should be synchronized,
1604 * otherwise during replay, the replay fid will be inconsistent with
1605 * last_used/create fid */
1606 if (osp_precreate_end_seq(d) && osp_is_fid_client(d))
1609 local_th = osp_get_storage_thandle(env, th, d);
1610 if (IS_ERR(local_th))
1611 RETURN(PTR_ERR(local_th));
1613 * it's OK if the import is inactive by this moment - id was created
1614 * by OST earlier, we just need to maintain it consistently on the disk
1615 * once import is reconnected, OSP will claim this and other objects
1616 * used and OST either keep them, if they exist or recreate
1619 /* we might have lost precreated objects */
/* NOTE(review): the parenthesis looks misplaced — presumably meant
 * unlikely(d->opd_gap_count > 0).  Behavior is equivalent here since
 * unlikely() yields 0/1, but it defeats the branch hint; confirm. */
1620 if (unlikely(d->opd_gap_count) > 0) {
1621 LASSERT(d->opd_pre != NULL);
/* Re-check under opd_pre_lock: the gap may have been consumed. */
1622 spin_lock(&d->opd_pre_lock);
1623 if (d->opd_gap_count > 0) {
1624 int count = d->opd_gap_count;
1626 rc = ostid_set_id(&osi->osi_oi,
1627 fid_oid(&d->opd_gap_start_fid));
1629 spin_unlock(&d->opd_pre_lock);
1632 d->opd_gap_count = 0;
1633 spin_unlock(&d->opd_pre_lock);
1635 CDEBUG(D_HA, "Writing gap "DFID"+%d in llog\n",
1636 PFID(&d->opd_gap_start_fid), count);
1637 /* real gap handling is disabled until ORI-692 will be
1638 * fixed, now we only report gaps */
1640 spin_unlock(&d->opd_pre_lock);
1644 /* Only need to update last_used oid file, seq file will only be
1645 * updated during seq rollover */
1646 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off,
1647 &d->opd_last_id, d->opd_index);
1649 rc = dt_record_write(env, d->opd_last_used_oid_file, &osi->osi_lb,
1650 &osi->osi_off, local_th);
1652 CDEBUG(D_HA, "%s: Wrote last used FID: "DFID", index %d: %d\n",
1653 d->opd_obd->obd_name, PFID(fid), d->opd_index, rc);
1659 * Implement OSP layer dt_object_operations::do_declare_destroy() interface.
1661 * Declare that the caller will destroy the specified OST object.
1663 * The OST object destroy will be handled via llog asynchronously. This
1664 * function will declare the credits for generating MDS_UNLINK64_REC llog.
1666 * \param[in] env pointer to the thread context
1667 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1668 * \param[in] th pointer to the transaction handler
1670 * \retval 0 for success
1671 * \retval negative error number on failure
1673 static int osp_declare_destroy(const struct lu_env *env, struct dt_object *dt,
1676 struct osp_object *o = dt2osp_obj(dt);
1677 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Only valid for OST-facing OSP devices; MDT connections never come here. */
1682 LASSERT(!osp->opd_connect_mdt);
/* OBD_FAIL_LFSCK_LOST_MDTOBJ is a fault-injection hook that skips the
 * llog declaration to simulate a lost destroy record. */
1684 if (!CFS_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ))
1685 rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
1691 * Implement OSP layer dt_object_operations::do_destroy() interface.
1693 * Destroy the specified OST object.
1695 * The OSP generates a MDS_UNLINK64_REC record in the llog. There
1696 * will be some dedicated thread to handle the llog asynchronously.
1698 * It also marks the object as non-cached.
1700 * \param[in] env pointer to the thread context
1701 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1702 * \param[in] th pointer to the transaction handler
1704 * \retval 0 for success
1705 * \retval negative error number on failure
1707 static int osp_destroy(const struct lu_env *env, struct dt_object *dt,
1710 struct osp_object *o = dt2osp_obj(dt);
1711 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Mark the stub as non-existent immediately; the remote destroy happens
 * asynchronously via the sync llog below. */
1716 o->opo_non_exist = 1;
1718 LASSERT(!osp->opd_connect_mdt);
1720 if (!CFS_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ)) {
1721 /* once transaction is committed put proper command on
1722 * the queue going to our OST. */
1723 rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
1728 /* not needed in cache any more */
1729 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
/* dt_index_operations::dio_lookup for the orphan index (body elided in
 * this excerpt); wired into osp_orphan_index_ops below. */
1734 static int osp_orphan_index_lookup(const struct lu_env *env,
1735 struct dt_object *dt,
1737 const struct dt_key *key)
/* dt_index_operations::dio_declare_insert for the orphan index (body
 * elided in this excerpt). */
1742 static int osp_orphan_index_declare_insert(const struct lu_env *env,
1743 struct dt_object *dt,
1744 const struct dt_rec *rec,
1745 const struct dt_key *key,
1746 struct thandle *handle)
/* dt_index_operations::dio_insert for the orphan index (body elided in
 * this excerpt). */
1751 static int osp_orphan_index_insert(const struct lu_env *env,
1752 struct dt_object *dt,
1753 const struct dt_rec *rec,
1754 const struct dt_key *key,
1755 struct thandle *handle)
/* dt_index_operations::dio_declare_delete for the orphan index (body
 * elided in this excerpt). */
1760 static int osp_orphan_index_declare_delete(const struct lu_env *env,
1761 struct dt_object *dt,
1762 const struct dt_key *key,
1763 struct thandle *handle)
/* dt_index_operations::dio_delete for the orphan index (body elided in
 * this excerpt). */
1768 static int osp_orphan_index_delete(const struct lu_env *env,
1769 struct dt_object *dt,
1770 const struct dt_key *key,
1771 struct thandle *handle)
1777 * Initialize the OSP layer index iteration.
1779 * \param[in] env pointer to the thread context
1780 * \param[in] dt pointer to the index object to be iterated
1781 * \param[in] attr unused
1783 * \retval pointer to the iteration structure
1784 * \retval negative error number on failure
1786 struct dt_it *osp_it_init(const struct lu_env *env, struct dt_object *dt,
/* Allocation failure for the iterator (allocation itself elided here). */
1793 return ERR_PTR(-ENOMEM);
/* -1 == "positioned before the first entry"; first next() advances to 0. */
1795 it->ooi_pos_ent = -1;
/* Remember the caller's attr; forwarded to the peer in osp_it_fetch(). */
1797 it->ooi_attr = attr;
1799 return (struct dt_it *)it;
1803 * Finalize the OSP layer index iteration.
1805 * \param[in] env pointer to the thread context
1806 * \param[in] di pointer to the iteration structure
1808 void osp_it_fini(const struct lu_env *env, struct dt_it *di)
1810 struct osp_it *it = (struct osp_it *)di;
1811 struct page **pages = it->ooi_pages;
1812 int npages = it->ooi_total_npages;
/* Release any bulk pages still cached by the iteration. */
1815 if (pages != NULL) {
1816 for (i = 0; i < npages; i++) {
1817 if (pages[i] != NULL) {
/* The current page is kmap'ed elsewhere; unmapping (elided
 * here) must happen before it is freed. */
1818 if (pages[i] == it->ooi_cur_page) {
1820 it->ooi_cur_page = NULL;
1822 __free_page(pages[i]);
1825 OBD_FREE_PTR_ARRAY(pages, npages);
1831 * Get more records for the iteration from peer.
1833 * The new records will be filled in an array of pages. The OSP side
1834 * allows 1MB bulk data to be transferred.
1836 * \param[in] env pointer to the thread context
1837 * \param[in] it pointer to the iteration structure
1839 * \retval 0 for success
1840 * \retval negative error number on failure
1842 static int osp_it_fetch(const struct lu_env *env, struct osp_it *it)
1844 struct lu_device *dev = it->ooi_obj->do_lu.lo_dev;
1845 struct osp_device *osp = lu2osp_dev(dev);
1846 struct page **pages;
1847 struct ptlrpc_request *req = NULL;
1848 struct ptlrpc_bulk_desc *desc;
1849 struct idx_info *ii;
/* Cap the bulk transfer at 1MB (see function kernel-doc above). */
1856 npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
1857 npages /= PAGE_SIZE;
1859 OBD_ALLOC_PTR_ARRAY(pages, npages);
1863 it->ooi_pages = pages;
1864 it->ooi_total_npages = npages;
1865 for (i = 0; i < npages; i++) {
1866 pages[i] = alloc_page(GFP_NOFS);
1867 if (pages[i] == NULL)
1871 req = ptlrpc_request_alloc(osp->opd_obd->u.cli.cl_import,
1876 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
1878 ptlrpc_request_free(req);
1882 osp_set_req_replay(osp, req);
1883 req->rq_request_portal = OUT_PORTAL;
1884 ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
1885 memset(ii, 0, sizeof(*ii));
1886 if (fid_is_last_id(lu_object_fid(&it->ooi_obj->do_lu))) {
1887 /* LFSCK will iterate orphan object[FID_SEQ_LAYOUT_BTREE,
1888 * ost_index, 0] with LAST_ID FID, so it needs to replace
1889 * the FID with orphan FID here */
1890 ii->ii_fid.f_seq = FID_SEQ_LAYOUT_RBTREE;
1891 ii->ii_fid.f_oid = osp->opd_index;
1892 ii->ii_fid.f_ver = 0;
1893 ii->ii_flags = II_FL_NOHASH;
1894 ii->ii_attrs = osp_dev2node(osp);
/* Generic case: read the index identified by the object's own FID. */
1896 ii->ii_fid = *lu_object_fid(&it->ooi_obj->do_lu);
1897 ii->ii_flags = II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
1899 ii->ii_attrs = it->ooi_attr;
1901 ii->ii_magic = IDX_INFO_MAGIC;
1902 ii->ii_count = npages * LU_PAGE_COUNT;
/* Resume from where the previous fetch stopped. */
1903 ii->ii_hash_start = it->ooi_next;
1905 ptlrpc_at_set_req_timeout(req);
1907 desc = ptlrpc_prep_bulk_imp(req, npages, 1,
1908 PTLRPC_BULK_PUT_SINK,
1910 &ptlrpc_bulk_kiov_pin_ops);
1912 GOTO(out, rc = -ENOMEM);
1914 for (i = 0; i < npages; i++)
1915 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
1918 ptlrpc_request_set_replen(req);
1919 rc = ptlrpc_queue_wait(req);
1923 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
1924 req->rq_bulk->bd_nob_transferred);
/* Validate the reply header before trusting any counters in it. */
1929 ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
1930 if (ii->ii_magic != IDX_INFO_MAGIC)
1931 GOTO(out, rc = -EPROTO);
/* Convert returned lu_page count to system pages, rounding up. */
1933 npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
1934 (PAGE_SHIFT - LU_PAGE_SHIFT);
1935 if (npages > it->ooi_total_npages) {
1936 CERROR("%s: returned more pages than expected, %u > %u\n",
1937 osp->opd_obd->obd_name, npages, it->ooi_total_npages);
1938 GOTO(out, rc = -EINVAL);
1941 it->ooi_rec_size = ii->ii_recsize;
1942 it->ooi_valid_npages = npages;
1943 if (req_capsule_rep_need_swab(&req->rq_pill))
/* Next fetch continues from the server-reported hash end. */
1946 it->ooi_next = ii->ii_hash_end;
1949 ptlrpc_req_finished(req);
1955 * Move the iteration cursor to the next lu_page.
1957 * One system page (PAGE_SIZE) may contain multiple lu_page (4KB),
1958 * that depends on the LU_PAGE_COUNT. If it is not the last lu_page
1959 * in current system page, then move the iteration cursor to the next
1960 * lu_page in current system page. Otherwise, if there are more system
1961 * pages in the cache, then move the iteration cursor to the next system
1962 * page. If all the cached records (pages) have been iterated, then fetch
1963 * more records via osp_it_fetch().
1965 * \param[in] env pointer to the thread context
1966 * \param[in] di pointer to the iteration structure
1968 * \retval positive for end of the directory
1969 * \retval 0 for success
1970 * \retval negative error number on failure
1972 int osp_it_next_page(const struct lu_env *env, struct dt_it *di)
1974 struct osp_it *it = (struct osp_it *)di;
1975 struct lu_idxpage *idxpage;
1976 struct page **pages;
/* First: still inside the current lu_page? */
1982 idxpage = it->ooi_cur_idxpage;
1983 if (idxpage != NULL) {
1984 if (idxpage->lip_nr == 0)
1985 goto finish_cur_idxpage;
1987 if (it->ooi_pos_ent < idxpage->lip_nr) {
1988 CDEBUG(D_INFO, "ooi_pos %d nr %d\n",
1989 (int)it->ooi_pos_ent, (int)idxpage->lip_nr);
/* Current lu_page exhausted; advance to the next one. */
1993 it->ooi_cur_idxpage = NULL;
1994 it->ooi_pos_lu_page++;
1997 if (it->ooi_pos_lu_page < LU_PAGE_COUNT) {
1998 it->ooi_cur_idxpage = (void *)it->ooi_cur_page +
1999 LU_PAGE_SIZE * it->ooi_pos_lu_page;
2001 lustre_swab_lip_header(it->ooi_cur_idxpage);
2002 if (it->ooi_cur_idxpage->lip_magic != LIP_MAGIC) {
2003 struct osp_device *osp =
2004 lu2osp_dev(it->ooi_obj->do_lu.lo_dev);
2006 CERROR("%s: invalid magic (%x != %x) for page "
2007 "%d/%d while read layout orphan index\n",
2008 osp->opd_obd->obd_name,
2009 it->ooi_cur_idxpage->lip_magic,
2010 LIP_MAGIC, it->ooi_pos_page,
2011 it->ooi_pos_lu_page);
2012 /* Skip this lu_page next time. */
2013 it->ooi_pos_ent = idxpage->lip_nr - 1;
/* Fresh lu_page: restart entry cursor before its first entry. */
2016 it->ooi_pos_ent = -1;
2017 goto process_idxpage;
/* System page fully consumed: unmap it before moving on. */
2020 kunmap(it->ooi_cur_page);
2021 it->ooi_cur_page = NULL;
2025 pages = it->ooi_pages;
2026 if (it->ooi_pos_page < it->ooi_valid_npages) {
2027 it->ooi_cur_page = kmap(pages[it->ooi_pos_page]);
2028 it->ooi_pos_lu_page = 0;
/* All cached pages consumed: free the batch and reset state ... */
2032 for (i = 0; i < it->ooi_total_npages; i++) {
2033 if (pages[i] != NULL)
2034 __free_page(pages[i]);
2036 OBD_FREE_PTR_ARRAY(pages, it->ooi_total_npages);
2038 it->ooi_pos_page = 0;
2039 it->ooi_total_npages = 0;
2040 it->ooi_valid_npages = 0;
2043 it->ooi_cur_page = NULL;
2044 it->ooi_cur_idxpage = NULL;
2045 it->ooi_pages = NULL;
/* ... then fetch more from the peer unless the index is exhausted. */
2048 if (it->ooi_next == II_END_OFF)
2051 rc = osp_it_fetch(env, it);
2059 * Move the iteration cursor to the next record.
2061 * If there are more records in the lu_page, then move the iteration
2062 * cursor to the next record directly. Otherwise, move the iteration
2063 * cursor to the record in the next lu_page via osp_it_next_page()
2065 * \param[in] env pointer to the thread context
2066 * \param[in] di pointer to the iteration structure
2068 * \retval positive for end of the directory
2069 * \retval 0 for success
2070 * \retval negative error number on failure
2072 static int osp_orphan_it_next(const struct lu_env *env, struct dt_it *di)
2074 struct osp_it *it = (struct osp_it *)di;
2075 struct lu_idxpage *idxpage;
2080 idxpage = it->ooi_cur_idxpage;
2081 if (idxpage != NULL) {
2082 if (idxpage->lip_nr == 0)
2086 if (it->ooi_pos_ent < idxpage->lip_nr) {
/* Pick the entry layout by the record size reported by the peer
 * in osp_it_fetch() (v3 > v2 > v1), swabbing if needed. */
2087 if (it->ooi_rec_size ==
2088 sizeof(struct lu_orphan_rec_v3)) {
2090 (struct lu_orphan_ent_v3 *)idxpage->lip_entries+
2093 lustre_swab_orphan_ent_v3(it->ooi_ent);
2094 } else if (it->ooi_rec_size ==
2095 sizeof(struct lu_orphan_rec_v2)) {
2097 (struct lu_orphan_ent_v2 *)idxpage->lip_entries+
2100 lustre_swab_orphan_ent_v2(it->ooi_ent);
2103 (struct lu_orphan_ent *)idxpage->lip_entries +
2106 lustre_swab_orphan_ent(it->ooi_ent);
/* Current lu_page exhausted: advance to the next one (or fetch). */
2112 rc = osp_it_next_page(env, di);
/* dt_it_ops::get for OSP iterators; positioning is done via ->load(),
 * body elided in this excerpt. */
2119 int osp_it_get(const struct lu_env *env, struct dt_it *di,
2120 const struct dt_key *key)
/* dt_it_ops::put counterpart of osp_it_get(); nothing to release here. */
2125 void osp_it_put(const struct lu_env *env, struct dt_it *di)
/* Return the key (FID) of the current orphan entry, or NULL when the
 * iterator is not positioned on an entry. */
2129 static struct dt_key *osp_orphan_it_key(const struct lu_env *env,
2130 const struct dt_it *di)
2132 struct osp_it *it = (struct osp_it *)di;
2133 struct lu_orphan_ent *ent = (struct lu_orphan_ent *)it->ooi_ent;
2135 if (likely(ent != NULL))
2136 return (struct dt_key *)(&ent->loe_key);
/* Orphan index keys are always FIDs, so the key size is fixed. */
2141 static int osp_orphan_it_key_size(const struct lu_env *env,
2142 const struct dt_it *di)
2144 return sizeof(struct lu_fid);
/* Copy the current orphan record into @rec, choosing the layout (v3/v2/v1)
 * by the per-fetch record size, mirroring osp_orphan_it_next(). */
2147 static int osp_orphan_it_rec(const struct lu_env *env, const struct dt_it *di,
2148 struct dt_rec *rec, __u32 attr)
2150 struct osp_it *it = (struct osp_it *)di;
2152 if (likely(it->ooi_ent)) {
2153 if (it->ooi_rec_size == sizeof(struct lu_orphan_rec_v3)) {
2154 struct lu_orphan_ent_v3 *ent =
2155 (struct lu_orphan_ent_v3 *)it->ooi_ent;
2157 *(struct lu_orphan_rec_v3 *)rec = ent->loe_rec;
2158 } else if (it->ooi_rec_size ==
2159 sizeof(struct lu_orphan_rec_v2)) {
2160 struct lu_orphan_ent_v2 *ent =
2161 (struct lu_orphan_ent_v2 *)it->ooi_ent;
2163 *(struct lu_orphan_rec_v2 *)rec = ent->loe_rec;
2165 struct lu_orphan_ent *ent =
2166 (struct lu_orphan_ent *)it->ooi_ent;
2168 *(struct lu_orphan_rec *)rec = ent->loe_rec;
/* Return the resume cookie: the hash where the next fetch would start. */
2176 __u64 osp_it_store(const struct lu_env *env, const struct dt_it *di)
2178 struct osp_it *it = (struct osp_it *)di;
2180 return it->ooi_next;
2184 * Locate the iteration cursor to the specified position (cookie).
2186 * \param[in] env pointer to the thread context
2187 * \param[in] di pointer to the iteration structure
2188 * \param[in] hash the specified position
2190 * \retval positive number for locating to the exactly position
2192 * \retval 0 for arriving at the end of the iteration
2193 * \retval negative error number on failure
2195 static int osp_orphan_it_load(const struct lu_env *env, const struct dt_it *di,
2198 struct osp_it *it = (struct osp_it *)di;
/* Seed the fetch cursor with the caller's cookie, then step onto the
 * first entry at/after it. */
2201 it->ooi_next = hash;
2202 rc = osp_orphan_it_next(env, (struct dt_it *)di);
/* dt_it_ops::key_rec for OSP iterators (body elided in this excerpt). */
2212 int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
/* dt_index_operations for the LFSCK orphan index; selected by
 * osp_index_try() for LAST_ID/IDIF objects. */
2218 static const struct dt_index_operations osp_orphan_index_ops = {
2219 .dio_lookup = osp_orphan_index_lookup,
2220 .dio_declare_insert = osp_orphan_index_declare_insert,
2221 .dio_insert = osp_orphan_index_insert,
2222 .dio_declare_delete = osp_orphan_index_declare_delete,
2223 .dio_delete = osp_orphan_index_delete,
2225 .init = osp_it_init,
2226 .fini = osp_it_fini,
2227 .next = osp_orphan_it_next,
2230 .key = osp_orphan_it_key,
2231 .key_size = osp_orphan_it_key_size,
2232 .rec = osp_orphan_it_rec,
2233 .store = osp_it_store,
2234 .load = osp_orphan_it_load,
2235 .key_rec = osp_it_key_rec,
2240 * Implement OSP layer dt_object_operations::do_index_try() interface.
2242 * Negotiate the index type.
2244 * If the target index is an IDIF object, then use osp_orphan_index_ops.
2245 * Otherwise, assign osp_md_index_ops to the dt_object::do_index_ops.
2246 * (\see lustre/include/lustre_fid.h for IDIF.)
2248 * \param[in] env pointer to the thread context
2249 * \param[in] dt pointer to the OSP layer dt_object
2250 * \param[in] feat unused
2252 * \retval 0 for success
2254 static int osp_index_try(const struct lu_env *env,
2255 struct dt_object *dt,
2256 const struct dt_index_features *feat)
2258 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
/* LAST_ID objects with IDIF FIDs get the orphan-index ops (used by
 * LFSCK); everything else goes through the MD index ops. */
2260 if (fid_is_last_id(fid) && fid_is_idif(fid))
2261 dt->do_index_ops = &osp_orphan_index_ops;
2263 dt->do_index_ops = &osp_md_index_ops;
/* dt_object_operations used for OST objects (see osp_object_init()). */
2267 static const struct dt_object_operations osp_obj_ops = {
2268 .do_declare_attr_get = osp_declare_attr_get,
2269 .do_attr_get = osp_attr_get,
2270 .do_declare_attr_set = osp_declare_attr_set,
2271 .do_attr_set = osp_attr_set,
2272 .do_declare_xattr_get = osp_declare_xattr_get,
2273 .do_xattr_get = osp_xattr_get,
2274 .do_declare_xattr_set = osp_declare_xattr_set,
2275 .do_xattr_set = osp_xattr_set,
2276 .do_declare_create = osp_declare_create,
2277 .do_create = osp_create,
2278 .do_declare_destroy = osp_declare_destroy,
2279 .do_destroy = osp_destroy,
2280 .do_index_try = osp_index_try,
2284 * Implement OSP layer lu_object_operations::loo_object_init() interface.
2286 * Initialize the object.
2288 * If it is a remote MDT object, then call do_attr_get() to fetch
2289 * the attribute from the peer.
2291 * \param[in] env pointer to the thread context
2292 * \param[in] o pointer to the OSP layer lu_object
2293 * \param[in] conf unused
2295 * \retval 0 for success
2296 * \retval negative error number on failure
2298 static int osp_object_init(const struct lu_env *env, struct lu_object *o,
2299 const struct lu_object_conf *conf)
2301 struct osp_object *po = lu2osp_obj(o);
/* Every OSP object is a stub for a remote one. */
2306 o->lo_header->loh_attr |= LOHA_REMOTE;
2308 if (is_ost_obj(o)) {
2309 po->opo_obj.do_ops = &osp_obj_ops;
2311 struct lu_attr *la = &osp_env_info(env)->osi_attr;
/* Remote MDT object: install MD ops and probe existence below. */
2313 po->opo_obj.do_ops = &osp_md_obj_ops;
2314 po->opo_obj.do_body_ops = &osp_md_body_ops;
2316 if (conf != NULL && conf->loc_flags & LOC_F_NEW) {
/* Caller is about to create it; skip the attr fetch. */
2317 po->opo_non_exist = 1;
2319 rc = po->opo_obj.do_ops->do_attr_get(env, lu2dt_obj(o),
2322 o->lo_header->loh_attr |=
2323 LOHA_EXISTS | (la->la_mode & S_IFMT);
2324 if (rc == -ENOENT) {
2325 po->opo_non_exist = 1;
/* RCU callback: actually free the osp_object after the grace period
 * started by osp_object_free(). */
2334 static void osp_object_free_rcu(struct rcu_head *head)
2336 struct osp_object *obj = container_of(head, struct osp_object,
2337 opo_header.loh_rcu);
2339 kmem_cache_free(osp_object_kmem, obj);
2343 * Implement OSP layer lu_object_operations::loo_object_free() interface.
2345 * Finalize the object.
2347 * If the OSP object has attributes cache, then destroy the cache.
2348 * Free the object finally.
2350 * \param[in] env pointer to the thread context
2351 * \param[in] o pointer to the OSP layer lu_object
2353 static void osp_object_free(const struct lu_env *env, struct lu_object *o)
2355 struct osp_object *obj = lu2osp_obj(o);
2356 struct lu_object_header *h = o->lo_header;
2357 struct osp_xattr_entry *oxe;
2358 struct osp_xattr_entry *tmp;
2361 dt_object_fini(&obj->opo_obj);
2363 lu_object_header_fini(h);
/* Tear down the xattr cache; at this point the list must hold the only
 * remaining reference on each entry. */
2364 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
2365 list_del_init(&oxe->oxe_list);
2366 count = atomic_read(&oxe->oxe_ref);
2367 LASSERTF(count == 1,
2368 "Still has %d users on the xattr entry %.*s\n",
2369 count-1, (int)oxe->oxe_namelen, oxe->oxe_name);
2371 osp_oac_xattr_free(oxe);
/* Defer the actual free past an RCU grace period so lockless lookups
 * that still hold a pointer do not touch freed memory. */
2373 OBD_FREE_PRE(obj, sizeof(*obj), "slab-freed");
2374 call_rcu(&obj->opo_header.loh_rcu, osp_object_free_rcu);
2378 * Implement OSP layer lu_object_operations::loo_object_release() interface.
2380 * Cleanup (not free) the object.
2382 * If it is a reserved object but failed to be created, or it is an OST
2383 * object, then mark the object as non-cached.
2385 * \param[in] env pointer to the thread context
2386 * \param[in] o pointer to the OSP layer lu_object
2388 static void osp_object_release(const struct lu_env *env, struct lu_object *o)
2390 struct osp_object *po = lu2osp_obj(o);
2391 struct osp_device *d = lu2osp_dev(o->lo_dev);
2396 * release reservation if object was declared but not created
2397 * this may require lu_object_put() in LOD
2399 if (unlikely(po->opo_reserved)) {
2400 LASSERT(d->opd_pre != NULL);
2401 LASSERT(d->opd_pre_reserved > 0);
2402 spin_lock(&d->opd_pre_lock);
2403 d->opd_pre_reserved--;
2404 spin_unlock(&d->opd_pre_lock);
2407 * Check that osp_precreate_cleanup_orphans is not blocked
2408 * due to opd_pre_reserved > 0.
/* NOTE(review): opd_pre_reserved is re-read here outside opd_pre_lock —
 * presumably an acceptable racy wakeup check; confirm. */
2410 if (unlikely(d->opd_pre_reserved == 0 &&
2411 (d->opd_pre_recovering || d->opd_pre_status)))
2412 wake_up(&d->opd_pre_waitq);
2414 /* not needed in cache any more */
2415 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
2419 /* XXX: Currently, NOT cache OST-object on MDT because:
2420 * 1. it is not often accessed on MDT.
2421 * 2. avoid up layer (such as LFSCK) to load too many
2422 * once-used OST-objects. */
2423 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
/* lu_object_operations::loo_object_print — one-line debug dump. */
2428 static int osp_object_print(const struct lu_env *env, void *cookie,
2429 lu_printer_t p, const struct lu_object *l)
2431 const struct osp_object *o = lu2osp_obj((struct lu_object *)l);
2433 return (*p)(env, cookie, LUSTRE_OSP_NAME"-object@%p", o);
/* lu_object_operations::loo_object_invariant (body elided in this
 * excerpt); OSP stubs have no local invariant to check. */
2436 static int osp_object_invariant(const struct lu_object *o)
2441 const struct lu_object_operations osp_lu_obj_ops = {
2442 .loo_object_init = osp_object_init,
2443 .loo_object_free = osp_object_free,
2444 .loo_object_release = osp_object_release,
2445 .loo_object_print = osp_object_print,
2446 .loo_object_invariant = osp_object_invariant