4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * lustre/osp/osp_object.c
31 * Lustre OST Proxy Device (OSP) is the agent on the local MDT for the OST
34 * OSP object attributes cache
35 * ---------------------------
36 * OSP object is the stub of the remote OST-object or MDT-object. Both the
37 * attribute and the extended attributes are stored on the peer side remotely.
38 * It is inefficient to send RPC to peer to fetch those attributes when every
39 * get_attr()/get_xattr() called. For a large system, the LFSCK synchronous
40 * mode scanning is prohibitively inefficient.
42 * The OSP maintains the OSP object attributes cache to cache some
43 * attributes on the local MDT.
45 * The basic attributes, such as owner/mode/flags, are stored in the
46 * osp_object::opo_attr. The extended attributes will be stored
47 * as osp_xattr_entry. Every extended attribute has an independent
48 * osp_xattr_entry, and all the osp_xattr_entry are linked into the
49 * osp_object::opo_xattr_list. The OSP object attributes cache
50 * is protected by the osp_object::opo_lock.
52 * Not all OSP objects have an attributes cache because maintaining
53 * the cache requires some resources. Currently, the OSP object
54 * attributes cache will be initialized when the attributes or the
55 * extended attributes are pre-fetched via osp_declare_attr_get()
56 * or osp_declare_xattr_get(). That is usually for LFSCK purpose,
57 * but it also can be shared by others.
60 * XXX: Do NOT prepare the OUT RPC for the remote transaction here. (Please
61 * refer to the comment of osp_trans_create() for remote transactions.)
63 * According to our current transaction/dt_object_lock framework (to make
64 * the cross-MDTs modification for DNE1 to be workable), the transaction
65 * sponsor will start the transaction firstly, then try to acquire related
66 * dt_object_lock if needed. Under such rules, if we want to prepare the
67 * OUT RPC in the transaction declare phase, then related attr/xattr
68 * should be known without dt_object_lock. But such condition maybe not
69 * true for some remote transaction case. For example:
71 * For linkEA repairing (by LFSCK) case, before the LFSCK thread obtained
72 * the dt_object_lock on the target MDT-object, it cannot know whether
73 * the MDT-object has linkEA or not, nor whether the existing linkEA is valid.
75 * Since the LFSCK thread cannot hold dt_object_lock before the remote
76 * transaction start (otherwise there will be some potential deadlock),
77 * it cannot prepare related OUT RPC for repairing during the declare
78 * phase as other normal transactions do.
80 * To resolve the trouble, we will make OSP to prepare related OUT RPC
81 * after remote transaction started, and trigger the remote updating
82 * (send RPC) when trans_stop. Then the up layer users, such as LFSCK,
83 * can follow the general rule to handle trans_start/dt_object_lock
84 * for repairing linkEA inconsistency without distinguishing remote
87 * In fact, above solution for remote transaction should be the normal
88 * model without considering DNE1. The trouble brought by DNE1 will be
89 * resolved in DNE2. At that time, this patch can be removed.
92 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
93 * Author: Mikhail Pershin <mike.tappro@intel.com>
96 #define DEBUG_SUBSYSTEM S_MDS
98 #include <lustre_obdo.h>
99 #include <lustre_swab.h>
101 #include "osp_internal.h"
103 static inline __u32 osp_dev2node(struct osp_device *osp)
105 return osp->opd_storage->dd_lu_dev.ld_site->ld_seq_site->ss_node_id;
108 static inline const char *osp_dto2name(struct osp_object *obj)
110 return obj->opo_obj.do_lu.lo_dev->ld_obd->obd_name;
113 static inline bool is_ost_obj(struct lu_object *lo)
115 return !lu2osp_dev(lo->lo_dev)->opd_connect_mdt;
/* Copy the new xattr value into \a oxe and record its length.
 * NOTE(review): assumes oxe->oxe_value has room for buf->lb_len bytes;
 * callers appear to check this via oxe_can_hold() first — confirm.
 * Callers either operate on a not-yet-published entry or hold
 * osp_object::opo_lock (see osp_xattr_get_interpterer()). */
118 static inline void __osp_oac_xattr_assignment(struct osp_object *obj,
119 struct osp_xattr_entry *oxe,
120 const struct lu_buf *buf)
123 memcpy(oxe->oxe_value, buf->lb_buf, buf->lb_len);
125 oxe->oxe_vallen = buf->lb_len;
131 * Assign FID to the OST object.
133 * This function will assign the FID to the OST object of a striped file.
135 * \param[in] env pointer to the thread context
136 * \param[in] d pointer to the OSP device
137 * \param[in] o pointer to the OSP object that the FID will be
140 static void osp_object_assign_fid(const struct lu_env *env,
141 struct osp_device *d, struct osp_object *o)
143 struct osp_thread_info *osi = osp_env_info(env);
/* The object must not carry a FID yet and must have been reserved
 * from the precreate pool before a FID can be assigned. */
145 LASSERT(fid_is_zero(lu_object_fid(&o->opo_obj.do_lu)));
146 LASSERT(o->opo_reserved);
/* Fetch the next precreated FID into per-thread scratch space, then
 * attach it to the lu_object. */
149 osp_precreate_get_fid(env, d, &osi->osi_fid);
151 lu_object_assign_fid(env, &o->opo_obj.do_lu, &osi->osi_fid);
154 #define OXE_DEFAULT_LEN 16
157 * Allocate osp_xattr_entry.
159 * If total size exceeds PAGE_SIZE, name and value will allocated in a
160 * separate buf, otherwise it's allocated inline.
162 * \param[in] name pointer to XATTR name
163 * \param[in] namelen XATTR name len
164 * \param[in] vallen XATTR value len
165 * \retval oxe pointer on success
166 * \retval NULL on failure
168 static struct osp_xattr_entry *osp_oac_xattr_alloc(const char *name,
172 struct osp_xattr_entry *oxe;
/* Reserve at least OXE_DEFAULT_LEN bytes of value space so small
 * values can be updated in place later. */
176 vallen = OXE_DEFAULT_LEN;
177 size = sizeof(*oxe) + namelen + 1 + vallen;
/* Inline layout: header, NUL-terminated name, then value, in one
 * allocation of 'size' bytes. */
178 if (likely(size <= PAGE_SIZE)) {
179 OBD_ALLOC(oxe, size);
182 oxe->oxe_buflen = size;
183 oxe->oxe_value = oxe->oxe_name + namelen + 1;
/* Large layout: value lives in a separate OBD_ALLOC_LARGE buffer;
 * the header allocation then holds only the entry plus the name. */
187 OBD_ALLOC_LARGE(buf, vallen);
192 OBD_ALLOC(oxe, size);
193 if (unlikely(!oxe)) {
197 oxe->oxe_buflen = vallen;
198 oxe->oxe_value = buf;
199 oxe->oxe_largebuf = 1;
202 INIT_LIST_HEAD(&oxe->oxe_list);
204 oxe->oxe_namelen = namelen;
205 memcpy(oxe->oxe_name, name, namelen);
206 /* One ref is for the caller, the other is for the entry on the list. */
207 atomic_set(&oxe->oxe_ref, 2);
/* Free an xattr cache entry. The entry must already be off the object's
 * opo_xattr_list. Large-buffer entries free value and header separately,
 * mirroring the two allocations made in osp_oac_xattr_alloc(). */
212 static void osp_oac_xattr_free(struct osp_xattr_entry *oxe)
214 LASSERT(list_empty(&oxe->oxe_list));
215 if (unlikely(oxe->oxe_largebuf)) {
216 OBD_FREE_LARGE(oxe->oxe_value, oxe->oxe_buflen);
217 OBD_FREE(oxe, sizeof(*oxe) + oxe->oxe_namelen + 1);
/* Inline entry: oxe_buflen is the size of the single allocation. */
219 OBD_FREE(oxe, oxe->oxe_buflen);
224 * Release reference from the OSP object extended attribute entry.
226 * If it is the last reference, then free the entry.
228 * \param[in] oxe pointer to the OSP object extended attribute entry.
230 static inline void osp_oac_xattr_put(struct osp_xattr_entry *oxe)
232 if (atomic_dec_and_test(&oxe->oxe_ref))
233 osp_oac_xattr_free(oxe);
237 * Find the named extended attribute in the OSP object attributes cache.
239 * The caller should take the osp_object::opo_lock before calling
242 * \param[in] obj pointer to the OSP object
243 * \param[in] name the name of the extended attribute
244 * \param[in] namelen the name length of the extended attribute
246 * \retval pointer to the found extended attribute entry
247 * \retval NULL if the specified extended attribute is not
250 static struct osp_xattr_entry *
251 osp_oac_xattr_find_locked(struct osp_object *obj, const char *name,
254 struct osp_xattr_entry *oxe;
/* Linear scan of the per-object cache list; a hit requires both the
 * name length and the name bytes to match. */
256 list_for_each_entry(oxe, &obj->opo_xattr_list, oxe_list) {
257 if (namelen == oxe->oxe_namelen &&
258 strncmp(name, oxe->oxe_name, namelen) == 0)
266 * Find the named extended attribute in the OSP object attributes cache.
268 * Call osp_oac_xattr_find_locked() with the osp_object::opo_lock held.
270 * \param[in] obj pointer to the OSP object
271 * \param[in] name the name of the extended attribute
272 * \param[in] unlink true if the extended attribute entry is to be removed
275 * \retval pointer to the found extended attribute entry
276 * \retval NULL if the specified extended attribute is not
279 static struct osp_xattr_entry *osp_oac_xattr_find(struct osp_object *obj,
280 const char *name, bool unlink)
282 struct osp_xattr_entry *oxe = NULL;
284 spin_lock(&obj->opo_lock);
285 oxe = osp_oac_xattr_find_locked(obj, name, strlen(name));
/* On a hit: either remove the entry from the cache list (\a unlink)
 * or take an extra reference for the caller. */
288 list_del_init(&oxe->oxe_list);
290 atomic_inc(&oxe->oxe_ref);
292 spin_unlock(&obj->opo_lock);
298 * Find the named extended attribute in the OSP object attributes cache.
300 * If it is not in the cache, then add an empty entry (that will be
301 * filled later) to cache with the given name.
303 * \param[in] obj pointer to the OSP object
304 * \param[in] name the name of the extended attribute
305 * \param[in] len the length of the extended attribute value
307 * \retval pointer to the found or new-created extended
309 * \retval NULL if the specified extended attribute is not in the
310 * cache or fail to add new empty entry to the cache.
312 static struct osp_xattr_entry *
313 osp_oac_xattr_find_or_add(struct osp_object *obj, const char *name, size_t len)
315 struct osp_xattr_entry *oxe;
316 struct osp_xattr_entry *tmp = NULL;
317 size_t namelen = strlen(name);
/* Fast path: entry already cached; a reference is taken for us. */
319 oxe = osp_oac_xattr_find(obj, name, false);
323 oxe = osp_oac_xattr_alloc(name, namelen, len);
/* Re-check under opo_lock: another thread may have inserted the same
 * name while we were allocating. */
327 spin_lock(&obj->opo_lock);
328 tmp = osp_oac_xattr_find_locked(obj, name, namelen);
330 list_add_tail(&oxe->oxe_list, &obj->opo_xattr_list);
/* Lost the race: take a reference on the winner's entry instead. */
332 atomic_inc(&tmp->oxe_ref);
333 spin_unlock(&obj->opo_lock);
/* Our freshly-allocated entry was never published; discard it. */
336 osp_oac_xattr_free(oxe);
343 /* whether \a oxe is large enough to hold XATTR value */
344 static inline bool oxe_can_hold(struct osp_xattr_entry *oxe, size_t len)
346 if (unlikely(oxe->oxe_largebuf))
347 return oxe->oxe_buflen > len;
349 return oxe->oxe_buflen - oxe->oxe_namelen - 1 - sizeof(*oxe) > len;
353 * Assign the cached OST-object's EA with the given value.
355 * If the current EA entry in cache has not enough space to hold the new
356 * value, remove it, create a new one, then assign with the given value.
358 * \param[in] obj pointer to the OSP object
359 * \param[in] oxe pointer to the cached EA entry to be assigned
360 * \param[in] buf pointer to the buffer with new EA value
362 * \retval pointer to the new created EA entry in cache if
363 * current entry is not big enough; otherwise, the
364 * input 'oxe' will be returned.
366 static struct osp_xattr_entry *
367 osp_oac_xattr_assignment(struct osp_object *obj, struct osp_xattr_entry *oxe,
368 const struct lu_buf *buf)
370 struct osp_xattr_entry *new = NULL;
371 struct osp_xattr_entry *old = NULL;
372 int namelen = oxe->oxe_namelen;
373 bool unlink_only = false;
/* Current entry too small: allocate a replacement and fill it before
 * it becomes visible on the list (no lock needed yet). */
375 if (!oxe_can_hold(oxe, buf->lb_len)) {
376 new = osp_oac_xattr_alloc(oxe->oxe_name, namelen, buf->lb_len);
378 __osp_oac_xattr_assignment(obj, new, buf);
381 CWARN("%s: cannot update cached xattr %.*s of "DFID"\n",
382 osp_dto2name(obj), namelen, oxe->oxe_name,
383 PFID(lu_object_fid(&obj->opo_obj.do_lu)));
/* Re-lookup under opo_lock: the cached entry may have been replaced
 * or unlinked concurrently. */
387 spin_lock(&obj->opo_lock);
388 old = osp_oac_xattr_find_locked(obj, oxe->oxe_name, namelen);
391 /* Unlink the 'old'. */
392 list_del_init(&old->oxe_list);
394 /* Drop the ref for 'old' on list. */
395 osp_oac_xattr_put(old);
397 /* Drop the ref for current using. */
398 osp_oac_xattr_put(oxe);
401 /* Insert 'new' into list. */
402 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
403 } else if (unlink_only) {
404 /* Unlink the 'old'. */
405 list_del_init(&old->oxe_list);
407 /* Drop the ref for 'old' on list. */
408 osp_oac_xattr_put(old);
/* Entry is big enough: update it in place under the lock. */
410 __osp_oac_xattr_assignment(obj, oxe, buf);
413 /* Drop the ref for current using. */
414 osp_oac_xattr_put(oxe);
417 /* Someone unlinked the 'old' by race,
418 * insert the 'new' one into list. */
419 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
421 spin_unlock(&obj->opo_lock);
427 * Parse the OSP object attribute from the RPC reply.
429 * If the attribute is valid, then it will be added to the OSP object
432 * \param[in] env pointer to the thread context
433 * \param[in] reply pointer to the RPC reply
434 * \param[in] req pointer to the RPC request
435 * \param[out] attr pointer to buffer to hold the output attribute
436 * \param[in] obj pointer to the OSP object
437 * \param[in] index the index of the attribute buffer in the reply
439 * \retval 0 for success
440 * \retval negative error number on failure
442 static int osp_get_attr_from_reply(const struct lu_env *env,
443 struct object_update_reply *reply,
444 struct ptlrpc_request *req,
445 struct lu_attr *attr,
446 struct osp_object *obj, int index)
448 struct osp_thread_info *osi = osp_env_info(env);
449 struct lu_buf *rbuf = &osi->osi_lb2;
450 struct obdo *lobdo = &osi->osi_obdo;
/* Extract the wire obdo at 'index' from the update reply. */
454 rc = object_update_result_data_get(reply, rbuf, index);
458 wobdo = rbuf->lb_buf;
/* The reply payload must be exactly one wire obdo. */
459 if (rbuf->lb_len != sizeof(*wobdo))
462 LASSERT(req != NULL);
/* Byte-swap the wire obdo if the peer has different endianness. */
463 if (req_capsule_req_need_swab(&req->rq_pill))
464 lustre_swab_obdo(wobdo);
466 lustre_get_wire_obdo(NULL, lobdo, wobdo);
/* Cache the attributes on the object under opo_lock. */
468 spin_lock(&obj->opo_lock);
469 la_from_obdo(&obj->opo_attr, lobdo, lobdo->o_valid);
470 spin_unlock(&obj->opo_lock);
/* Also copy the attributes out to the caller's buffer. */
473 la_from_obdo(attr, lobdo, lobdo->o_valid);
479 * Interpreter function for getting OSP object attribute asynchronously.
481 * Called to interpret the result of an async mode RPC for getting the
482 * OSP object attribute.
484 * \param[in] env pointer to the thread context
485 * \param[in] reply pointer to the RPC reply
486 * \param[in] req pointer to the RPC request
487 * \param[in] obj pointer to the OSP object
488 * \param[out] data pointer to buffer to hold the output attribute
489 * \param[in] index the index of the attribute buffer in the reply
490 * \param[in] rc the result for handling the RPC
492 * \retval 0 for success
493 * \retval negative error number on failure
495 static int osp_attr_get_interpterer(const struct lu_env *env,
496 struct object_update_reply *reply,
497 struct ptlrpc_request *req,
498 struct osp_object *obj,
499 void *data, int index, int rc)
501 struct lu_attr *attr = data;
/* Success: mark the object as existing and parse the reply into the
 * object's attribute cache. */
504 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
505 obj->opo_non_exist = 0;
507 return osp_get_attr_from_reply(env, reply, req, NULL, obj,
/* Object not found on the peer: record non-existence. */
511 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
512 obj->opo_non_exist = 1;
515 spin_lock(&obj->opo_lock);
517 spin_unlock(&obj->opo_lock);
524 * Implement OSP layer dt_object_operations::do_declare_attr_get() interface.
526 * Declare that the caller will get attribute from the specified OST object.
528 * This function adds an Object Unified Target (OUT) sub-request to the per-OSP
529 * based shared asynchronous request queue. The osp_attr_get_interpterer()
530 * is registered as the interpreter function to handle the result of this
533 * \param[in] env pointer to the thread context
534 * \param[in] dt pointer to the OSP layer dt_object
536 * \retval 0 for success
537 * \retval negative error number on failure
539 static int osp_declare_attr_get(const struct lu_env *env, struct dt_object *dt)
541 struct osp_object *obj = dt2osp_obj(dt);
542 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* The shared async request queue is serialized by this mutex. */
545 mutex_lock(&osp->opd_async_requests_mutex);
546 rc = osp_insert_async_request(env, OUT_ATTR_GET, obj, 0, NULL, NULL,
547 &obj->opo_attr, sizeof(struct obdo),
548 osp_attr_get_interpterer);
549 mutex_unlock(&osp->opd_async_requests_mutex);
555 * Implement OSP layer dt_object_operations::do_attr_get() interface.
557 * Get attribute from the specified MDT/OST object.
559 * If the attribute is in the OSP object attributes cache, then return
560 * the cached attribute directly. Otherwise it will trigger an OUT RPC
561 * to the peer to get the attribute synchronously, if successful, add it
562 * to the OSP attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
564 * \param[in] env pointer to the thread context
565 * \param[in] dt pointer to the OSP layer dt_object
566 * \param[out] attr pointer to the buffer to hold the output attribute
568 * \retval 0 for success
569 * \retval negative error number on failure
571 int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
572 struct lu_attr *attr)
574 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
575 struct osp_object *obj = dt2osp_obj(dt);
576 struct dt_device *dev = &osp->opd_dt_dev;
577 struct osp_update_request *update;
578 struct object_update_reply *reply;
579 struct ptlrpc_request *req = NULL;
580 int invalidated, cache = 0, rc = 0;
/* Known-missing OST object or destroyed object: no RPC needed. */
583 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
585 if (obj->opo_destroyed)
/* Cache hit: return the cached attributes under opo_lock. */
588 spin_lock(&obj->opo_lock);
589 if (obj->opo_attr.la_valid != 0 && !obj->opo_stale) {
590 *attr = obj->opo_attr;
591 spin_unlock(&obj->opo_lock);
595 spin_unlock(&obj->opo_lock);
/* Cache miss: build and send a synchronous OUT_ATTR_GET RPC. */
597 update = osp_update_request_create(dev);
599 RETURN(PTR_ERR(update));
601 rc = OSP_UPDATE_RPC_PACK(env, out_attr_get_pack, update,
602 lu_object_fid(&dt->do_lu));
604 CERROR("%s: Insert update error "DFID": rc = %d\n",
605 dev->dd_lu_dev.ld_obd->obd_name,
606 PFID(lu_object_fid(&dt->do_lu)), rc);
/* Sample the invalidation sequence before the RPC so we can tell
 * whether an invalidation raced with it. */
611 invalidated = atomic_read(&obj->opo_invalidate_seq);
613 rc = osp_remote_sync(env, osp, update, &req);
615 down_read(&obj->opo_invalidate_sem);
616 if (invalidated == atomic_read(&obj->opo_invalidate_seq)) {
617 /* no invalidation has arrived so far, we can cache the attrs */
/* RPC reported the object does not exist on the peer. */
623 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
625 obj->opo_non_exist = 1;
627 CERROR("%s: osp_attr_get update error "DFID": rc = %d\n",
628 dev->dd_lu_dev.ld_obd->obd_name,
629 PFID(lu_object_fid(&dt->do_lu)), rc);
635 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
636 obj->opo_non_exist = 0;
637 reply = req_capsule_server_sized_get(&req->rq_pill,
638 &RMF_OUT_UPDATE_REPLY,
639 OUT_UPDATE_REPLY_SIZE);
640 if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC)
641 GOTO(out, rc = -EPROTO);
/* Parse the reply; only feed the cache if no invalidation raced. */
643 rc = osp_get_attr_from_reply(env, reply, req, attr,
644 cache ? obj : NULL, 0);
648 spin_lock(&obj->opo_lock);
651 spin_unlock(&obj->opo_lock);
656 up_read(&obj->opo_invalidate_sem);
660 ptlrpc_req_finished(req);
662 osp_update_request_destroy(env, update);
668 * Implement OSP layer dt_object_operations::do_declare_attr_set() interface.
670 * If the transaction is not remote one, then declare the credits that will
671 * be used for the subsequent llog record for the object's attributes.
673 * \param[in] env pointer to the thread context
674 * \param[in] dt pointer to the OSP layer dt_object
675 * \param[in] attr pointer to the attribute to be set
676 * \param[in] th pointer to the transaction handler
678 * \retval 0 for success
679 * \retval negative error number on failure
681 static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
682 const struct lu_attr *attr, struct thandle *th)
684 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
685 struct osp_object *o = dt2osp_obj(dt);
/* Remote-only transactions are handled by the MDT-flavoured path. */
688 if (is_only_remote_trans(th))
689 return osp_md_declare_attr_set(env, dt, attr, th);
691 * Usually we don't allow server stack to manipulate size
692 * but there is a special case when striping is created
693 * late, after stripeless file got truncated to non-zero.
695 * In this case we do the following:
697 * 1) grab id in declare - this can lead to leaked OST objects
698 * but we don't currently have proper mechanism and the only
699 * options we have are to do truncate RPC holding transaction
700 * open (very bad) or to grab id in declare at cost of leaked
701 * OST object in same very rare unfortunate case (just bad)
702 * notice 1.6-2.0 do assignment outside of running transaction
703 * all the time, meaning many more chances for leaked objects.
705 * 2) send synchronous truncate RPC with just assigned id
708 /* there are few places in MDD code still passing NULL
709 * XXX: to be fixed soon */
/* Late-striping special case: assign a FID now and truncate the
 * just-created OST object synchronously (see comment above). */
713 if (attr->la_valid & LA_SIZE && attr->la_size > 0 &&
714 fid_is_zero(lu_object_fid(&o->opo_obj.do_lu))) {
715 LASSERT(!dt_object_exists(dt));
716 osp_object_assign_fid(env, d, o);
717 rc = osp_object_truncate(env, dt, attr->la_size);
/* Only remote-visible attribute changes need llog credits. */
722 if (!(attr->la_valid & LA_REMOTE_ATTR_SET))
725 /* track all UID/GID, projid, and layout version changes via llog */
726 rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
732 * Implement OSP layer dt_object_operations::do_attr_set() interface.
734 * Set attribute to the specified OST object.
736 * If the transaction is a remote one, then add OUT_ATTR_SET sub-request
737 * in the OUT RPC that will be flushed when the remote transaction stop.
738 * Otherwise, it will generate a MDS_SETATTR64_REC record in the llog that
739 * will be handled by a dedicated thread asynchronously.
741 * If the attribute entry exists in the OSP object attributes cache,
742 * then update the cached attribute according to given attribute.
744 * \param[in] env pointer to the thread context
745 * \param[in] dt pointer to the OSP layer dt_object
746 * \param[in] attr pointer to the attribute to be set
747 * \param[in] th pointer to the transaction handler
749 * \retval 0 for success
750 * \retval negative error number on failure
752 static int osp_attr_set(const struct lu_env *env, struct dt_object *dt,
753 const struct lu_attr *attr, struct thandle *th)
755 struct osp_object *o = dt2osp_obj(dt);
759 /* we're interested in uid/gid/projid/layout version changes only */
760 if (!(attr->la_valid & LA_REMOTE_ATTR_SET))
763 if (!is_only_remote_trans(th)) {
/* SYNC flag: push the attr change to the OST synchronously via a
 * dedicated OUT RPC instead of the async llog path. */
764 if (attr->la_flags & LUSTRE_SET_SYNC_FL) {
765 struct ptlrpc_request *req = NULL;
766 struct osp_update_request *update = NULL;
767 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
769 update = osp_update_request_create(&osp->opd_dt_dev);
771 RETURN(PTR_ERR(update));
773 rc = OSP_UPDATE_RPC_PACK(env, out_attr_set_pack, update,
774 lu_object_fid(&dt->do_lu),
777 CERROR("%s: update error "DFID": rc = %d\n",
778 osp->opd_obd->obd_name,
779 PFID(lu_object_fid(&dt->do_lu)), rc);
781 osp_update_request_destroy(env, update);
785 rc = osp_remote_sync(env, osp, update, &req);
787 ptlrpc_req_finished(req);
789 osp_update_request_destroy(env, update);
/* Default local-transaction path: record the change in the llog
 * for asynchronous processing by the sync thread. */
791 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
793 rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);
794 /* send layout version to OST ASAP */
795 if (attr->la_valid & LA_LAYOUT_VERSION)
796 wake_up(&osp->opd_sync_waitq);
797 /* XXX: send new uid/gid to OST ASAP? */
802 /* It is for OST-object attr_set directly without updating
803 * local MDT-object attribute. It is usually used by LFSCK. */
804 rc = osp_md_attr_set(env, dt, attr, th);
805 CDEBUG(D_INFO, "(1) set attr "DFID": rc = %d\n",
806 PFID(&dt->do_lu.lo_header->loh_fid), rc);
811 /* Update the OSP object attributes cache. */
813 spin_lock(&o->opo_lock);
814 if (attr->la_valid & LA_UID) {
815 la->la_uid = attr->la_uid;
816 la->la_valid |= LA_UID;
819 if (attr->la_valid & LA_GID) {
820 la->la_gid = attr->la_gid;
821 la->la_valid |= LA_GID;
823 if (attr->la_valid & LA_PROJID) {
824 la->la_projid = attr->la_projid;
825 la->la_valid |= LA_PROJID;
827 spin_unlock(&o->opo_lock);
834 * Interpreter function for getting OSP object extended attribute asynchronously
836 * Called to interpret the result of an async mode RPC for getting the
837 * OSP object extended attribute.
839 * \param[in] env pointer to the thread context
840 * \param[in] reply pointer to the RPC reply
841 * \param[in] req pointer to the RPC request
842 * \param[in] obj pointer to the OSP object
843 * \param[out] data pointer to OSP object attributes cache
844 * \param[in] index the index of the attribute buffer in the reply
845 * \param[in] rc the result for handling the RPC
847 * \retval 0 for success
848 * \retval negative error number on failure
850 static int osp_xattr_get_interpterer(const struct lu_env *env,
851 struct object_update_reply *reply,
852 struct ptlrpc_request *req,
853 struct osp_object *obj,
854 void *data, int index, int rc)
856 struct osp_xattr_entry *oxe = data;
/* The whole interpretation runs under opo_lock so the cached entry
 * cannot change underneath us. */
858 spin_lock(&obj->opo_lock);
860 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
862 rc = object_update_result_data_get(reply, rbuf, index);
863 if (rc == -ENOENT || rc == -ENODATA || rc == 0) {
/* Unexpected error or reply value too big for the pre-allocated
 * entry: the cached entry cannot be filled in. */
869 if (unlikely(rc < 0) || !oxe_can_hold(oxe, rbuf->lb_len)) {
874 __osp_oac_xattr_assignment(obj, oxe, rbuf);
875 } else if (rc == -ENOENT || rc == -ENODATA) {
883 spin_unlock(&obj->opo_lock);
885 /* Put the reference obtained in the osp_declare_xattr_get(). */
886 osp_oac_xattr_put(oxe);
892 * Implement OSP dt_object_operations::do_declare_xattr_get() interface.
894 * Declare that the caller will get extended attribute from the specified
897 * This function will add an OUT_XATTR_GET sub-request to the per OSP
898 * based shared asynchronous request queue with the interpreter function:
899 * osp_xattr_get_interpterer().
901 * \param[in] env pointer to the thread context
902 * \param[in] dt pointer to the OSP layer dt_object
903 * \param[out] buf pointer to the lu_buf to hold the extended attribute
904 * \param[in] name the name for the expected extended attribute
906 * \retval 0 for success
907 * \retval negative error number on failure
909 static int osp_declare_xattr_get(const struct lu_env *env, struct dt_object *dt,
910 struct lu_buf *buf, const char *name)
912 struct osp_object *obj = dt2osp_obj(dt);
913 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
914 struct osp_xattr_entry *oxe;
918 LASSERT(buf != NULL);
919 LASSERT(name != NULL);
921 if (unlikely(buf->lb_len == 0))
/* Reserve (or locate) a cache entry; the interpreter will fill it
 * and drop the reference taken here. */
924 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
928 len = strlen(name) + 1;
929 mutex_lock(&osp->opd_async_requests_mutex);
930 rc = osp_insert_async_request(env, OUT_XATTR_GET, obj, 1,
931 &len, (const void **)&name,
933 osp_xattr_get_interpterer);
/* Insertion failed: drop the cache-entry reference ourselves. */
935 mutex_unlock(&osp->opd_async_requests_mutex);
936 osp_oac_xattr_put(oxe);
938 struct osp_update_request *our;
939 struct osp_update_request_sub *ours;
941 /* XXX: Currently, we trigger the batched async OUT
942 * RPC via dt_declare_xattr_get(). It is not
943 * perfect solution, but works well now.
945 * We will improve it in the future. */
946 our = osp->opd_async_requests;
947 ours = osp_current_object_update_request(our);
/* Detach the queued batch and send it outside the mutex. */
948 if (ours != NULL && ours->ours_req != NULL &&
949 ours->ours_req->ourq_count > 0) {
950 osp->opd_async_requests = NULL;
951 mutex_unlock(&osp->opd_async_requests_mutex);
952 rc = osp_unplug_async_request(env, osp, our);
954 mutex_unlock(&osp->opd_async_requests_mutex);
962 * Implement OSP layer dt_object_operations::do_xattr_get() interface.
964 * Get extended attribute from the specified MDT/OST object.
966 * If the extended attribute is in the OSP object attributes cache, then
967 * return the cached extended attribute directly. Otherwise it will get
968 * the extended attribute synchronously, if successful, add it to the OSP
969 * attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
971 * There is a race condition: some other thread has added the named extended
972 * attributed entry to the OSP object attributes cache during the current
973 * OUT_XATTR_GET handling. If such case happens, the OSP will replace the
974 * (just) existing extended attribute entry with the new replied one.
976 * \param[in] env pointer to the thread context
977 * \param[in] dt pointer to the OSP layer dt_object
978 * \param[out] buf pointer to the lu_buf to hold the extended attribute
979 * \param[in] name the name for the expected extended attribute
981 * \retval 0 for success
982 * \retval negative error number on failure
984 int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
985 struct lu_buf *buf, const char *name)
987 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
988 struct osp_object *obj = dt2osp_obj(dt);
989 struct dt_device *dev = &osp->opd_dt_dev;
990 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
991 struct osp_update_request *update = NULL;
992 struct ptlrpc_request *req = NULL;
993 struct object_update_reply *reply;
994 struct osp_xattr_entry *oxe = NULL;
995 const char *dname = osp_dto2name(obj);
996 int invalidated, rc = 0;
999 LASSERT(buf != NULL);
1000 LASSERT(name != NULL);
/* LFSCK fault-injection hook: simulate a bad network on the
 * configured node/index. */
1002 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NETWORK) &&
1003 osp->opd_index == cfs_fail_val) {
1004 if (is_ost_obj(&dt->do_lu)) {
1005 if (osp_dev2node(osp) == cfs_fail_val)
1008 if (strcmp(name, XATTR_NAME_LINK) == 0)
1013 if (unlikely(obj->opo_non_exist))
/* Sample the invalidation sequence before any RPC so a concurrent
 * invalidation can be detected afterwards. */
1016 invalidated = atomic_read(&obj->opo_invalidate_seq);
1018 oxe = osp_oac_xattr_find(obj, name, false);
/* Cache hit: serve the value (or its size, or -ENODATA/-ERANGE)
 * straight from the cached entry under opo_lock. */
1020 spin_lock(&obj->opo_lock);
1021 if (oxe->oxe_ready) {
1022 if (!oxe->oxe_exist)
1023 GOTO(unlock, rc = -ENODATA);
1025 if (buf->lb_buf == NULL)
1026 GOTO(unlock, rc = oxe->oxe_vallen);
1028 if (buf->lb_len < oxe->oxe_vallen)
1029 GOTO(unlock, rc = -ERANGE);
1031 memcpy(buf->lb_buf, oxe->oxe_value,
1034 GOTO(unlock, rc = oxe->oxe_vallen);
1037 spin_unlock(&obj->opo_lock);
1038 osp_oac_xattr_put(oxe);
1042 spin_unlock(&obj->opo_lock);
/* Cache miss: fetch the xattr synchronously via OUT_XATTR_GET. */
1044 update = osp_update_request_create(dev);
1046 GOTO(out_req, rc = PTR_ERR(update));
1048 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_get_pack, update,
1049 lu_object_fid(&dt->do_lu), name, buf->lb_len);
1051 CERROR("%s: Insert update error "DFID": rc = %d\n",
1052 dname, PFID(lu_object_fid(&dt->do_lu)), rc);
1056 rc = osp_remote_sync(env, osp, update, &req);
1058 down_read(&obj->opo_invalidate_sem);
1059 if (invalidated != atomic_read(&obj->opo_invalidate_seq)) {
1060 /* invalidation has been requested, we can't cache the result */
1063 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
1066 reply = req_capsule_server_sized_get(&req->rq_pill,
1067 &RMF_OUT_UPDATE_REPLY,
1068 OUT_UPDATE_REPLY_SIZE);
1069 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
1070 CERROR("%s: Wrong version %x expected %x "DFID
1071 ": rc = %d\n", dname, reply->ourp_magic,
1073 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
1074 GOTO(out, rc = -EPROTO);
1076 rc = object_update_result_data_get(reply, rbuf, 0);
/* Peer says the object itself is gone: remember non-existence. */
1081 if (rc == -ENOENT) {
1082 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
1083 obj->opo_non_exist = 1;
/* Cache the negative result so later lookups avoid an RPC. */
1087 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
1090 CWARN("%s: Fail to add xattr (%s) to cache for "
1091 DFID" (1): rc = %d\n", dname, name,
1092 PFID(lu_object_fid(&dt->do_lu)), rc);
1097 spin_lock(&obj->opo_lock);
1098 if (rc == -ENOENT || rc == -ENODATA) {
1104 spin_unlock(&obj->opo_lock);
1109 reply = req_capsule_server_sized_get(&req->rq_pill,
1110 &RMF_OUT_UPDATE_REPLY,
1111 OUT_UPDATE_REPLY_SIZE);
1112 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
1113 CERROR("%s: Wrong version %x expected %x "DFID": rc = %d\n",
1114 dname, reply->ourp_magic, UPDATE_REPLY_MAGIC,
1115 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
1117 GOTO(out, rc = -EPROTO);
1120 rc = object_update_result_data_get(reply, rbuf, 0);
1121 if (rc < 0 || rbuf->lb_len == 0) {
/* xattr missing on the peer: cache the -ENODATA result. */
1122 if (oxe == NULL && rc == -ENODATA) {
1123 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
1126 CWARN("%s: Fail to add xattr (%s) to cache for "
1127 DFID" (1): rc = %d\n", dname, name,
1128 PFID(lu_object_fid(&dt->do_lu)), rc);
1134 spin_lock(&obj->opo_lock);
1135 if (unlikely(rc == -ENODATA)) {
1141 spin_unlock(&obj->opo_lock);
1147 /* For detecting EA size. */
/* Got a value: (find-or-)create a cache entry sized for the reply
 * and install the value, possibly replacing a too-small entry. */
1152 oxe = osp_oac_xattr_find_or_add(obj, name, rbuf->lb_len);
1154 CWARN("%s: Fail to add xattr (%s) to "
1155 "cache for "DFID" (2): rc = %d\n",
1156 dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
1162 oxe = osp_oac_xattr_assignment(obj, oxe, rbuf);
1167 up_read(&obj->opo_invalidate_sem);
/* Copy the fetched value out to the caller's buffer. */
1170 if (rc > 0 && buf->lb_buf) {
1171 if (unlikely(buf->lb_len < rbuf->lb_len))
1174 memcpy(buf->lb_buf, rbuf->lb_buf, rbuf->lb_len);
1178 ptlrpc_req_finished(req);
1180 if (update && !IS_ERR(update))
1181 osp_update_request_destroy(env, update);
1184 osp_oac_xattr_put(oxe);
1190 * Implement OSP layer dt_object_operations::do_declare_xattr_set() interface.
1192 * Declare that the caller will set extended attribute to the specified
1195 * If it is non-remote transaction, it will add an OUT_XATTR_SET sub-request
1196 * to the OUT RPC that will be flushed when the transaction start. And if the
1197 * OSP attributes cache is initialized, then check whether the name extended
1198 * attribute entry exists in the cache or not. If yes, replace it; otherwise,
1199 * add the extended attribute to the cache.
1201 * \param[in] env pointer to the thread context
1202 * \param[in] dt pointer to the OSP layer dt_object
1203 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1204 * \param[in] name the name of the extended attribute to be set
1205 * \param[in] flag to indicate the detailed set operation: LU_XATTR_CREATE
1206 * or LU_XATTR_REPLACE or others
1207 * \param[in] th pointer to the transaction handler
1209 * \retval 0 for success
1210 * \retval negative error number on failure
int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
			  const struct lu_buf *buf, const char *name,
			  int flag, struct thandle *th)
{
	/* Nothing is packed at declaration time; just make sure the
	 * transaction handle carries an OSP update request so the later
	 * do_xattr_set() can pack its OUT_XATTR_SET sub-request into it. */
	return osp_trans_update_request_create(th);
}
1220 * Implement OSP layer dt_object_operations::do_xattr_set() interface.
1222 * Set extended attribute to the specified MDT/OST object.
1224 * Add an OUT_XATTR_SET sub-request into the OUT RPC that will be flushed in
1225 * the transaction stop. And if the OSP attributes cache is initialized, then
1226 * check whether the name extended attribute entry exists in the cache or not.
1227 * If yes, replace it; otherwise, add the extended attribute to the cache.
1229 * \param[in] env pointer to the thread context
1230 * \param[in] dt pointer to the OSP layer dt_object
1231 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1232 * \param[in] name the name of the extended attribute to be set
1233 * \param[in] fl to indicate the detailed set operation: LU_XATTR_CREATE
1234 * or LU_XATTR_REPLACE or others
1235 * \param[in] th pointer to the transaction handler
1237 * \retval 0 for success
1238 * \retval negative error number on failure
/*
 * Set an extended attribute on the remote object: pack an OUT_XATTR_SET
 * sub-request into the transaction's OSP update request, then refresh the
 * local xattr cache (linkEA is deliberately never cached).
 * NOTE(review): interior lines are elided in this chunk; comments below only
 * describe the statements that are visible.
 */
1240 int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
1241 const struct lu_buf *buf, const char *name, int fl,
1244 struct osp_object *o = dt2osp_obj(dt);
1245 struct osp_update_request *update;
1246 struct osp_xattr_entry *oxe;
/* The declare phase created the update request on this thandle. */
1250 update = thandle_to_osp_update_request(th);
1251 LASSERT(update != NULL);
1253 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
1254 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
/* Queue the OUT_XATTR_SET sub-request to be sent with the transaction. */
1256 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_set_pack, update,
1257 lu_object_fid(&dt->do_lu), buf, name, fl);
1261 /* Do not cache linkEA that may be self-adjusted by peers
1262 * under EA overflow case. */
1263 if (strcmp(name, XATTR_NAME_LINK) == 0) {
1264 oxe = osp_oac_xattr_find(o, name, true);
1266 osp_oac_xattr_put(oxe);
/* Non-linkEA: create or look up the cache entry, then store the value. */
1271 oxe = osp_oac_xattr_find_or_add(o, name, buf->lb_len);
1273 CWARN("%s: cannot cache xattr '%s' of "DFID"\n",
1274 osp_dto2name(o), name, PFID(lu_object_fid(&dt->do_lu)));
1279 oxe = osp_oac_xattr_assignment(o, oxe, buf);
1281 osp_oac_xattr_put(oxe);
1287 * Implement OSP layer dt_object_operations::do_declare_xattr_del() interface.
1289 * Declare that the caller will delete extended attribute on the specified
1292 * If it is non-remote transaction, it will add an OUT_XATTR_DEL sub-request
1293 * to the OUT RPC that will be flushed when the transaction start. And if the
1294 * name extended attribute entry exists in the OSP attributes cache, then remove
1295 * it from the cache.
1297 * \param[in] env pointer to the thread context
1298 * \param[in] dt pointer to the OSP layer dt_object
1299 * \param[in] name	the name of the extended attribute to be deleted
1300 * \param[in] th pointer to the transaction handler
1302 * \retval 0 for success
1303 * \retval negative error number on failure
int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
			  const char *name, struct thandle *th)
{
	/* No packing here; only attach an OSP update request to the
	 * transaction handle for the later do_xattr_del(). */
	return osp_trans_update_request_create(th);
}
1312 * Implement OSP layer dt_object_operations::do_xattr_del() interface.
1314 * Delete extended attribute on the specified MDT/OST object.
1316 * If it is remote transaction, it will add an OUT_XATTR_DEL sub-request into
1317 * the OUT RPC that will be flushed when the transaction stop. And if the name
1318 * extended attribute entry exists in the OSP attributes cache, then remove it
1321 * \param[in] env pointer to the thread context
1322 * \param[in] dt pointer to the OSP layer dt_object
1323 * \param[in] name	the name of the extended attribute to be deleted
1324 * \param[in] th pointer to the transaction handler
1326 * \retval 0 for success
1327 * \retval negative error number on failure
/*
 * Delete an extended attribute on the remote object: pack an OUT_XATTR_DEL
 * sub-request and drop the corresponding entry from the local xattr cache.
 * NOTE(review): interior lines are elided in this chunk.
 */
1329 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
1330 const char *name, struct thandle *th)
1332 struct osp_update_request *update;
1333 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1334 struct osp_object *o = dt2osp_obj(dt);
1335 struct osp_xattr_entry *oxe;
1338 update = thandle_to_osp_update_request(th);
1339 LASSERT(update != NULL);
/* Queue the OUT_XATTR_DEL sub-request for the remote side. */
1341 rc = OSP_UPDATE_RPC_PACK(env, out_xattr_del_pack, update, fid, name);
/* 'true' asks osp_oac_xattr_find() to unlink the entry from the list. */
1345 oxe = osp_oac_xattr_find(o, name, true);
1347 /* Drop the ref for entry on list. */
1348 osp_oac_xattr_put(oxe);
/*
 * Drop every cached xattr entry of @obj and clear the cached basic
 * attributes (la_valid = 0), all under opo_lock.
 */
1353 void osp_obj_invalidate_cache(struct osp_object *obj)
1355 struct osp_xattr_entry *oxe;
1356 struct osp_xattr_entry *tmp;
1358 spin_lock(&obj->opo_lock);
1359 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
1361 list_del_init(&oxe->oxe_list);
/* Release the reference held by the list. */
1362 osp_oac_xattr_put(oxe);
1364 obj->opo_attr.la_valid = 0;
1365 spin_unlock(&obj->opo_lock);
1369 * Implement OSP layer dt_object_operations::do_invalidate() interface.
1371 * Invalidate attributes cached on the specified MDT/OST object.
1373 * \param[in] env pointer to the thread context
1374 * \param[in] dt pointer to the OSP layer dt_object
1376 * \retval 0 for success
1377 * \retval negative error number on failure
/*
 * do_invalidate(): bump the invalidation sequence so in-flight attr/EA
 * fetches are discarded, then drop the attribute/xattr cache. Serialized
 * against attr/EA set via opo_invalidate_sem (write side).
 */
1379 int osp_invalidate(const struct lu_env *env, struct dt_object *dt)
1381 struct osp_object *obj = dt2osp_obj(dt);
1384 CDEBUG(D_HA, "Invalidate osp_object "DFID"\n",
1385 PFID(lu_object_fid(&dt->do_lu)));
1387 /* serialize attr/EA set vs. invalidation */
1388 down_write(&obj->opo_invalidate_sem);
1390 /* this should invalidate all in-flights */
1391 atomic_inc(&obj->opo_invalidate_seq);
1393 spin_lock(&obj->opo_lock);
1394 /* do not mark new objects stale */
1395 if (obj->opo_attr.la_valid)
1397 obj->opo_non_exist = 0;
1398 spin_unlock(&obj->opo_lock);
1400 osp_obj_invalidate_cache(obj);
1402 up_write(&obj->opo_invalidate_sem);
/*
 * Report whether the OSP object should be treated as stale.
 * NOTE(review): the branch body for the OST/non-exist case is elided in
 * this chunk; the fallback is the cached opo_stale flag.
 */
1407 bool osp_check_stale(struct dt_object *dt)
1409 struct osp_object *obj = dt2osp_obj(dt);
1411 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
1414 return obj->opo_stale;
1419 * Implement OSP layer dt_object_operations::do_declare_create() interface.
1421 * Declare that the caller will create the OST object.
1423 * If the transaction is a remote transaction and the FID for the OST-object
1424 * has been assigned already, then handle it as creating (remote) MDT object
1425 * via osp_md_declare_create(). This function is usually used for LFSCK
1426 * to re-create the lost OST object. Otherwise, if it is not replay case, the
1427 * OSP will reserve pre-created object for the subsequent create operation;
1428 * if the MDT side cached pre-created objects are less than some threshold,
1429 * then it will wakeup the pre-create thread.
1431 * \param[in] env pointer to the thread context
1432 * \param[in] dt pointer to the OSP layer dt_object
1433 * \param[in] attr the attribute for the object to be created
1434 * \param[in] hint pointer to the hint for creating the object, such as
1436 * \param[in] dof	pointer to the dt_object_format describing the object creation
1437 * \param[in] th pointer to the transaction handler
1439 * \retval 0 for success
1440 * \retval negative error number on failure
/*
 * do_declare_create(): reserve a precreated OST object (or route to
 * osp_md_declare_create() for a remote-trans create with a known FID) and
 * declare the write to the local "last used id" file.
 * NOTE(review): interior lines are elided in this chunk.
 */
1442 static int osp_declare_create(const struct lu_env *env, struct dt_object *dt,
1443 struct lu_attr *attr,
1444 struct dt_allocation_hint *hint,
1445 struct dt_object_format *dof, struct thandle *th)
1447 struct osp_thread_info *osi = osp_env_info(env);
1448 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1449 struct osp_object *o = dt2osp_obj(dt);
1450 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1451 struct thandle *local_th;
/* Remote transaction with a pre-assigned FID: treat as MDT-object create. */
1456 if (is_only_remote_trans(th) && !fid_is_zero(fid)) {
1457 LASSERT(fid_is_sane(fid));
1459 rc = osp_md_declare_create(env, dt, attr, hint, dof, th);
1464 /* should happen to non-0 OSP only so that at least one object
1465 * has been already declared in the scenario and LOD should
1467 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_CREATE_FAIL) && d->opd_index == 1)
1470 LASSERT(d->opd_last_used_oid_file);
1473 * There can be gaps in precreated ids and record to unlink llog
1474 * XXX: we do not handle gaps yet, implemented before solution
1475 * was found to be racy, so we disabled that. there is no
1476 * point in making useless but expensive llog declaration.
1478 /* rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th); */
1480 local_th = osp_get_storage_thandle(env, th, d);
1481 if (IS_ERR(local_th))
1482 RETURN(PTR_ERR(local_th));
1484 if (unlikely(!fid_is_zero(fid))) {
1485 /* replay case: caller knows fid */
1486 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off, NULL,
1488 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1489 &osi->osi_lb, osi->osi_off,
1495 * in declaration we need to reserve object so that we don't block
1496 * awaiting precreation RPC to complete
1498 rc = osp_precreate_reserve(env, d, !hint || hint->dah_can_block);
1500 * we also need to declare update to local "last used id" file for
1501 * recovery if object isn't used for a reason, we need to release
1502 * reservation, this can be made in osd_object_release()
1505 /* mark id is reserved: in create we don't want to talk
1507 LASSERT(o->opo_reserved == 0);
1508 o->opo_reserved = 1;
1510 /* common for all OSPs file historically */
1511 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off, NULL,
1513 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1514 &osi->osi_lb, osi->osi_off,
1517 /* not needed in the cache anymore */
1518 set_bit(LU_OBJECT_HEARD_BANSHEE,
1519 &dt->do_lu.lo_header->loh_flags);
1525 * Implement OSP layer dt_object_operations::do_create() interface.
1527 * Create the OST object.
1529 * If the transaction is a remote transaction and the FID for the OST-object
1530 * has been assigned already, then handle it as handling MDT object via the
1531 * osp_md_create(). For other cases, the OSP will assign FID to the
1532 * object to be created, and update last_used Object ID (OID) file.
1534 * \param[in] env pointer to the thread context
1535 * \param[in] dt pointer to the OSP layer dt_object
1536 * \param[in] attr the attribute for the object to be created
1537 * \param[in] hint pointer to the hint for creating the object, such as
1539 * \param[in] dof	pointer to the dt_object_format describing the object creation
1540 * \param[in] th pointer to the transaction handler
1542 * \retval 0 for success
1543 * \retval negative error number on failure
/*
 * do_create(): assign a FID (regular reserved case, or keep the caller's
 * FID in the replay/remote case), report any precreate id gap, and persist
 * the last used OID to the local file via a storage thandle.
 * NOTE(review): interior lines are elided in this chunk.
 */
1545 static int osp_create(const struct lu_env *env, struct dt_object *dt,
1546 struct lu_attr *attr, struct dt_allocation_hint *hint,
1547 struct dt_object_format *dof, struct thandle *th)
1549 struct osp_thread_info *osi = osp_env_info(env);
1550 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1551 struct osp_object *o = dt2osp_obj(dt);
1553 struct lu_fid *fid = &osi->osi_fid;
1554 struct thandle *local_th;
1557 if (is_only_remote_trans(th) &&
1558 !fid_is_zero(lu_object_fid(&dt->do_lu))) {
1559 LASSERT(fid_is_sane(lu_object_fid(&dt->do_lu)));
1561 rc = osp_md_create(env, dt, attr, hint, dof, th);
1563 o->opo_non_exist = 0;
1568 o->opo_non_exist = 0;
1569 if (o->opo_reserved) {
1570 /* regular case, fid is assigned holding transaction open */
1571 osp_object_assign_fid(env, d, o);
1574 memcpy(fid, lu_object_fid(&dt->do_lu), sizeof(*fid));
1576 LASSERTF(fid_is_sane(fid), "fid for osp_object %p is insane"DFID"!\n",
1579 if (!o->opo_reserved) {
1580 /* special case, id was assigned outside of transaction
1581 * see comments in osp_declare_attr_set */
1582 LASSERT(d->opd_pre != NULL);
1583 spin_lock(&d->opd_pre_lock);
1584 osp_update_last_fid(d, fid);
1585 spin_unlock(&d->opd_pre_lock);
1588 CDEBUG(D_INODE, "fid for osp_object %p is "DFID"\n", o, PFID(fid));
1590 /* If the precreate ends, it means it will be ready to rollover to
1591 * the new sequence soon, all the creation should be synchronized,
1592 * otherwise during replay, the replay fid will be inconsistent with
1593 * last_used/create fid */
1594 if (osp_precreate_end_seq(env, d) && osp_is_fid_client(d))
1597 local_th = osp_get_storage_thandle(env, th, d);
1598 if (IS_ERR(local_th))
1599 RETURN(PTR_ERR(local_th));
1601 * it's OK if the import is inactive by this moment - id was created
1602 * by OST earlier, we just need to maintain it consistently on the disk
1603 * once import is reconnected, OSP will claim this and other objects
1604 * used and OST either keep them, if they exist or recreate
1607 /* we might have lost precreated objects */
/* NOTE(review): parentheses look misplaced — likely meant
 * unlikely(d->opd_gap_count > 0). Harmless as written, since unlikely()
 * yields 0/1, but the branch-prediction hint is applied to the wrong
 * expression; the re-check under opd_pre_lock below is authoritative. */
1608 if (unlikely(d->opd_gap_count) > 0) {
1609 LASSERT(d->opd_pre != NULL);
1610 spin_lock(&d->opd_pre_lock);
1611 if (d->opd_gap_count > 0) {
1612 int count = d->opd_gap_count;
1614 rc = ostid_set_id(&osi->osi_oi,
1615 fid_oid(&d->opd_gap_start_fid));
1617 spin_unlock(&d->opd_pre_lock);
1620 d->opd_gap_count = 0;
1621 spin_unlock(&d->opd_pre_lock);
1623 CDEBUG(D_HA, "Writing gap "DFID"+%d in llog\n",
1624 PFID(&d->opd_gap_start_fid), count);
1625 /* real gap handling is disabled until ORI-692 will be
1626 * fixed, now we only report gaps */
1628 spin_unlock(&d->opd_pre_lock);
1632 /* Only need update last_used oid file, seq file will only be updated
1633 * during seq rollover */
1634 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off,
1635 &d->opd_last_id, d->opd_index);
1637 rc = dt_record_write(env, d->opd_last_used_oid_file, &osi->osi_lb,
1638 &osi->osi_off, local_th);
1640 CDEBUG(D_HA, "%s: Wrote last used FID: "DFID", index %d: %d\n",
1641 d->opd_obd->obd_name, PFID(fid), d->opd_index, rc);
1647 * Implement OSP layer dt_object_operations::do_declare_destroy() interface.
1649 * Declare that the caller will destroy the specified OST object.
1651 * The OST object destroy will be handled via llog asynchronously. This
1652 * function will declare the credits for generating MDS_UNLINK64_REC llog.
1654 * \param[in] env pointer to the thread context
1655 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1656 * \param[in] th pointer to the transaction handler
1658 * \retval 0 for success
1659 * \retval negative error number on failure
/*
 * do_declare_destroy(): declare the MDS_UNLINK64_REC llog record used for
 * the asynchronous OST-object destroy (skipped under the LFSCK fail-check).
 */
1661 int osp_declare_destroy(const struct lu_env *env, struct dt_object *dt,
1664 struct osp_object *o = dt2osp_obj(dt);
1665 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Destroy-via-llog applies to OST connections only, never MDT. */
1670 LASSERT(!osp->opd_connect_mdt);
1672 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ))
1673 rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
1679 * Implement OSP layer dt_object_operations::do_destroy() interface.
1681 * Destroy the specified OST object.
1683 * The OSP generates a MDS_UNLINK64_REC record in the llog. There
1684 * will be some dedicated thread to handle the llog asynchronously.
1686 * It also marks the object as non-cached.
1688 * \param[in] env pointer to the thread context
1689 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1690 * \param[in] th pointer to the transaction handler
1692 * \retval 0 for success
1693 * \retval negative error number on failure
/*
 * do_destroy(): mark the object non-existent, queue the MDS_UNLINK64_REC
 * llog record for the sync thread, and evict the object from the LU cache.
 */
1695 static int osp_destroy(const struct lu_env *env, struct dt_object *dt,
1698 struct osp_object *o = dt2osp_obj(dt);
1699 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
1704 o->opo_non_exist = 1;
1706 LASSERT(!osp->opd_connect_mdt);
1708 if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_LOST_MDTOBJ)) {
1709 /* once transaction is committed put proper command on
1710 * the queue going to our OST. */
1711 rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
1716 /* not needed in cache any more */
1717 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
/* dio_lookup for the orphan index (body elided in this chunk). */
1722 static int osp_orphan_index_lookup(const struct lu_env *env,
1723 struct dt_object *dt,
1725 const struct dt_key *key)
/* dio_declare_insert for the orphan index (body elided in this chunk). */
1730 static int osp_orphan_index_declare_insert(const struct lu_env *env,
1731 struct dt_object *dt,
1732 const struct dt_rec *rec,
1733 const struct dt_key *key,
1734 struct thandle *handle)
/* dio_insert for the orphan index (body elided in this chunk). */
1739 static int osp_orphan_index_insert(const struct lu_env *env,
1740 struct dt_object *dt,
1741 const struct dt_rec *rec,
1742 const struct dt_key *key,
1743 struct thandle *handle)
/* dio_declare_delete for the orphan index (body elided in this chunk). */
1748 static int osp_orphan_index_declare_delete(const struct lu_env *env,
1749 struct dt_object *dt,
1750 const struct dt_key *key,
1751 struct thandle *handle)
/* dio_delete for the orphan index (body elided in this chunk). */
1756 static int osp_orphan_index_delete(const struct lu_env *env,
1757 struct dt_object *dt,
1758 const struct dt_key *key,
1759 struct thandle *handle)
1765 * Initialize the OSP layer index iteration.
1767 * \param[in] env pointer to the thread context
1768 * \param[in] dt pointer to the index object to be iterated
1769 * \param[in] attr unused
1771 * \retval pointer to the iteration structure
1772 * \retval negative error number on failure
/*
 * Allocate and initialize an osp_it iterator: entry position starts at -1
 * (before the first record) and the requested attr is remembered for the
 * fetch RPC. NOTE(review): allocation lines are elided in this chunk.
 */
1774 struct dt_it *osp_it_init(const struct lu_env *env, struct dt_object *dt,
1781 return ERR_PTR(-ENOMEM);
1783 it->ooi_pos_ent = -1;
1785 it->ooi_attr = attr;
1787 return (struct dt_it *)it;
1791 * Finalize the OSP layer index iteration.
1793 * \param[in] env pointer to the thread context
1794 * \param[in] di pointer to the iteration structure
/*
 * Release the iterator's page array: the currently kmapped page is
 * unmapped first (elided kunmap between lines 1806 and 1808), every
 * allocated page is freed, then the pointer array itself.
 */
1796 void osp_it_fini(const struct lu_env *env, struct dt_it *di)
1798 struct osp_it *it = (struct osp_it *)di;
1799 struct page **pages = it->ooi_pages;
1800 int npages = it->ooi_total_npages;
1803 if (pages != NULL) {
1804 for (i = 0; i < npages; i++) {
1805 if (pages[i] != NULL) {
1806 if (pages[i] == it->ooi_cur_page) {
1808 it->ooi_cur_page = NULL;
1810 __free_page(pages[i]);
1813 OBD_FREE_PTR_ARRAY(pages, npages);
1819 * Get more records for the iteration from peer.
1821 * The new records will be filled in an array of pages. The OSP side
1822 * allows 1MB bulk data to be transferred.
1824 * \param[in] env pointer to the thread context
1825 * \param[in] it pointer to the iteration structure
1827 * \retval 0 for success
1828 * \retval negative error number on failure
/*
 * Fetch the next batch of index records from the peer via an OBD_IDX_READ
 * RPC with a 1MB bulk sink. On success the pages, valid page count, record
 * size and next hash cursor are stored in the iterator.
 * NOTE(review): error-path and label lines are elided in this chunk.
 */
1830 static int osp_it_fetch(const struct lu_env *env, struct osp_it *it)
1832 struct lu_device *dev = it->ooi_obj->do_lu.lo_dev;
1833 struct osp_device *osp = lu2osp_dev(dev);
1834 struct page **pages;
1835 struct ptlrpc_request *req = NULL;
1836 struct ptlrpc_bulk_desc *desc;
1837 struct idx_info *ii;
/* Cap the transfer at 1MB regardless of OFD_MAX_BRW_SIZE. */
1844 npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
1845 npages /= PAGE_SIZE;
1847 OBD_ALLOC_PTR_ARRAY(pages, npages);
1851 it->ooi_pages = pages;
1852 it->ooi_total_npages = npages;
1853 for (i = 0; i < npages; i++) {
1854 pages[i] = alloc_page(GFP_NOFS);
1855 if (pages[i] == NULL)
1859 req = ptlrpc_request_alloc(osp->opd_obd->u.cli.cl_import,
1864 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
1866 ptlrpc_request_free(req);
1870 osp_set_req_replay(osp, req);
1871 req->rq_request_portal = OUT_PORTAL;
1872 ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
1873 memset(ii, 0, sizeof(*ii));
1874 if (fid_is_last_id(lu_object_fid(&it->ooi_obj->do_lu))) {
1875 /* LFSCK will iterate orphan object[FID_SEQ_LAYOUT_BTREE,
1876 * ost_index, 0] with LAST_ID FID, so it needs to replace
1877 * the FID with orphan FID here */
1878 ii->ii_fid.f_seq = FID_SEQ_LAYOUT_RBTREE;
1879 ii->ii_fid.f_oid = osp->opd_index;
1880 ii->ii_fid.f_ver = 0;
1881 ii->ii_flags = II_FL_NOHASH;
1882 ii->ii_attrs = osp_dev2node(osp);
1884 ii->ii_fid = *lu_object_fid(&it->ooi_obj->do_lu);
1885 ii->ii_flags = II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
1887 ii->ii_attrs = it->ooi_attr;
1889 ii->ii_magic = IDX_INFO_MAGIC;
1890 ii->ii_count = npages * LU_PAGE_COUNT;
/* Resume from where the previous fetch stopped. */
1891 ii->ii_hash_start = it->ooi_next;
1893 ptlrpc_at_set_req_timeout(req);
1895 desc = ptlrpc_prep_bulk_imp(req, npages, 1,
1896 PTLRPC_BULK_PUT_SINK,
1898 &ptlrpc_bulk_kiov_pin_ops);
1900 GOTO(out, rc = -ENOMEM);
1902 for (i = 0; i < npages; i++)
1903 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
1906 ptlrpc_request_set_replen(req);
1907 rc = ptlrpc_queue_wait(req);
1911 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
1912 req->rq_bulk->bd_nob_transferred);
/* Validate the reply header before trusting the returned page count. */
1917 ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
1918 if (ii->ii_magic != IDX_INFO_MAGIC)
1919 GOTO(out, rc = -EPROTO);
1921 npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
1922 (PAGE_SHIFT - LU_PAGE_SHIFT);
1923 if (npages > it->ooi_total_npages) {
1924 CERROR("%s: returned more pages than expected, %u > %u\n",
1925 osp->opd_obd->obd_name, npages, it->ooi_total_npages);
1926 GOTO(out, rc = -EINVAL);
1929 it->ooi_rec_size = ii->ii_recsize;
1930 it->ooi_valid_npages = npages;
1931 if (req_capsule_rep_need_swab(&req->rq_pill))
1934 it->ooi_next = ii->ii_hash_end;
1937 ptlrpc_req_finished(req);
1943 * Move the iteration cursor to the next lu_page.
1945 * One system page (PAGE_SIZE) may contain multiple lu_page (4KB),
1946 * that depends on the LU_PAGE_COUNT. If it is not the last lu_page
1947 * in current system page, then move the iteration cursor to the next
1948 * lu_page in current system page. Otherwise, if there are more system
1949 * pages in the cache, then move the iteration cursor to the next system
1950 * page. If all the cached records (pages) have been iterated, then fetch
1951 * more records via osp_it_fetch().
1953 * \param[in] env pointer to the thread context
1954 * \param[in] di pointer to the iteration structure
1956 * \retval positive for end of the directory
1957 * \retval 0 for success
1958 * \retval negative error number on failure
/*
 * Advance the iterator to the next lu_page: first within the current
 * system page, then to the next cached system page, and finally — when the
 * cache is exhausted — free the page array and fetch more records via
 * osp_it_fetch() unless the peer reported II_END_OFF.
 * NOTE(review): loop/goto framing lines are elided in this chunk.
 */
1960 int osp_it_next_page(const struct lu_env *env, struct dt_it *di)
1962 struct osp_it *it = (struct osp_it *)di;
1963 struct lu_idxpage *idxpage;
1964 struct page **pages;
1970 idxpage = it->ooi_cur_idxpage;
1971 if (idxpage != NULL) {
1972 if (idxpage->lip_nr == 0)
1975 if (it->ooi_pos_ent < idxpage->lip_nr) {
1976 CDEBUG(D_INFO, "ooi_pos %d nr %d\n",
1977 (int)it->ooi_pos_ent, (int)idxpage->lip_nr);
1980 it->ooi_cur_idxpage = NULL;
1981 it->ooi_pos_lu_page++;
/* Try the next lu_page inside the current kmapped system page. */
1984 if (it->ooi_pos_lu_page < LU_PAGE_COUNT) {
1985 it->ooi_cur_idxpage = (void *)it->ooi_cur_page +
1986 LU_PAGE_SIZE * it->ooi_pos_lu_page;
1988 lustre_swab_lip_header(it->ooi_cur_idxpage);
1989 if (it->ooi_cur_idxpage->lip_magic != LIP_MAGIC) {
1990 struct osp_device *osp =
1991 lu2osp_dev(it->ooi_obj->do_lu.lo_dev);
1993 CERROR("%s: invalid magic (%x != %x) for page "
1994 "%d/%d while read layout orphan index\n",
1995 osp->opd_obd->obd_name,
1996 it->ooi_cur_idxpage->lip_magic,
1997 LIP_MAGIC, it->ooi_pos_page,
1998 it->ooi_pos_lu_page);
1999 /* Skip this lu_page next time. */
2000 it->ooi_pos_ent = idxpage->lip_nr - 1;
2003 it->ooi_pos_ent = -1;
/* Current system page exhausted: unmap it and move on. */
2007 kunmap(it->ooi_cur_page);
2008 it->ooi_cur_page = NULL;
2012 pages = it->ooi_pages;
2013 if (it->ooi_pos_page < it->ooi_valid_npages) {
2014 it->ooi_cur_page = kmap(pages[it->ooi_pos_page]);
2015 it->ooi_pos_lu_page = 0;
/* All cached pages consumed: release them before refetching. */
2019 for (i = 0; i < it->ooi_total_npages; i++) {
2020 if (pages[i] != NULL)
2021 __free_page(pages[i]);
2023 OBD_FREE_PTR_ARRAY(pages, it->ooi_total_npages);
2025 it->ooi_pos_page = 0;
2026 it->ooi_total_npages = 0;
2027 it->ooi_valid_npages = 0;
2030 it->ooi_cur_page = NULL;
2031 it->ooi_cur_idxpage = NULL;
2032 it->ooi_pages = NULL;
2035 if (it->ooi_next == II_END_OFF)
2038 rc = osp_it_fetch(env, it);
2046 * Move the iteration cursor to the next record.
2048 * If there are more records in the lu_page, then move the iteration
2049 * cursor to the next record directly. Otherwise, move the iteration
2050 * cursor to the record in the next lu_page via osp_it_next_page()
2052 * \param[in] env pointer to the thread context
2053 * \param[in] di pointer to the iteration structure
2055 * \retval positive for end of the directory
2056 * \retval 0 for success
2057 * \retval negative error number on failure
/*
 * Move to the next orphan record inside the current lu_page; the entry
 * format (v1/v2/v3) is selected by the record size reported by the peer.
 * Falls through to osp_it_next_page() when the lu_page is exhausted.
 * NOTE(review): position-increment and swab-guard lines are elided here.
 */
2059 static int osp_orphan_it_next(const struct lu_env *env, struct dt_it *di)
2061 struct osp_it *it = (struct osp_it *)di;
2062 struct lu_idxpage *idxpage;
2067 idxpage = it->ooi_cur_idxpage;
2068 if (idxpage != NULL) {
2069 if (idxpage->lip_nr == 0)
2073 if (it->ooi_pos_ent < idxpage->lip_nr) {
2074 if (it->ooi_rec_size ==
2075 sizeof(struct lu_orphan_rec_v3)) {
2077 (struct lu_orphan_ent_v3 *)idxpage->lip_entries+
2080 lustre_swab_orphan_ent_v3(it->ooi_ent);
2081 } else if (it->ooi_rec_size ==
2082 sizeof(struct lu_orphan_rec_v2)) {
2084 (struct lu_orphan_ent_v2 *)idxpage->lip_entries+
2087 lustre_swab_orphan_ent_v2(it->ooi_ent);
2090 (struct lu_orphan_ent *)idxpage->lip_entries +
2093 lustre_swab_orphan_ent(it->ooi_ent);
2099 rc = osp_it_next_page(env, di);
/* dt_it_ops::get for OSP iterators (body elided in this chunk). */
2106 int osp_it_get(const struct lu_env *env, struct dt_it *di,
2107 const struct dt_key *key)
/* dt_it_ops::put for OSP iterators (body elided in this chunk). */
2112 void osp_it_put(const struct lu_env *env, struct dt_it *di)
/*
 * Return the key (a lu_fid, loe_key) of the current orphan entry, or the
 * elided fallback when no entry is positioned.
 */
2116 static struct dt_key *osp_orphan_it_key(const struct lu_env *env,
2117 const struct dt_it *di)
2119 struct osp_it *it = (struct osp_it *)di;
2120 struct lu_orphan_ent *ent = (struct lu_orphan_ent *)it->ooi_ent;
2122 if (likely(ent != NULL))
2123 return (struct dt_key *)(&ent->loe_key);
2128 static int osp_orphan_it_key_size(const struct lu_env *env,
2129 const struct dt_it *di)
2131 return sizeof(struct lu_fid);
/*
 * Copy the current orphan record into @rec; the copied struct size matches
 * the record size (v1/v2/v3) reported by the peer in osp_it_fetch().
 */
2134 static int osp_orphan_it_rec(const struct lu_env *env, const struct dt_it *di,
2135 struct dt_rec *rec, __u32 attr)
2137 struct osp_it *it = (struct osp_it *)di;
2139 if (likely(it->ooi_ent)) {
2140 if (it->ooi_rec_size == sizeof(struct lu_orphan_rec_v3)) {
2141 struct lu_orphan_ent_v3 *ent =
2142 (struct lu_orphan_ent_v3 *)it->ooi_ent;
2144 *(struct lu_orphan_rec_v3 *)rec = ent->loe_rec;
2145 } else if (it->ooi_rec_size ==
2146 sizeof(struct lu_orphan_rec_v2)) {
2147 struct lu_orphan_ent_v2 *ent =
2148 (struct lu_orphan_ent_v2 *)it->ooi_ent;
2150 *(struct lu_orphan_rec_v2 *)rec = ent->loe_rec;
2152 struct lu_orphan_ent *ent =
2153 (struct lu_orphan_ent *)it->ooi_ent;
2155 *(struct lu_orphan_rec *)rec = ent->loe_rec;
2163 __u64 osp_it_store(const struct lu_env *env, const struct dt_it *di)
2165 struct osp_it *it = (struct osp_it *)di;
2167 return it->ooi_next;
2171 * Locate the iteration cursor to the specified position (cookie).
2173 * \param[in] env pointer to the thread context
2174 * \param[in] di pointer to the iteration structure
2175 * \param[in] hash the specified position
2177 * \retval positive number for locating to the exactly position
2179 * \retval 0 for arriving at the end of the iteration
2180 * \retval negative error number on failure
/*
 * Seek to @hash by setting the fetch cursor, then advance to the first
 * record at or after that position via osp_orphan_it_next().
 */
2182 int osp_orphan_it_load(const struct lu_env *env, const struct dt_it *di,
2185 struct osp_it *it = (struct osp_it *)di;
2188 it->ooi_next = hash;
2189 rc = osp_orphan_it_next(env, (struct dt_it *)di);
/* dt_it_ops::key_rec for OSP iterators (rest of signature/body elided). */
2199 int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
/*
 * Index operations installed by osp_index_try() for LAST_ID/IDIF orphan
 * indices; insert/delete are the stubs above, iteration is the osp_it_*
 * machinery.
 */
2205 static const struct dt_index_operations osp_orphan_index_ops = {
2206 .dio_lookup = osp_orphan_index_lookup,
2207 .dio_declare_insert = osp_orphan_index_declare_insert,
2208 .dio_insert = osp_orphan_index_insert,
2209 .dio_declare_delete = osp_orphan_index_declare_delete,
2210 .dio_delete = osp_orphan_index_delete,
2212 .init = osp_it_init,
2213 .fini = osp_it_fini,
2214 .next = osp_orphan_it_next,
2217 .key = osp_orphan_it_key,
2218 .key_size = osp_orphan_it_key_size,
2219 .rec = osp_orphan_it_rec,
2220 .store = osp_it_store,
2221 .load = osp_orphan_it_load,
2222 .key_rec = osp_it_key_rec,
2227 * Implement OSP layer dt_object_operations::do_index_try() interface.
2229 * Negotiate the index type.
2231 * If the target index is an IDIF object, then use osp_orphan_index_ops.
2232 * Otherwise, assign osp_md_index_ops to the dt_object::do_index_ops.
2233 * (\see lustre/include/lustre_fid.h for IDIF.)
2235 * \param[in] env pointer to the thread context
2236 * \param[in] dt pointer to the OSP layer dt_object
2237 * \param[in] feat unused
2239 * \retval 0 for success
/*
 * do_index_try(): pick the index ops — orphan ops for a LAST_ID IDIF FID,
 * the generic OSP MD index ops otherwise.
 */
2241 static int osp_index_try(const struct lu_env *env,
2242 struct dt_object *dt,
2243 const struct dt_index_features *feat)
2245 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2247 if (fid_is_last_id(fid) && fid_is_idif(fid))
2248 dt->do_index_ops = &osp_orphan_index_ops;
2250 dt->do_index_ops = &osp_md_index_ops;
/* dt_object operations installed for OST objects (see osp_object_init()). */
2254 static const struct dt_object_operations osp_obj_ops = {
2255 .do_declare_attr_get = osp_declare_attr_get,
2256 .do_attr_get = osp_attr_get,
2257 .do_declare_attr_set = osp_declare_attr_set,
2258 .do_attr_set = osp_attr_set,
2259 .do_declare_xattr_get = osp_declare_xattr_get,
2260 .do_xattr_get = osp_xattr_get,
2261 .do_declare_xattr_set = osp_declare_xattr_set,
2262 .do_xattr_set = osp_xattr_set,
2263 .do_declare_create = osp_declare_create,
2264 .do_create = osp_create,
2265 .do_declare_destroy = osp_declare_destroy,
2266 .do_destroy = osp_destroy,
2267 .do_index_try = osp_index_try,
2271 * Implement OSP layer lu_object_operations::loo_object_init() interface.
2273 * Initialize the object.
2275 * If it is a remote MDT object, then call do_attr_get() to fetch
2276 * the attribute from the peer.
2278 * \param[in] env pointer to the thread context
2279 * \param[in] o pointer to the OSP layer lu_object
2280 * \param[in] conf unused
2282 * \retval 0 for success
2283 * \retval negative error number on failure
/*
 * loo_object_init(): mark the object remote, install OST vs. MD ops
 * depending on is_ost_obj(), and for the MD case (unless LOC_F_NEW) probe
 * existence via do_attr_get(). NOTE(review): interior lines are elided.
 */
2285 static int osp_object_init(const struct lu_env *env, struct lu_object *o,
2286 const struct lu_object_conf *conf)
2288 struct osp_object *po = lu2osp_obj(o);
2293 o->lo_header->loh_attr |= LOHA_REMOTE;
2295 if (is_ost_obj(o)) {
2296 po->opo_obj.do_ops = &osp_obj_ops;
2298 struct lu_attr *la = &osp_env_info(env)->osi_attr;
2300 po->opo_obj.do_ops = &osp_md_obj_ops;
2301 po->opo_obj.do_body_ops = &osp_md_body_ops;
/* A LOC_F_NEW object is known not to exist yet — skip the attr probe. */
2303 if (conf != NULL && conf->loc_flags & LOC_F_NEW) {
2304 po->opo_non_exist = 1;
2306 rc = po->opo_obj.do_ops->do_attr_get(env, lu2dt_obj(o),
2309 o->lo_header->loh_attr |=
2310 LOHA_EXISTS | (la->la_mode & S_IFMT);
2311 if (rc == -ENOENT) {
2312 po->opo_non_exist = 1;
/*
 * RCU callback scheduled from osp_object_free(): return the osp_object to
 * its slab cache after the grace period.
 */
2321 static void osp_object_free_rcu(struct rcu_head *head)
2323 struct osp_object *obj = container_of(head, struct osp_object,
2324 opo_header.loh_rcu);
2326 kmem_cache_free(osp_object_kmem, obj);
2330 * Implement OSP layer lu_object_operations::loo_object_free() interface.
2332 * Finalize the object.
2334 * If the OSP object has attributes cache, then destroy the cache.
2335 * Free the object finally.
2337 * \param[in] env pointer to the thread context
2338 * \param[in] o pointer to the OSP layer lu_object
/*
 * loo_object_free(): tear down the dt/lu object, free any remaining cached
 * xattr entries (each must hold only the list's own reference), then free
 * the object itself via call_rcu().
 */
2340 static void osp_object_free(const struct lu_env *env, struct lu_object *o)
2342 struct osp_object *obj = lu2osp_obj(o);
2343 struct lu_object_header *h = o->lo_header;
2344 struct osp_xattr_entry *oxe;
2345 struct osp_xattr_entry *tmp;
2348 dt_object_fini(&obj->opo_obj);
2350 lu_object_header_fini(h);
2351 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
2352 list_del_init(&oxe->oxe_list);
2353 count = atomic_read(&oxe->oxe_ref);
/* Only the list reference may remain; anything else is a leaked ref. */
2354 LASSERTF(count == 1,
2355 "Still has %d users on the xattr entry %.*s\n",
2356 count-1, (int)oxe->oxe_namelen, oxe->oxe_name);
2358 osp_oac_xattr_free(oxe);
2360 OBD_FREE_PRE(obj, sizeof(*obj), "slab-freed");
2361 call_rcu(&obj->opo_header.loh_rcu, osp_object_free_rcu);
2365 * Implement OSP layer lu_object_operations::loo_object_release() interface.
2367 * Cleanup (not free) the object.
2369 * If it is a reserved object but failed to be created, or it is an OST
2370 * object, then mark the object as non-cached.
2372 * \param[in] env pointer to the thread context
2373 * \param[in] o pointer to the OSP layer lu_object
/*
 * loo_object_release(): give back an unused precreate reservation (waking
 * the precreate thread if it may be blocked on opd_pre_reserved), and mark
 * reserved-but-uncreated or OST objects as not worth caching.
 */
2375 static void osp_object_release(const struct lu_env *env, struct lu_object *o)
2377 struct osp_object *po = lu2osp_obj(o);
2378 struct osp_device *d = lu2osp_dev(o->lo_dev);
2383 * release reservation if object was declared but not created
2384 * this may require lu_object_put() in LOD
2386 if (unlikely(po->opo_reserved)) {
2387 LASSERT(d->opd_pre != NULL);
2388 LASSERT(d->opd_pre_reserved > 0);
2389 spin_lock(&d->opd_pre_lock);
2390 d->opd_pre_reserved--;
2391 spin_unlock(&d->opd_pre_lock);
2394 * Check that osp_precreate_cleanup_orphans is not blocked
2395 * due to opd_pre_reserved > 0.
2397 if (unlikely(d->opd_pre_reserved == 0 &&
2398 (d->opd_pre_recovering || d->opd_pre_status)))
2399 wake_up(&d->opd_pre_waitq);
2401 /* not needed in cache any more */
2402 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
2406 /* XXX: Currently, NOT cache OST-object on MDT because:
2407 * 1. it is not often accessed on MDT.
2408 * 2. avoid up layer (such as LFSCK) to load too many
2409 * once-used OST-objects. */
2410 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
/* loo_object_print(): emit a one-line "osp-object@<ptr>" description. */
2415 static int osp_object_print(const struct lu_env *env, void *cookie,
2416 lu_printer_t p, const struct lu_object *l)
2418 const struct osp_object *o = lu2osp_obj((struct lu_object *)l);
2420 return (*p)(env, cookie, LUSTRE_OSP_NAME"-object@%p", o);
/* loo_object_invariant() stub for OSP (body elided in this chunk). */
2423 static int osp_object_invariant(const struct lu_object *o)
2428 const struct lu_object_operations osp_lu_obj_ops = {
2429 .loo_object_init = osp_object_init,
2430 .loo_object_free = osp_object_free,
2431 .loo_object_release = osp_object_release,
2432 .loo_object_print = osp_object_print,
2433 .loo_object_invariant = osp_object_invariant