4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2016, Intel Corporation.
29 * lustre/osp/osp_object.c
31 * Lustre OST Proxy Device (OSP) is the agent on the local MDT for the OST
34 * OSP object attributes cache
35 * ---------------------------
 * An OSP object is the stub of a remote OST-object or MDT-object. Both the
 * attributes and the extended attributes are stored remotely on the peer.
 * It is inefficient to send an RPC to the peer to fetch those attributes
 * every time get_attr()/get_xattr() is called; for a large system, the
 * LFSCK synchronous-mode scanning would be prohibitively slow that way.
 * So the OSP maintains an OSP object attributes cache to keep some of the
 * attributes on the local MDT. The cache is organized against the OSP
 * object as follows:
46 * struct osp_xattr_entry {
47 * struct list_head oxe_list;
53 * unsigned int oxe_exist:1,
60 * struct lu_attr opo_attr;
61 * struct list_head opo_xattr_list;
62 * spinlock_t opo_lock;
 * The basic attributes, such as owner/mode/flags, are stored in
 * osp_object::opo_attr. The extended attributes are stored as
 * osp_xattr_entry structures: every extended attribute has its own
 * osp_xattr_entry, and all of them are linked into the
 * osp_object::opo_xattr_list. The whole OSP object attributes cache
 * is protected by osp_object::opo_lock.
 * Not all OSP objects have an attributes cache because maintaining
 * the cache consumes some resources. Currently, the OSP object
 * attributes cache is initialized when the attributes or the extended
 * attributes are pre-fetched via osp_declare_attr_get() or
 * osp_declare_xattr_get(). That is usually done for LFSCK purposes,
 * but the cache can be shared by other users as well.
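 *
 * As a simplified sketch only (the authoritative logic lives in
 * osp_attr_get() and osp_xattr_get() below), a cache lookup is expected
 * to behave roughly as follows, with osp_object::opo_lock protecting both
 * the basic attributes and the xattr list:
 *
 *     spin_lock(&obj->opo_lock);
 *     if (obj->opo_attr.la_valid != 0 && !obj->opo_stale) {
 *             *attr = obj->opo_attr;       (served from the local cache)
 *             spin_unlock(&obj->opo_lock);
 *             return 0;
 *     }
 *     spin_unlock(&obj->opo_lock);
 *     ... otherwise fall back to a synchronous OUT RPC to the peer ...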
 * XXX: do NOT prepare the OUT RPC for a remote transaction in the declare
 * phase (please refer to the comment of osp_trans_create() for remote
 * transactions).
 * According to our current transaction/dt_object_lock framework (which
 * makes cross-MDT modifications workable for DNE1), the transaction
 * sponsor starts the transaction first, and then tries to acquire the
 * related dt_object_lock if needed. Under such rules, if we wanted to
 * prepare the OUT RPC in the transaction declare phase, the related
 * attr/xattr would have to be known without holding dt_object_lock. But
 * that condition does not hold for some remote transaction cases.
 * For example:
 * In the linkEA repair case (done by LFSCK), before the LFSCK thread has
 * obtained the dt_object_lock on the target MDT-object, it cannot know
 * whether the MDT-object has a linkEA at all, nor whether that linkEA is
 * valid.
 * Since the LFSCK thread cannot hold dt_object_lock before the remote
 * transaction starts (otherwise there is a potential deadlock), it cannot
 * prepare the related OUT RPC for the repair during the declare phase as
 * other normal transactions do.
 * To resolve the trouble, the OSP prepares the related OUT RPC after the
 * remote transaction has started, and triggers the remote update (sends
 * the RPC) at trans_stop. Then upper-layer users, such as LFSCK, can
 * follow the general trans_start/dt_object_lock rules for repairing
 * linkEA inconsistencies without distinguishing remote objects from
 * local ones.
 * In fact, the above solution for remote transactions should be the normal
 * model even without considering DNE1. The trouble brought in by DNE1 will
 * be resolved in DNE2; at that time, this patch can be removed.
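 *
 * In other words, for a remote transaction an upper-layer user (for
 * example, LFSCK repairing a linkEA) is expected to follow roughly the
 * generic sequence sketched below; the prepared OUT RPC is only sent at
 * dt_trans_stop() time. This is a simplified sketch: error handling and
 * the exact arguments are omitted.
 *
 *     th = dt_trans_create(env, dev);
 *     dt_declare_xattr_set(env, obj, buf, XATTR_NAME_LINK, 0, th);
 *     dt_trans_start(env, dev, th);
 *     dt_write_lock(env, obj, 0);
 *     dt_xattr_set(env, obj, buf, XATTR_NAME_LINK, 0, th);
 *     dt_write_unlock(env, obj);
 *     dt_trans_stop(env, dev, th);      <-- the prepared OUT RPC goes out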
113 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
114 * Author: Mikhail Pershin <mike.tappro@intel.com>
117 #define DEBUG_SUBSYSTEM S_MDS
119 #include <lustre_obdo.h>
120 #include <lustre_swab.h>
122 #include "osp_internal.h"
124 static inline __u32 osp_dev2node(struct osp_device *osp)
126 return osp->opd_storage->dd_lu_dev.ld_site->ld_seq_site->ss_node_id;
129 static inline const char *osp_dto2name(struct osp_object *obj)
131 return obj->opo_obj.do_lu.lo_dev->ld_obd->obd_name;
134 static inline bool is_ost_obj(struct lu_object *lo)
136 return !lu2osp_dev(lo->lo_dev)->opd_connect_mdt;
139 static inline void __osp_oac_xattr_assignment(struct osp_object *obj,
140 struct osp_xattr_entry *oxe,
141 const struct lu_buf *buf)
144 memcpy(oxe->oxe_value, buf->lb_buf, buf->lb_len);
146 oxe->oxe_vallen = buf->lb_len;
 * Assign a FID to the OST object.
 * This function will assign the FID to the OST object of a striped file.
 * \param[in] env pointer to the thread context
 * \param[in] d pointer to the OSP device
 * \param[in] o pointer to the OSP object that the FID will be assigned to
161 static void osp_object_assign_fid(const struct lu_env *env,
162 struct osp_device *d, struct osp_object *o)
164 struct osp_thread_info *osi = osp_env_info(env);
166 LASSERT(fid_is_zero(lu_object_fid(&o->opo_obj.do_lu)));
167 LASSERT(o->opo_reserved);
170 osp_precreate_get_fid(env, d, &osi->osi_fid);
172 lu_object_assign_fid(env, &o->opo_obj.do_lu, &osi->osi_fid);
175 #define OXE_DEFAULT_LEN 16
178 * Release reference from the OSP object extended attribute entry.
180 * If it is the last reference, then free the entry.
182 * \param[in] oxe pointer to the OSP object extended attribute entry.
184 static inline void osp_oac_xattr_put(struct osp_xattr_entry *oxe)
186 if (atomic_dec_and_test(&oxe->oxe_ref)) {
187 LASSERT(list_empty(&oxe->oxe_list));
189 OBD_FREE(oxe, oxe->oxe_buflen);
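
/*
 * A usage sketch (not a verbatim copy of any single caller): every lookup
 * that returns a cached xattr entry takes a reference on it, so the caller
 * must drop that reference with osp_oac_xattr_put() once done:
 *
 *     oxe = osp_oac_xattr_find(obj, name, false);
 *     if (oxe != NULL) {
 *             ... use oxe->oxe_value and oxe->oxe_vallen under opo_lock ...
 *             osp_oac_xattr_put(oxe);
 *     }
 */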
 * Find the named extended attribute in the OSP object attributes cache.
 * The caller should hold the osp_object::opo_lock before calling this
 * function.
 * \param[in] obj pointer to the OSP object
 * \param[in] name the name of the extended attribute
 * \param[in] namelen the name length of the extended attribute
 * \retval pointer to the found extended attribute entry
 * \retval NULL if the specified extended attribute is not in the cache
207 static struct osp_xattr_entry *
208 osp_oac_xattr_find_locked(struct osp_object *obj, const char *name,
211 struct osp_xattr_entry *oxe;
213 list_for_each_entry(oxe, &obj->opo_xattr_list, oxe_list) {
214 if (namelen == oxe->oxe_namelen &&
215 strncmp(name, oxe->oxe_buf, namelen) == 0)
 * Find the named extended attribute in the OSP object attributes cache.
 * This calls osp_oac_xattr_find_locked() with the osp_object::opo_lock held.
 * \param[in] obj pointer to the OSP object
 * \param[in] name the name of the extended attribute
 * \param[in] unlink true if the found entry should also be removed from
 * the cache
 * \retval pointer to the found extended attribute entry
 * \retval NULL if the specified extended attribute is not in the cache
236 static struct osp_xattr_entry *osp_oac_xattr_find(struct osp_object *obj,
237 const char *name, bool unlink)
239 struct osp_xattr_entry *oxe = NULL;
241 spin_lock(&obj->opo_lock);
242 oxe = osp_oac_xattr_find_locked(obj, name, strlen(name));
245 list_del_init(&oxe->oxe_list);
247 atomic_inc(&oxe->oxe_ref);
249 spin_unlock(&obj->opo_lock);
 * Find the named extended attribute in the OSP object attributes cache.
 * If it is not in the cache, then add an empty entry (that will be
 * filled later) to the cache with the given name.
 * \param[in] obj pointer to the OSP object
 * \param[in] name the name of the extended attribute
 * \param[in] len the length of the extended attribute value
 * \retval pointer to the found or newly created extended attribute entry
 * \retval NULL if the entry is not in the cache and a new empty entry
 * could not be added (e.g. allocation failure)
269 static struct osp_xattr_entry *
270 osp_oac_xattr_find_or_add(struct osp_object *obj, const char *name, size_t len)
272 struct osp_xattr_entry *oxe;
273 struct osp_xattr_entry *tmp = NULL;
274 size_t namelen = strlen(name);
275 size_t size = sizeof(*oxe) + namelen + 1 +
276 (len ? len : OXE_DEFAULT_LEN);
278 oxe = osp_oac_xattr_find(obj, name, false);
282 OBD_ALLOC(oxe, size);
286 INIT_LIST_HEAD(&oxe->oxe_list);
287 oxe->oxe_buflen = size;
288 oxe->oxe_namelen = namelen;
289 memcpy(oxe->oxe_buf, name, namelen);
290 oxe->oxe_value = oxe->oxe_buf + namelen + 1;
291 /* One ref is for the caller, the other is for the entry on the list. */
292 atomic_set(&oxe->oxe_ref, 2);
294 spin_lock(&obj->opo_lock);
295 tmp = osp_oac_xattr_find_locked(obj, name, namelen);
297 list_add_tail(&oxe->oxe_list, &obj->opo_xattr_list);
299 atomic_inc(&tmp->oxe_ref);
300 spin_unlock(&obj->opo_lock);
 * Assign the given value to the cached OST-object's EA entry.
 * If the current EA entry in the cache does not have enough space to hold
 * the new value, remove it, create a new one, then assign the given value.
 * \param[in] obj pointer to the OSP object
 * \param[in] oxe pointer to the cached EA entry to be assigned
 * \param[in] buf pointer to the buffer with the new EA value
 * \retval pointer to the newly created EA entry in the cache if the
 * current entry is not big enough; otherwise, the input 'oxe'
 * is returned
324 static struct osp_xattr_entry *
325 osp_oac_xattr_assignment(struct osp_object *obj, struct osp_xattr_entry *oxe,
326 const struct lu_buf *buf)
328 struct osp_xattr_entry *new = NULL;
329 struct osp_xattr_entry *old = NULL;
330 int namelen = oxe->oxe_namelen;
331 size_t size = sizeof(*oxe) + namelen + 1 + buf->lb_len;
332 bool unlink_only = false;
334 if (oxe->oxe_buflen < size) {
335 OBD_ALLOC(new, size);
337 INIT_LIST_HEAD(&new->oxe_list);
338 new->oxe_buflen = size;
339 new->oxe_namelen = namelen;
340 memcpy(new->oxe_buf, oxe->oxe_buf, namelen);
341 new->oxe_value = new->oxe_buf + namelen + 1;
342 /* One ref is for the caller,
343 * the other is for the entry on the list. */
344 atomic_set(&new->oxe_ref, 2);
345 __osp_oac_xattr_assignment(obj, new, buf);
348 CWARN("%s: cannot update cached xattr %.*s of "DFID"\n",
349 osp_dto2name(obj), namelen, oxe->oxe_buf,
350 PFID(lu_object_fid(&obj->opo_obj.do_lu)));
354 spin_lock(&obj->opo_lock);
355 old = osp_oac_xattr_find_locked(obj, oxe->oxe_buf, namelen);
358 /* Unlink the 'old'. */
359 list_del_init(&old->oxe_list);
361 /* Drop the ref for 'old' on list. */
362 osp_oac_xattr_put(old);
/* Drop the reference held for the current use. */
365 osp_oac_xattr_put(oxe);
368 /* Insert 'new' into list. */
369 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
370 } else if (unlink_only) {
371 /* Unlink the 'old'. */
372 list_del_init(&old->oxe_list);
374 /* Drop the ref for 'old' on list. */
375 osp_oac_xattr_put(old);
377 __osp_oac_xattr_assignment(obj, oxe, buf);
/* Drop the reference held for the current use. */
381 osp_oac_xattr_put(oxe);
/* Someone removed the 'old' entry in a race;
 * insert the 'new' one into the list. */
386 list_add_tail(&new->oxe_list, &obj->opo_xattr_list);
388 spin_unlock(&obj->opo_lock);
394 * Parse the OSP object attribute from the RPC reply.
 * If the attribute is valid, then it will be added to the OSP object
 * attributes cache.
399 * \param[in] env pointer to the thread context
400 * \param[in] reply pointer to the RPC reply
401 * \param[in] req pointer to the RPC request
402 * \param[out] attr pointer to buffer to hold the output attribute
403 * \param[in] obj pointer to the OSP object
404 * \param[in] index the index of the attribute buffer in the reply
406 * \retval 0 for success
407 * \retval negative error number on failure
409 static int osp_get_attr_from_reply(const struct lu_env *env,
410 struct object_update_reply *reply,
411 struct ptlrpc_request *req,
412 struct lu_attr *attr,
413 struct osp_object *obj, int index)
415 struct osp_thread_info *osi = osp_env_info(env);
416 struct lu_buf *rbuf = &osi->osi_lb2;
417 struct obdo *lobdo = &osi->osi_obdo;
421 rc = object_update_result_data_get(reply, rbuf, index);
425 wobdo = rbuf->lb_buf;
426 if (rbuf->lb_len != sizeof(*wobdo))
429 LASSERT(req != NULL);
430 if (ptlrpc_req_need_swab(req))
431 lustre_swab_obdo(wobdo);
433 lustre_get_wire_obdo(NULL, lobdo, wobdo);
434 spin_lock(&obj->opo_lock);
435 la_from_obdo(&obj->opo_attr, lobdo, lobdo->o_valid);
437 *attr = obj->opo_attr;
438 spin_unlock(&obj->opo_lock);
444 * Interpreter function for getting OSP object attribute asynchronously.
446 * Called to interpret the result of an async mode RPC for getting the
447 * OSP object attribute.
449 * \param[in] env pointer to the thread context
450 * \param[in] reply pointer to the RPC reply
451 * \param[in] req pointer to the RPC request
452 * \param[in] obj pointer to the OSP object
453 * \param[out] data pointer to buffer to hold the output attribute
454 * \param[in] index the index of the attribute buffer in the reply
455 * \param[in] rc the result for handling the RPC
457 * \retval 0 for success
458 * \retval negative error number on failure
460 static int osp_attr_get_interpterer(const struct lu_env *env,
461 struct object_update_reply *reply,
462 struct ptlrpc_request *req,
463 struct osp_object *obj,
464 void *data, int index, int rc)
466 struct lu_attr *attr = data;
469 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
470 obj->opo_non_exist = 0;
472 return osp_get_attr_from_reply(env, reply, req, NULL, obj,
476 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
477 obj->opo_non_exist = 1;
480 spin_lock(&obj->opo_lock);
482 spin_unlock(&obj->opo_lock);
489 * Implement OSP layer dt_object_operations::do_declare_attr_get() interface.
491 * Declare that the caller will get attribute from the specified OST object.
 * This function adds an Object Unified Target (OUT) sub-request to the
 * per-OSP shared asynchronous request queue. The osp_attr_get_interpterer()
 * function is registered as the interpreter to handle the result of this
 * sub-request.
498 * \param[in] env pointer to the thread context
499 * \param[in] dt pointer to the OSP layer dt_object
501 * \retval 0 for success
502 * \retval negative error number on failure
504 static int osp_declare_attr_get(const struct lu_env *env, struct dt_object *dt)
506 struct osp_object *obj = dt2osp_obj(dt);
507 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
510 mutex_lock(&osp->opd_async_requests_mutex);
511 rc = osp_insert_async_request(env, OUT_ATTR_GET, obj, 0, NULL, NULL,
512 &obj->opo_attr, sizeof(struct obdo),
513 osp_attr_get_interpterer);
514 mutex_unlock(&osp->opd_async_requests_mutex);
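
/*
 * Note that the OUT_ATTR_GET sub-request queued above is not sent
 * immediately; queued sub-requests are later unplugged as one batched OUT
 * RPC. A rough sketch of that unplug step (see osp_declare_xattr_get()
 * below for where this actually happens):
 *
 *     mutex_lock(&osp->opd_async_requests_mutex);
 *     our = osp->opd_async_requests;
 *     osp->opd_async_requests = NULL;
 *     mutex_unlock(&osp->opd_async_requests_mutex);
 *     rc = osp_unplug_async_request(env, osp, our);
 */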
520 * Implement OSP layer dt_object_operations::do_attr_get() interface.
522 * Get attribute from the specified MDT/OST object.
 * If the attribute is in the OSP object attributes cache, then return
 * the cached attribute directly. Otherwise trigger an OUT RPC to the peer
 * to get the attribute synchronously; if that succeeds, add the result to
 * the OSP attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
529 * \param[in] env pointer to the thread context
530 * \param[in] dt pointer to the OSP layer dt_object
531 * \param[out] attr pointer to the buffer to hold the output attribute
533 * \retval 0 for success
534 * \retval negative error number on failure
536 int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
537 struct lu_attr *attr)
539 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
540 struct osp_object *obj = dt2osp_obj(dt);
541 struct dt_device *dev = &osp->opd_dt_dev;
542 struct osp_update_request *update;
543 struct object_update_reply *reply;
544 struct ptlrpc_request *req = NULL;
548 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
551 spin_lock(&obj->opo_lock);
552 if (obj->opo_attr.la_valid != 0 && !obj->opo_stale) {
553 *attr = obj->opo_attr;
554 spin_unlock(&obj->opo_lock);
558 spin_unlock(&obj->opo_lock);
560 update = osp_update_request_create(dev);
562 RETURN(PTR_ERR(update));
564 rc = osp_update_rpc_pack(env, attr_get, update, OUT_ATTR_GET,
565 lu_object_fid(&dt->do_lu));
567 CERROR("%s: Insert update error "DFID": rc = %d\n",
568 dev->dd_lu_dev.ld_obd->obd_name,
569 PFID(lu_object_fid(&dt->do_lu)), rc);
574 rc = osp_remote_sync(env, osp, update, &req);
577 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
578 obj->opo_non_exist = 1;
580 CERROR("%s:osp_attr_get update error "DFID": rc = %d\n",
581 dev->dd_lu_dev.ld_obd->obd_name,
582 PFID(lu_object_fid(&dt->do_lu)), rc);
588 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
589 obj->opo_non_exist = 0;
590 reply = req_capsule_server_sized_get(&req->rq_pill,
591 &RMF_OUT_UPDATE_REPLY,
592 OUT_UPDATE_REPLY_SIZE);
593 if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC)
594 GOTO(out, rc = -EPROTO);
596 rc = osp_get_attr_from_reply(env, reply, req, attr, obj, 0);
600 spin_lock(&obj->opo_lock);
602 spin_unlock(&obj->opo_lock);
608 ptlrpc_req_finished(req);
610 osp_update_request_destroy(env, update);
616 * Implement OSP layer dt_object_operations::do_declare_attr_set() interface.
 * If the transaction is not a remote one, then declare the credits that
 * will be used for the subsequent llog record for the object's attributes.
621 * \param[in] env pointer to the thread context
622 * \param[in] dt pointer to the OSP layer dt_object
623 * \param[in] attr pointer to the attribute to be set
624 * \param[in] th pointer to the transaction handler
626 * \retval 0 for success
627 * \retval negative error number on failure
629 static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
630 const struct lu_attr *attr, struct thandle *th)
632 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
633 struct osp_object *o = dt2osp_obj(dt);
636 if (is_only_remote_trans(th))
637 return osp_md_declare_attr_set(env, dt, attr, th);
 * Usually we do not allow the server stack to manipulate the size,
 * but there is a special case when striping is created late, after
 * a stripeless file has been truncated to a non-zero size.
 * In this case we do the following:
 * 1) grab an id in declare - this can lead to leaked OST objects,
 *    but we do not currently have a proper mechanism, and the only
 *    options are to do the truncate RPC while holding the transaction
 *    open (very bad) or to grab the id in declare at the cost of a
 *    leaked OST object in the same very rare unfortunate case (just
 *    bad); note that 1.6-2.0 did the assignment outside of a running
 *    transaction all the time, meaning many more chances for leaked
 *    objects
 * 2) send a synchronous truncate RPC with the just-assigned id
/* there are a few places in the MDD code still passing NULL
 * XXX: to be fixed soon */
661 if (attr->la_valid & LA_SIZE && attr->la_size > 0 &&
662 fid_is_zero(lu_object_fid(&o->opo_obj.do_lu))) {
663 LASSERT(!dt_object_exists(dt));
664 osp_object_assign_fid(env, d, o);
665 rc = osp_object_truncate(env, dt, attr->la_size);
670 if (!(attr->la_valid & LA_REMOTE_ATTR_SET))
673 /* track all UID/GID, projid, and layout version changes via llog */
674 rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
680 * Implement OSP layer dt_object_operations::do_attr_set() interface.
682 * Set attribute to the specified OST object.
 * If the transaction is a remote one, then add an OUT_ATTR_SET sub-request
 * to the OUT RPC that will be flushed when the remote transaction stops.
 * Otherwise, generate an MDS_SETATTR64_REC record in the llog that will be
 * handled asynchronously by a dedicated thread.
 * If the attribute entry exists in the OSP object attributes cache,
 * then update the cached attribute according to the given attribute.
692 * \param[in] env pointer to the thread context
693 * \param[in] dt pointer to the OSP layer dt_object
694 * \param[in] attr pointer to the attribute to be set
695 * \param[in] th pointer to the transaction handler
697 * \retval 0 for success
698 * \retval negative error number on failure
700 static int osp_attr_set(const struct lu_env *env, struct dt_object *dt,
701 const struct lu_attr *attr, struct thandle *th)
703 struct osp_object *o = dt2osp_obj(dt);
707 /* we're interested in uid/gid/projid/layout version changes only */
708 if (!(attr->la_valid & LA_REMOTE_ATTR_SET))
711 if (!is_only_remote_trans(th)) {
712 rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);
713 /* XXX: send new uid/gid to OST ASAP? */
/* This sets the OST-object attributes directly, without updating the
 * local MDT-object attributes. It is usually used by LFSCK. */
719 rc = osp_md_attr_set(env, dt, attr, th);
720 CDEBUG(D_INFO, "(1) set attr "DFID": rc = %d\n",
721 PFID(&dt->do_lu.lo_header->loh_fid), rc);
726 /* Update the OSP object attributes cache. */
728 spin_lock(&o->opo_lock);
729 if (attr->la_valid & LA_UID) {
730 la->la_uid = attr->la_uid;
731 la->la_valid |= LA_UID;
734 if (attr->la_valid & LA_GID) {
735 la->la_gid = attr->la_gid;
736 la->la_valid |= LA_GID;
738 if (attr->la_valid & LA_PROJID) {
739 la->la_projid = attr->la_projid;
740 la->la_valid |= LA_PROJID;
742 spin_unlock(&o->opo_lock);
 * Interpreter function for getting an OSP object extended attribute
 * asynchronously.
751 * Called to interpret the result of an async mode RPC for getting the
752 * OSP object extended attribute.
754 * \param[in] env pointer to the thread context
755 * \param[in] reply pointer to the RPC reply
756 * \param[in] req pointer to the RPC request
757 * \param[in] obj pointer to the OSP object
758 * \param[out] data pointer to OSP object attributes cache
759 * \param[in] index the index of the attribute buffer in the reply
760 * \param[in] rc the result for handling the RPC
762 * \retval 0 for success
763 * \retval negative error number on failure
765 static int osp_xattr_get_interpterer(const struct lu_env *env,
766 struct object_update_reply *reply,
767 struct ptlrpc_request *req,
768 struct osp_object *obj,
769 void *data, int index, int rc)
771 struct osp_xattr_entry *oxe = data;
772 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
775 size_t len = sizeof(*oxe) + oxe->oxe_namelen + 1;
777 rc = object_update_result_data_get(reply, rbuf, index);
778 spin_lock(&obj->opo_lock);
779 if (rc < 0 || rbuf->lb_len == 0 ||
780 rbuf->lb_len > (oxe->oxe_buflen - len)) {
781 if (unlikely(rc == -ENODATA)) {
787 spin_unlock(&obj->opo_lock);
788 /* Put the reference obtained in the
789 * osp_declare_xattr_get(). */
790 osp_oac_xattr_put(oxe);
792 return rc < 0 ? rc : -ERANGE;
795 __osp_oac_xattr_assignment(obj, oxe, rbuf);
796 spin_unlock(&obj->opo_lock);
797 } else if (rc == -ENOENT || rc == -ENODATA) {
798 spin_lock(&obj->opo_lock);
801 spin_unlock(&obj->opo_lock);
803 spin_lock(&obj->opo_lock);
805 spin_unlock(&obj->opo_lock);
808 /* Put the reference obtained in the osp_declare_xattr_get(). */
809 osp_oac_xattr_put(oxe);
815 * Implement OSP dt_object_operations::do_declare_xattr_get() interface.
 * Declare that the caller will get an extended attribute from the
 * specified MDT/OST object.
 * This function will add an OUT_XATTR_GET sub-request to the per-OSP
 * shared asynchronous request queue, with osp_xattr_get_interpterer()
 * registered as the interpreter function.
824 * \param[in] env pointer to the thread context
825 * \param[in] dt pointer to the OSP layer dt_object
826 * \param[out] buf pointer to the lu_buf to hold the extended attribute
827 * \param[in] name the name for the expected extended attribute
829 * \retval 0 for success
830 * \retval negative error number on failure
832 static int osp_declare_xattr_get(const struct lu_env *env, struct dt_object *dt,
833 struct lu_buf *buf, const char *name)
835 struct osp_object *obj = dt2osp_obj(dt);
836 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
837 struct osp_xattr_entry *oxe;
841 LASSERT(buf != NULL);
842 LASSERT(name != NULL);
844 if (unlikely(buf->lb_len == 0))
847 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
851 namelen = strlen(name);
852 mutex_lock(&osp->opd_async_requests_mutex);
853 rc = osp_insert_async_request(env, OUT_XATTR_GET, obj, 1,
854 &namelen, (const void **)&name,
856 osp_xattr_get_interpterer);
858 mutex_unlock(&osp->opd_async_requests_mutex);
859 osp_oac_xattr_put(oxe);
861 struct osp_update_request *our;
862 struct osp_update_request_sub *ours;
/* XXX: Currently, we trigger the batched async OUT
 * RPC via dt_declare_xattr_get(). It is not a
 * perfect solution, but it works well for now.
 * We will improve it in the future. */
869 our = osp->opd_async_requests;
870 ours = osp_current_object_update_request(our);
871 if (ours != NULL && ours->ours_req != NULL &&
872 ours->ours_req->ourq_count > 0) {
873 osp->opd_async_requests = NULL;
874 mutex_unlock(&osp->opd_async_requests_mutex);
875 rc = osp_unplug_async_request(env, osp, our);
877 mutex_unlock(&osp->opd_async_requests_mutex);
885 * Implement OSP layer dt_object_operations::do_xattr_get() interface.
887 * Get extended attribute from the specified MDT/OST object.
889 * If the extended attribute is in the OSP object attributes cache, then
 * return the cached extended attribute directly. Otherwise fetch the
 * extended attribute synchronously; if that succeeds, add it to the OSP
 * attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
 * There is a race condition: some other thread may have added the named
 * extended attribute entry to the OSP object attributes cache during the
 * current OUT_XATTR_GET handling. If that happens, the OSP will replace
 * the existing extended attribute entry with the newly replied one.
899 * \param[in] env pointer to the thread context
900 * \param[in] dt pointer to the OSP layer dt_object
901 * \param[out] buf pointer to the lu_buf to hold the extended attribute
902 * \param[in] name the name for the expected extended attribute
904 * \retval 0 for success
905 * \retval negative error number on failure
907 int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
908 struct lu_buf *buf, const char *name)
910 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
911 struct osp_object *obj = dt2osp_obj(dt);
912 struct dt_device *dev = &osp->opd_dt_dev;
913 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
914 struct osp_update_request *update = NULL;
915 struct ptlrpc_request *req = NULL;
916 struct object_update_reply *reply;
917 struct osp_xattr_entry *oxe = NULL;
918 const char *dname = osp_dto2name(obj);
922 LASSERT(buf != NULL);
923 LASSERT(name != NULL);
925 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NETWORK) &&
926 osp->opd_index == cfs_fail_val) {
927 if (is_ost_obj(&dt->do_lu)) {
928 if (osp_dev2node(osp) == cfs_fail_val)
931 if (strcmp(name, XATTR_NAME_LINK) == 0)
936 if (unlikely(obj->opo_non_exist))
939 oxe = osp_oac_xattr_find(obj, name, false);
941 spin_lock(&obj->opo_lock);
942 if (oxe->oxe_ready) {
944 GOTO(unlock, rc = -ENODATA);
946 if (buf->lb_buf == NULL)
947 GOTO(unlock, rc = oxe->oxe_vallen);
949 if (buf->lb_len < oxe->oxe_vallen)
950 GOTO(unlock, rc = -ERANGE);
952 memcpy(buf->lb_buf, oxe->oxe_value,
955 GOTO(unlock, rc = oxe->oxe_vallen);
958 spin_unlock(&obj->opo_lock);
959 osp_oac_xattr_put(oxe);
963 spin_unlock(&obj->opo_lock);
965 update = osp_update_request_create(dev);
967 GOTO(out, rc = PTR_ERR(update));
969 rc = osp_update_rpc_pack(env, xattr_get, update, OUT_XATTR_GET,
970 lu_object_fid(&dt->do_lu), name, buf->lb_len);
972 CERROR("%s: Insert update error "DFID": rc = %d\n",
973 dname, PFID(lu_object_fid(&dt->do_lu)), rc);
977 rc = osp_remote_sync(env, osp, update, &req);
980 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
981 obj->opo_non_exist = 1;
985 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
988 CWARN("%s: Fail to add xattr (%s) to cache for "
989 DFID" (1): rc = %d\n", dname, name,
990 PFID(lu_object_fid(&dt->do_lu)), rc);
995 spin_lock(&obj->opo_lock);
996 if (rc == -ENOENT || rc == -ENODATA) {
1002 spin_unlock(&obj->opo_lock);
1007 reply = req_capsule_server_sized_get(&req->rq_pill,
1008 &RMF_OUT_UPDATE_REPLY,
1009 OUT_UPDATE_REPLY_SIZE);
1010 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
1011 CERROR("%s: Wrong version %x expected %x "DFID": rc = %d\n",
1012 dname, reply->ourp_magic, UPDATE_REPLY_MAGIC,
1013 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
1015 GOTO(out, rc = -EPROTO);
1018 rc = object_update_result_data_get(reply, rbuf, 0);
1019 if (rc < 0 || rbuf->lb_len == 0) {
1021 spin_lock(&obj->opo_lock);
1022 if (unlikely(rc == -ENODATA)) {
1028 spin_unlock(&obj->opo_lock);
1034 /* For detecting EA size. */
1039 oxe = osp_oac_xattr_find_or_add(obj, name, rbuf->lb_len);
1041 CWARN("%s: Fail to add xattr (%s) to "
1042 "cache for "DFID" (2): rc = %d\n",
1043 dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
1049 oxe = osp_oac_xattr_assignment(obj, oxe, rbuf);
1054 if (rc > 0 && buf->lb_buf) {
1055 if (unlikely(buf->lb_len < rbuf->lb_len))
1058 memcpy(buf->lb_buf, rbuf->lb_buf, rbuf->lb_len);
1062 ptlrpc_req_finished(req);
1064 if (update && !IS_ERR(update))
1065 osp_update_request_destroy(env, update);
1068 osp_oac_xattr_put(oxe);
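
/*
 * A caller-side sketch (hypothetical caller, error handling omitted):
 * because osp_xattr_get() returns the value size when buf->lb_buf is NULL,
 * the usual two-step "probe the size, then fetch" pattern works through
 * the generic dt API:
 *
 *     rc = dt_xattr_get(env, dt, &LU_BUF_NULL, XATTR_NAME_LOV);
 *     if (rc > 0) {
 *             lu_buf_check_and_alloc(&buf, rc);
 *             rc = dt_xattr_get(env, dt, &buf, XATTR_NAME_LOV);
 *     }
 */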
1074 * Implement OSP layer dt_object_operations::do_declare_xattr_set() interface.
 * Declare that the caller will set an extended attribute on the specified
 * MDT/OST object.
 * If it is a non-remote transaction, add an OUT_XATTR_SET sub-request to
 * the OUT RPC that will be flushed when the transaction starts. And if the
 * OSP attributes cache is initialized, then check whether the named
 * extended attribute entry exists in the cache or not. If yes, replace it;
 * otherwise, add the extended attribute to the cache.
1085 * \param[in] env pointer to the thread context
1086 * \param[in] dt pointer to the OSP layer dt_object
1087 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1088 * \param[in] name the name of the extended attribute to be set
1089 * \param[in] flag to indicate the detailed set operation: LU_XATTR_CREATE
1090 * or LU_XATTR_REPLACE or others
1091 * \param[in] th pointer to the transaction handler
1093 * \retval 0 for success
1094 * \retval negative error number on failure
1096 int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
1097 const struct lu_buf *buf, const char *name,
1098 int flag, struct thandle *th)
1100 return osp_trans_update_request_create(th);
1104 * Implement OSP layer dt_object_operations::do_xattr_set() interface.
1106 * Set extended attribute to the specified MDT/OST object.
 * Add an OUT_XATTR_SET sub-request into the OUT RPC that will be flushed
 * when the transaction stops. And if the OSP attributes cache is
 * initialized, then check whether the named extended attribute entry
 * exists in the cache or not.
1111 * If yes, replace it; otherwise, add the extended attribute to the cache.
1113 * \param[in] env pointer to the thread context
1114 * \param[in] dt pointer to the OSP layer dt_object
1115 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1116 * \param[in] name the name of the extended attribute to be set
1117 * \param[in] fl to indicate the detailed set operation: LU_XATTR_CREATE
1118 * or LU_XATTR_REPLACE or others
1119 * \param[in] th pointer to the transaction handler
1121 * \retval 0 for success
1122 * \retval negative error number on failure
1124 int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
1125 const struct lu_buf *buf, const char *name, int fl,
1128 struct osp_object *o = dt2osp_obj(dt);
1129 struct osp_update_request *update;
1130 struct osp_xattr_entry *oxe;
1134 update = thandle_to_osp_update_request(th);
1135 LASSERT(update != NULL);
1137 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
1138 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
1140 rc = osp_update_rpc_pack(env, xattr_set, update, OUT_XATTR_SET,
1141 lu_object_fid(&dt->do_lu), buf, name, fl);
/* Do not cache the linkEA, which may be self-adjusted by peers
 * in the EA overflow case. */
1147 if (strcmp(name, XATTR_NAME_LINK) == 0) {
1148 oxe = osp_oac_xattr_find(o, name, true);
1150 osp_oac_xattr_put(oxe);
1155 oxe = osp_oac_xattr_find_or_add(o, name, buf->lb_len);
1157 CWARN("%s: cannot cache xattr '%s' of "DFID"\n",
1158 osp_dto2name(o), name, PFID(lu_object_fid(&dt->do_lu)));
1163 oxe = osp_oac_xattr_assignment(o, oxe, buf);
1165 osp_oac_xattr_put(oxe);
1171 * Implement OSP layer dt_object_operations::do_declare_xattr_del() interface.
 * Declare that the caller will delete an extended attribute on the
 * specified MDT/OST object.
 * If it is a non-remote transaction, add an OUT_XATTR_DEL sub-request to
 * the OUT RPC that will be flushed when the transaction starts. And if the
 * named extended attribute entry exists in the OSP attributes cache, then
 * remove it from the cache.
1181 * \param[in] env pointer to the thread context
1182 * \param[in] dt pointer to the OSP layer dt_object
1183 * \param[in] name the name of the extended attribute to be set
1184 * \param[in] th pointer to the transaction handler
1186 * \retval 0 for success
1187 * \retval negative error number on failure
1189 int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
1190 const char *name, struct thandle *th)
1192 return osp_trans_update_request_create(th);
1196 * Implement OSP layer dt_object_operations::do_xattr_del() interface.
1198 * Delete extended attribute on the specified MDT/OST object.
 * If it is a remote transaction, add an OUT_XATTR_DEL sub-request into
 * the OUT RPC that will be flushed when the transaction stops. And if the
 * named extended attribute entry exists in the OSP attributes cache, then
 * remove it from the cache.
1205 * \param[in] env pointer to the thread context
1206 * \param[in] dt pointer to the OSP layer dt_object
1207 * \param[in] name the name of the extended attribute to be set
1208 * \param[in] th pointer to the transaction handler
1210 * \retval 0 for success
1211 * \retval negative error number on failure
1213 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
1214 const char *name, struct thandle *th)
1216 struct osp_update_request *update;
1217 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1218 struct osp_object *o = dt2osp_obj(dt);
1219 struct osp_xattr_entry *oxe;
1222 update = thandle_to_osp_update_request(th);
1223 LASSERT(update != NULL);
1225 rc = osp_update_rpc_pack(env, xattr_del, update, OUT_XATTR_DEL,
1230 oxe = osp_oac_xattr_find(o, name, true);
1232 /* Drop the ref for entry on list. */
1233 osp_oac_xattr_put(oxe);
1238 void osp_obj_invalidate_cache(struct osp_object *obj)
1240 struct osp_xattr_entry *oxe;
1241 struct osp_xattr_entry *tmp;
1243 spin_lock(&obj->opo_lock);
1244 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
1246 list_del_init(&oxe->oxe_list);
1247 osp_oac_xattr_put(oxe);
1249 obj->opo_attr.la_valid = 0;
1250 spin_unlock(&obj->opo_lock);
1254 * Implement OSP layer dt_object_operations::do_invalidate() interface.
1256 * Invalidate attributes cached on the specified MDT/OST object.
1258 * \param[in] env pointer to the thread context
1259 * \param[in] dt pointer to the OSP layer dt_object
1261 * \retval 0 for success
1262 * \retval negative error number on failure
1264 int osp_invalidate(const struct lu_env *env, struct dt_object *dt)
1266 struct osp_object *obj = dt2osp_obj(dt);
1269 CDEBUG(D_HA, "Invalidate osp_object "DFID"\n",
1270 PFID(lu_object_fid(&dt->do_lu)));
1271 osp_obj_invalidate_cache(obj);
1273 spin_lock(&obj->opo_lock);
1275 spin_unlock(&obj->opo_lock);
1281 * Implement OSP layer dt_object_operations::do_declare_create() interface.
1283 * Declare that the caller will create the OST object.
1285 * If the transaction is a remote transaction and the FID for the OST-object
 * has been assigned already, then handle it as creating a (remote) MDT
 * object via osp_md_declare_create(). This path is usually used by LFSCK
 * to re-create a lost OST object. Otherwise, if it is not a replay case,
 * the OSP will reserve a pre-created object for the subsequent create
 * operation; if the number of pre-created objects cached on the MDT side
 * drops below some threshold, it will wake up the pre-create thread.
1293 * \param[in] env pointer to the thread context
1294 * \param[in] dt pointer to the OSP layer dt_object
1295 * \param[in] attr the attribute for the object to be created
 * \param[in] hint pointer to the hint for creating the object, such as
 * the parent object
 * \param[in] dof pointer to the dt_object_format to help the creation
1299 * \param[in] th pointer to the transaction handler
1301 * \retval 0 for success
1302 * \retval negative error number on failure
1304 static int osp_declare_create(const struct lu_env *env, struct dt_object *dt,
1305 struct lu_attr *attr,
1306 struct dt_allocation_hint *hint,
1307 struct dt_object_format *dof, struct thandle *th)
1309 struct osp_thread_info *osi = osp_env_info(env);
1310 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1311 struct osp_object *o = dt2osp_obj(dt);
1312 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1313 struct thandle *local_th;
1318 if (is_only_remote_trans(th) && !fid_is_zero(fid)) {
1319 LASSERT(fid_is_sane(fid));
1321 rc = osp_md_declare_create(env, dt, attr, hint, dof, th);
/* should happen to a non-0 OSP only, so that at least one object
 * has already been declared in the scenario and LOD should clean
 * that up */
1329 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_CREATE_FAIL) && d->opd_index == 1)
1332 LASSERT(d->opd_last_used_oid_file);
 * There can be gaps in precreated ids, and we need to record them in the
 * unlink llog.
 * XXX: we do not handle gaps yet; the earlier implementation was found
 * to be racy, so we disabled it. There is no point in making a useless
 * but expensive llog declaration.
1340 /* rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th); */
1342 local_th = osp_get_storage_thandle(env, th, d);
1343 if (IS_ERR(local_th))
1344 RETURN(PTR_ERR(local_th));
1346 if (unlikely(!fid_is_zero(fid))) {
1347 /* replay case: caller knows fid */
1348 osi->osi_off = sizeof(osi->osi_id) * d->opd_index;
1349 osi->osi_lb.lb_len = sizeof(osi->osi_id);
1350 osi->osi_lb.lb_buf = NULL;
1352 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1353 &osi->osi_lb, osi->osi_off,
 * in the declaration phase we need to reserve an object so that we do not
 * block waiting for the precreation RPC to complete
1362 rc = osp_precreate_reserve(env, d);
 * we also need to declare an update to the local "last used id" file for
 * recovery; if the object is not used for some reason, we need to release
 * the reservation, which can be done in osd_object_release()
/* mark the id as reserved: in create we don't want to talk
 * to the OST */
1371 LASSERT(o->opo_reserved == 0);
1372 o->opo_reserved = 1;
/* historically the file is common for all OSPs */
1375 osi->osi_off = sizeof(osi->osi_id) * d->opd_index;
1376 osi->osi_lb.lb_len = sizeof(osi->osi_id);
1377 osi->osi_lb.lb_buf = NULL;
1378 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1379 &osi->osi_lb, osi->osi_off,
1382 /* not needed in the cache anymore */
1383 set_bit(LU_OBJECT_HEARD_BANSHEE,
1384 &dt->do_lu.lo_header->loh_flags);
1390 * Implement OSP layer dt_object_operations::do_create() interface.
1392 * Create the OST object.
1394 * If the transaction is a remote transaction and the FID for the OST-object
 * has been assigned already, then handle it as an MDT object via
 * osp_md_create(). In other cases, the OSP will assign a FID to the
 * object to be created, and update the last_used Object ID (OID) file.
1399 * \param[in] env pointer to the thread context
1400 * \param[in] dt pointer to the OSP layer dt_object
1401 * \param[in] attr the attribute for the object to be created
 * \param[in] hint pointer to the hint for creating the object, such as
 * the parent object
 * \param[in] dof pointer to the dt_object_format to help the creation
1405 * \param[in] th pointer to the transaction handler
1407 * \retval 0 for success
1408 * \retval negative error number on failure
1410 static int osp_create(const struct lu_env *env, struct dt_object *dt,
1411 struct lu_attr *attr, struct dt_allocation_hint *hint,
1412 struct dt_object_format *dof, struct thandle *th)
1414 struct osp_thread_info *osi = osp_env_info(env);
1415 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1416 struct osp_object *o = dt2osp_obj(dt);
1418 struct lu_fid *fid = &osi->osi_fid;
1419 struct thandle *local_th;
1420 struct lu_fid *last_fid = &d->opd_last_used_fid;
1423 if (is_only_remote_trans(th) &&
1424 !fid_is_zero(lu_object_fid(&dt->do_lu))) {
1425 LASSERT(fid_is_sane(lu_object_fid(&dt->do_lu)));
1427 rc = osp_md_create(env, dt, attr, hint, dof, th);
1429 o->opo_non_exist = 0;
1434 o->opo_non_exist = 0;
1435 if (o->opo_reserved) {
1436 /* regular case, fid is assigned holding transaction open */
1437 osp_object_assign_fid(env, d, o);
1440 memcpy(fid, lu_object_fid(&dt->do_lu), sizeof(*fid));
1442 LASSERTF(fid_is_sane(fid), "fid for osp_object %p is insane"DFID"!\n",
1445 if (!o->opo_reserved) {
1446 /* special case, id was assigned outside of transaction
1447 * see comments in osp_declare_attr_set */
1448 LASSERT(d->opd_pre != NULL);
1449 spin_lock(&d->opd_pre_lock);
1450 osp_update_last_fid(d, fid);
1451 spin_unlock(&d->opd_pre_lock);
1454 CDEBUG(D_INODE, "fid for osp_object %p is "DFID"\n", o, PFID(fid));
/* If precreation has reached the end of the sequence, it will be ready to
 * roll over to a new sequence soon, so all creations should be synchronous;
 * otherwise, during replay, the replayed fid will be inconsistent with the
 * last_used/create fid */
1460 if (osp_precreate_end_seq(env, d) && osp_is_fid_client(d))
1463 local_th = osp_get_storage_thandle(env, th, d);
1464 if (IS_ERR(local_th))
1465 RETURN(PTR_ERR(local_th));
 * it's OK if the import is inactive by this moment - the id was created
 * by the OST earlier, we just need to maintain it consistently on disk;
 * once the import is reconnected, the OSP will claim this and the other
 * used objects, and the OST will either keep them, if they exist, or
 * recreate them
1473 /* we might have lost precreated objects */
1474 if (unlikely(d->opd_gap_count) > 0) {
1475 LASSERT(d->opd_pre != NULL);
1476 spin_lock(&d->opd_pre_lock);
1477 if (d->opd_gap_count > 0) {
1478 int count = d->opd_gap_count;
1480 rc = ostid_set_id(&osi->osi_oi,
1481 fid_oid(&d->opd_gap_start_fid));
1483 spin_unlock(&d->opd_pre_lock);
1486 d->opd_gap_count = 0;
1487 spin_unlock(&d->opd_pre_lock);
1489 CDEBUG(D_HA, "Writing gap "DFID"+%d in llog\n",
1490 PFID(&d->opd_gap_start_fid), count);
/* real gap handling is disabled until ORI-692 is fixed;
 * for now we only report gaps */
1494 spin_unlock(&d->opd_pre_lock);
/* Only the last_used oid file needs to be updated; the seq file is only
 * updated during sequence rollover */
1500 if (fid_is_idif((last_fid)))
1501 osi->osi_id = fid_idif_id(fid_seq(last_fid),
1502 fid_oid(last_fid), fid_ver(last_fid));
1504 osi->osi_id = fid_oid(last_fid);
1505 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off,
1506 &osi->osi_id, d->opd_index);
1508 rc = dt_record_write(env, d->opd_last_used_oid_file, &osi->osi_lb,
1509 &osi->osi_off, local_th);
1511 CDEBUG(D_HA, "%s: Wrote last used FID: "DFID", index %d: %d\n",
1512 d->opd_obd->obd_name, PFID(fid), d->opd_index, rc);
1518 * Implement OSP layer dt_object_operations::do_declare_destroy() interface.
1520 * Declare that the caller will destroy the specified OST object.
1522 * The OST object destroy will be handled via llog asynchronously. This
1523 * function will declare the credits for generating MDS_UNLINK64_REC llog.
1525 * \param[in] env pointer to the thread context
1526 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1527 * \param[in] th pointer to the transaction handler
1529 * \retval 0 for success
1530 * \retval negative error number on failure
1532 int osp_declare_destroy(const struct lu_env *env, struct dt_object *dt,
1535 struct osp_object *o = dt2osp_obj(dt);
1536 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
1541 LASSERT(!osp->opd_connect_mdt);
1542 rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
1548 * Implement OSP layer dt_object_operations::do_destroy() interface.
1550 * Destroy the specified OST object.
 * The OSP generates an MDS_UNLINK64_REC record in the llog. A dedicated
 * thread will process the llog asynchronously.
1555 * It also marks the object as non-cached.
1557 * \param[in] env pointer to the thread context
1558 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1559 * \param[in] th pointer to the transaction handler
1561 * \retval 0 for success
1562 * \retval negative error number on failure
1564 static int osp_destroy(const struct lu_env *env, struct dt_object *dt,
1567 struct osp_object *o = dt2osp_obj(dt);
1568 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
1573 o->opo_non_exist = 1;
1575 LASSERT(!osp->opd_connect_mdt);
/* once the transaction is committed, put the proper command on
 * the queue going to our OST. */
1578 rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
1582 /* not needed in cache any more */
1583 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
1588 static int osp_orphan_index_lookup(const struct lu_env *env,
1589 struct dt_object *dt,
1591 const struct dt_key *key)
1596 static int osp_orphan_index_declare_insert(const struct lu_env *env,
1597 struct dt_object *dt,
1598 const struct dt_rec *rec,
1599 const struct dt_key *key,
1600 struct thandle *handle)
1605 static int osp_orphan_index_insert(const struct lu_env *env,
1606 struct dt_object *dt,
1607 const struct dt_rec *rec,
1608 const struct dt_key *key,
1609 struct thandle *handle,
1615 static int osp_orphan_index_declare_delete(const struct lu_env *env,
1616 struct dt_object *dt,
1617 const struct dt_key *key,
1618 struct thandle *handle)
1623 static int osp_orphan_index_delete(const struct lu_env *env,
1624 struct dt_object *dt,
1625 const struct dt_key *key,
1626 struct thandle *handle)
1632 * Initialize the OSP layer index iteration.
1634 * \param[in] env pointer to the thread context
1635 * \param[in] dt pointer to the index object to be iterated
1636 * \param[in] attr unused
1638 * \retval pointer to the iteration structure
1639 * \retval negative error number on failure
1641 struct dt_it *osp_it_init(const struct lu_env *env, struct dt_object *dt,
1648 return ERR_PTR(-ENOMEM);
1650 it->ooi_pos_ent = -1;
1652 it->ooi_attr = attr;
1654 return (struct dt_it *)it;
1658 * Finalize the OSP layer index iteration.
1660 * \param[in] env pointer to the thread context
1661 * \param[in] di pointer to the iteration structure
1663 void osp_it_fini(const struct lu_env *env, struct dt_it *di)
1665 struct osp_it *it = (struct osp_it *)di;
1666 struct page **pages = it->ooi_pages;
1667 int npages = it->ooi_total_npages;
1670 if (pages != NULL) {
1671 for (i = 0; i < npages; i++) {
1672 if (pages[i] != NULL) {
1673 if (pages[i] == it->ooi_cur_page) {
1675 it->ooi_cur_page = NULL;
1677 __free_page(pages[i]);
1680 OBD_FREE(pages, npages * sizeof(*pages));
 * Get more records for the iteration from the peer.
 * The new records will be returned in an array of pages; the OSP side
 * allows up to 1MB of bulk data to be transferred.
1691 * \param[in] env pointer to the thread context
1692 * \param[in] it pointer to the iteration structure
1694 * \retval 0 for success
1695 * \retval negative error number on failure
1697 static int osp_it_fetch(const struct lu_env *env, struct osp_it *it)
1699 struct lu_device *dev = it->ooi_obj->do_lu.lo_dev;
1700 struct osp_device *osp = lu2osp_dev(dev);
1701 struct page **pages;
1702 struct ptlrpc_request *req = NULL;
1703 struct ptlrpc_bulk_desc *desc;
1704 struct idx_info *ii;
1711 npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
1712 npages /= PAGE_SIZE;
1714 OBD_ALLOC(pages, npages * sizeof(*pages));
1718 it->ooi_pages = pages;
1719 it->ooi_total_npages = npages;
1720 for (i = 0; i < npages; i++) {
1721 pages[i] = alloc_page(GFP_NOFS);
1722 if (pages[i] == NULL)
1726 req = ptlrpc_request_alloc(osp->opd_obd->u.cli.cl_import,
1731 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
1733 ptlrpc_request_free(req);
1737 osp_set_req_replay(osp, req);
1738 req->rq_request_portal = OUT_PORTAL;
1739 ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
1740 memset(ii, 0, sizeof(*ii));
1741 if (fid_is_last_id(lu_object_fid(&it->ooi_obj->do_lu))) {
1742 /* LFSCK will iterate orphan object[FID_SEQ_LAYOUT_BTREE,
1743 * ost_index, 0] with LAST_ID FID, so it needs to replace
1744 * the FID with orphan FID here */
1745 ii->ii_fid.f_seq = FID_SEQ_LAYOUT_RBTREE;
1746 ii->ii_fid.f_oid = osp->opd_index;
1747 ii->ii_fid.f_ver = 0;
1748 ii->ii_flags = II_FL_NOHASH;
1749 ii->ii_attrs = osp_dev2node(osp);
1751 ii->ii_fid = *lu_object_fid(&it->ooi_obj->do_lu);
1752 ii->ii_flags = II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
1754 ii->ii_attrs = it->ooi_attr;
1756 ii->ii_magic = IDX_INFO_MAGIC;
1757 ii->ii_count = npages * LU_PAGE_COUNT;
1758 ii->ii_hash_start = it->ooi_next;
1760 ptlrpc_at_set_req_timeout(req);
1762 desc = ptlrpc_prep_bulk_imp(req, npages, 1,
1763 PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
1765 &ptlrpc_bulk_kiov_pin_ops);
1767 GOTO(out, rc = -ENOMEM);
1769 for (i = 0; i < npages; i++)
1770 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
1773 ptlrpc_request_set_replen(req);
1774 rc = ptlrpc_queue_wait(req);
1778 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
1779 req->rq_bulk->bd_nob_transferred);
1784 ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
1785 if (ii->ii_magic != IDX_INFO_MAGIC)
1786 GOTO(out, rc = -EPROTO);
1788 npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
1789 (PAGE_SHIFT - LU_PAGE_SHIFT);
1790 if (npages > it->ooi_total_npages) {
1791 CERROR("%s: returned more pages than expected, %u > %u\n",
1792 osp->opd_obd->obd_name, npages, it->ooi_total_npages);
1793 GOTO(out, rc = -EINVAL);
1796 it->ooi_rec_size = ii->ii_recsize;
1797 it->ooi_valid_npages = npages;
1798 if (ptlrpc_rep_need_swab(req))
1801 it->ooi_next = ii->ii_hash_end;
1804 ptlrpc_req_finished(req);
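
/*
 * Page accounting sketch for the fetch above, assuming a 4KB PAGE_SIZE:
 * 1MB of bulk data is 256 system pages, and each system page holds
 * PAGE_SIZE / LU_PAGE_SIZE = 1 lu_page, so ii_count requests
 * npages * LU_PAGE_COUNT = 256 index pages from the peer per RPC. With
 * larger system pages (e.g. 64KB), LU_PAGE_COUNT grows while npages
 * shrinks, and the product stays the same.
 */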
1810 * Move the iteration cursor to the next lu_page.
 * One system page (PAGE_SIZE) may contain multiple lu_pages (4KB each),
 * depending on LU_PAGE_COUNT. If the current lu_page is not the last one
 * in the current system page, then move the iteration cursor to the next
 * lu_page in the current system page. Otherwise, if there are more system
1816 * pages in the cache, then move the iteration cursor to the next system
1817 * page. If all the cached records (pages) have been iterated, then fetch
1818 * more records via osp_it_fetch().
1820 * \param[in] env pointer to the thread context
1821 * \param[in] di pointer to the iteration structure
1823 * \retval positive for end of the directory
1824 * \retval 0 for success
1825 * \retval negative error number on failure
1827 int osp_it_next_page(const struct lu_env *env, struct dt_it *di)
1829 struct osp_it *it = (struct osp_it *)di;
1830 struct lu_idxpage *idxpage;
1831 struct page **pages;
1837 idxpage = it->ooi_cur_idxpage;
1838 if (idxpage != NULL) {
1839 if (idxpage->lip_nr == 0)
1842 if (it->ooi_pos_ent < idxpage->lip_nr) {
1843 CDEBUG(D_INFO, "ooi_pos %d nr %d\n",
1844 (int)it->ooi_pos_ent, (int)idxpage->lip_nr);
1847 it->ooi_cur_idxpage = NULL;
1848 it->ooi_pos_lu_page++;
1851 if (it->ooi_pos_lu_page < LU_PAGE_COUNT) {
1852 it->ooi_cur_idxpage = (void *)it->ooi_cur_page +
1853 LU_PAGE_SIZE * it->ooi_pos_lu_page;
1855 lustre_swab_lip_header(it->ooi_cur_idxpage);
1856 if (it->ooi_cur_idxpage->lip_magic != LIP_MAGIC) {
1857 struct osp_device *osp =
1858 lu2osp_dev(it->ooi_obj->do_lu.lo_dev);
1860 CERROR("%s: invalid magic (%x != %x) for page "
1861 "%d/%d while read layout orphan index\n",
1862 osp->opd_obd->obd_name,
1863 it->ooi_cur_idxpage->lip_magic,
1864 LIP_MAGIC, it->ooi_pos_page,
1865 it->ooi_pos_lu_page);
1866 /* Skip this lu_page next time. */
1867 it->ooi_pos_ent = idxpage->lip_nr - 1;
1870 it->ooi_pos_ent = -1;
1874 kunmap(it->ooi_cur_page);
1875 it->ooi_cur_page = NULL;
1879 pages = it->ooi_pages;
1880 if (it->ooi_pos_page < it->ooi_valid_npages) {
1881 it->ooi_cur_page = kmap(pages[it->ooi_pos_page]);
1882 it->ooi_pos_lu_page = 0;
1886 for (i = 0; i < it->ooi_total_npages; i++) {
1887 if (pages[i] != NULL)
1888 __free_page(pages[i]);
1890 OBD_FREE(pages, it->ooi_total_npages * sizeof(*pages));
1892 it->ooi_pos_page = 0;
1893 it->ooi_total_npages = 0;
1894 it->ooi_valid_npages = 0;
1897 it->ooi_cur_page = NULL;
1898 it->ooi_cur_idxpage = NULL;
1899 it->ooi_pages = NULL;
1902 if (it->ooi_next == II_END_OFF)
1905 rc = osp_it_fetch(env, it);
1913 * Move the iteration cursor to the next record.
1915 * If there are more records in the lu_page, then move the iteration
1916 * cursor to the next record directly. Otherwise, move the iteration
 * cursor to the record in the next lu_page via osp_it_next_page().
1919 * \param[in] env pointer to the thread context
1920 * \param[in] di pointer to the iteration structure
1922 * \retval positive for end of the directory
1923 * \retval 0 for success
1924 * \retval negative error number on failure
1926 static int osp_orphan_it_next(const struct lu_env *env, struct dt_it *di)
1928 struct osp_it *it = (struct osp_it *)di;
1929 struct lu_idxpage *idxpage;
1934 idxpage = it->ooi_cur_idxpage;
1935 if (idxpage != NULL) {
1936 if (idxpage->lip_nr == 0)
1940 if (it->ooi_pos_ent < idxpage->lip_nr) {
1941 if (it->ooi_rec_size ==
1942 sizeof(struct lu_orphan_rec_v2)) {
1944 (struct lu_orphan_ent_v2 *)idxpage->lip_entries+
1947 lustre_swab_orphan_ent_v2(it->ooi_ent);
1950 (struct lu_orphan_ent *)idxpage->lip_entries +
1953 lustre_swab_orphan_ent(it->ooi_ent);
1959 rc = osp_it_next_page(env, di);
1966 int osp_it_get(const struct lu_env *env, struct dt_it *di,
1967 const struct dt_key *key)
1972 void osp_it_put(const struct lu_env *env, struct dt_it *di)
1976 static struct dt_key *osp_orphan_it_key(const struct lu_env *env,
1977 const struct dt_it *di)
1979 struct osp_it *it = (struct osp_it *)di;
1980 struct lu_orphan_ent *ent = (struct lu_orphan_ent *)it->ooi_ent;
1982 if (likely(ent != NULL))
1983 return (struct dt_key *)(&ent->loe_key);
1988 static int osp_orphan_it_key_size(const struct lu_env *env,
1989 const struct dt_it *di)
1991 return sizeof(struct lu_fid);
1994 static int osp_orphan_it_rec(const struct lu_env *env, const struct dt_it *di,
1995 struct dt_rec *rec, __u32 attr)
1997 struct osp_it *it = (struct osp_it *)di;
1999 if (likely(it->ooi_ent)) {
2000 if (it->ooi_rec_size == sizeof(struct lu_orphan_rec_v2)) {
2001 struct lu_orphan_ent_v2 *ent =
2002 (struct lu_orphan_ent_v2 *)it->ooi_ent;
2004 *(struct lu_orphan_rec_v2 *)rec = ent->loe_rec;
2006 struct lu_orphan_ent *ent =
2007 (struct lu_orphan_ent *)it->ooi_ent;
2009 *(struct lu_orphan_rec *)rec = ent->loe_rec;
2017 __u64 osp_it_store(const struct lu_env *env, const struct dt_it *di)
2019 struct osp_it *it = (struct osp_it *)di;
2021 return it->ooi_next;
 * Move the iteration cursor to the specified position (cookie).
2027 * \param[in] env pointer to the thread context
2028 * \param[in] di pointer to the iteration structure
2029 * \param[in] hash the specified position
 * \retval positive number when positioned at exactly the specified position
2033 * \retval 0 for arriving at the end of the iteration
2034 * \retval negative error number on failure
2036 int osp_orphan_it_load(const struct lu_env *env, const struct dt_it *di,
2039 struct osp_it *it = (struct osp_it *)di;
2042 it->ooi_next = hash;
2043 rc = osp_orphan_it_next(env, (struct dt_it *)di);
2053 int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
2059 static const struct dt_index_operations osp_orphan_index_ops = {
2060 .dio_lookup = osp_orphan_index_lookup,
2061 .dio_declare_insert = osp_orphan_index_declare_insert,
2062 .dio_insert = osp_orphan_index_insert,
2063 .dio_declare_delete = osp_orphan_index_declare_delete,
2064 .dio_delete = osp_orphan_index_delete,
2066 .init = osp_it_init,
2067 .fini = osp_it_fini,
2068 .next = osp_orphan_it_next,
2071 .key = osp_orphan_it_key,
2072 .key_size = osp_orphan_it_key_size,
2073 .rec = osp_orphan_it_rec,
2074 .store = osp_it_store,
2075 .load = osp_orphan_it_load,
2076 .key_rec = osp_it_key_rec,
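
/*
 * A sketch of how an LFSCK-style user is expected to walk the orphan index
 * above through the generic dt iteration API ('iops', 'it', 'rec' and the
 * error handling are illustrative only):
 *
 *     it = iops->init(env, dt, attr);
 *     rc = iops->load(env, it, 0);
 *     if (rc > 0) {                              (positioned on a record)
 *             do {
 *                     key = iops->key(env, it);
 *                     iops->rec(env, it, (struct dt_rec *)rec, attr);
 *             } while (iops->next(env, it) == 0);   (0 == one more record)
 *     }
 *     iops->put(env, it);
 *     iops->fini(env, it);
 */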
2081 * Implement OSP layer dt_object_operations::do_index_try() interface.
2083 * Negotiate the index type.
2085 * If the target index is an IDIF object, then use osp_orphan_index_ops.
2086 * Otherwise, assign osp_md_index_ops to the dt_object::do_index_ops.
2087 * (\see lustre/include/lustre_fid.h for IDIF.)
2089 * \param[in] env pointer to the thread context
2090 * \param[in] dt pointer to the OSP layer dt_object
2091 * \param[in] feat unused
2093 * \retval 0 for success
2095 static int osp_index_try(const struct lu_env *env,
2096 struct dt_object *dt,
2097 const struct dt_index_features *feat)
2099 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
2101 if (fid_is_last_id(fid) && fid_is_idif(fid))
2102 dt->do_index_ops = &osp_orphan_index_ops;
2104 dt->do_index_ops = &osp_md_index_ops;
2108 static struct dt_object_operations osp_obj_ops = {
2109 .do_declare_attr_get = osp_declare_attr_get,
2110 .do_attr_get = osp_attr_get,
2111 .do_declare_attr_set = osp_declare_attr_set,
2112 .do_attr_set = osp_attr_set,
2113 .do_declare_xattr_get = osp_declare_xattr_get,
2114 .do_xattr_get = osp_xattr_get,
2115 .do_declare_xattr_set = osp_declare_xattr_set,
2116 .do_xattr_set = osp_xattr_set,
2117 .do_declare_create = osp_declare_create,
2118 .do_create = osp_create,
2119 .do_declare_destroy = osp_declare_destroy,
2120 .do_destroy = osp_destroy,
2121 .do_index_try = osp_index_try,
2125 * Implement OSP layer lu_object_operations::loo_object_init() interface.
2127 * Initialize the object.
2129 * If it is a remote MDT object, then call do_attr_get() to fetch
2130 * the attribute from the peer.
2132 * \param[in] env pointer to the thread context
2133 * \param[in] o pointer to the OSP layer lu_object
2134 * \param[in] conf unused
2136 * \retval 0 for success
2137 * \retval negative error number on failure
2139 static int osp_object_init(const struct lu_env *env, struct lu_object *o,
2140 const struct lu_object_conf *conf)
2142 struct osp_object *po = lu2osp_obj(o);
2146 spin_lock_init(&po->opo_lock);
2147 o->lo_header->loh_attr |= LOHA_REMOTE;
2148 INIT_LIST_HEAD(&po->opo_xattr_list);
2149 INIT_LIST_HEAD(&po->opo_invalidate_cb_list);
2151 if (is_ost_obj(o)) {
2152 po->opo_obj.do_ops = &osp_obj_ops;
2154 struct lu_attr *la = &osp_env_info(env)->osi_attr;
2156 po->opo_obj.do_ops = &osp_md_obj_ops;
2157 po->opo_obj.do_body_ops = &osp_md_body_ops;
2159 if (conf != NULL && conf->loc_flags & LOC_F_NEW) {
2160 po->opo_non_exist = 1;
2162 rc = po->opo_obj.do_ops->do_attr_get(env, lu2dt_obj(o),
2165 o->lo_header->loh_attr |=
2166 LOHA_EXISTS | (la->la_mode & S_IFMT);
2167 if (rc == -ENOENT) {
2168 po->opo_non_exist = 1;
2172 init_rwsem(&po->opo_sem);
2178 * Implement OSP layer lu_object_operations::loo_object_free() interface.
2180 * Finalize the object.
 * If the OSP object has an attributes cache, then destroy the cache.
 * Finally, free the object.
2185 * \param[in] env pointer to the thread context
2186 * \param[in] o pointer to the OSP layer lu_object
2188 static void osp_object_free(const struct lu_env *env, struct lu_object *o)
2190 struct osp_object *obj = lu2osp_obj(o);
2191 struct lu_object_header *h = o->lo_header;
2192 struct osp_xattr_entry *oxe;
2193 struct osp_xattr_entry *tmp;
2196 dt_object_fini(&obj->opo_obj);
2197 lu_object_header_fini(h);
2198 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
2199 list_del(&oxe->oxe_list);
2200 count = atomic_read(&oxe->oxe_ref);
2201 LASSERTF(count == 1,
2202 "Still has %d users on the xattr entry %.*s\n",
2203 count-1, (int)oxe->oxe_namelen, oxe->oxe_buf);
2205 OBD_FREE(oxe, oxe->oxe_buflen);
2207 OBD_SLAB_FREE_PTR(obj, osp_object_kmem);
2211 * Implement OSP layer lu_object_operations::loo_object_release() interface.
2213 * Cleanup (not free) the object.
 * If it is a reserved object that failed to be created, or it is an OST
 * object, then mark the object as non-cached.
2218 * \param[in] env pointer to the thread context
2219 * \param[in] o pointer to the OSP layer lu_object
2221 static void osp_object_release(const struct lu_env *env, struct lu_object *o)
2223 struct osp_object *po = lu2osp_obj(o);
2224 struct osp_device *d = lu2osp_dev(o->lo_dev);
 * release the reservation if the object was declared but not created;
 * this may require lu_object_put() in LOD
2232 if (unlikely(po->opo_reserved)) {
2233 LASSERT(d->opd_pre != NULL);
2234 LASSERT(d->opd_pre_reserved > 0);
2235 spin_lock(&d->opd_pre_lock);
2236 d->opd_pre_reserved--;
2237 spin_unlock(&d->opd_pre_lock);
2239 /* not needed in cache any more */
2240 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
/* XXX: Currently, do NOT cache OST-objects on the MDT because:
 * 1. they are not often accessed on the MDT;
 * 2. it avoids upper layers (such as LFSCK) loading too many
 *    once-used OST-objects. */
2248 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
2253 static int osp_object_print(const struct lu_env *env, void *cookie,
2254 lu_printer_t p, const struct lu_object *l)
2256 const struct osp_object *o = lu2osp_obj((struct lu_object *)l);
2258 return (*p)(env, cookie, LUSTRE_OSP_NAME"-object@%p", o);
2261 static int osp_object_invariant(const struct lu_object *o)
2266 struct lu_object_operations osp_lu_obj_ops = {
2267 .loo_object_init = osp_object_init,
2268 .loo_object_free = osp_object_free,
2269 .loo_object_release = osp_object_release,
2270 .loo_object_print = osp_object_print,
2271 .loo_object_invariant = osp_object_invariant