4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2012, 2015, Intel Corporation.
33 * lustre/osp/osp_object.c
35 * Lustre OST Proxy Device (OSP) is the agent on the local MDT for the OST
38 * OSP object attributes cache
39 * ---------------------------
40 * OSP object is the stub of the remote OST-object or MDT-object. Both the
41 * attribute and the extended attributes are stored on the peer side remotely.
42 * It is inefficient to send RPC to peer to fetch those attributes when every
43 * get_attr()/get_xattr() called. For a large system, the LFSCK synchronous
44 * mode scanning is prohibitively inefficient.
46 * So the OSP maintains the OSP object attributes cache to cache some
47 * attributes on the local MDT. The cache is organized against the OSP
50 * struct osp_xattr_entry {
51 * struct list_head oxe_list;
57 * unsigned int oxe_exist:1,
64 * struct lu_attr opo_attr;
65 * struct list_head opo_xattr_list;
66 * spinlock_t opo_lock;
70 * The basic attributes, such as owner/mode/flags, are stored in the
71 * osp_object::opo_attr. The extended attributes will be stored
72 * as osp_xattr_entry. Every extended attribute has an independent
73 * osp_xattr_entry, and all the osp_xattr_entry are linked into the
74 * osp_object::opo_xattr_list. The OSP object attributes cache
75 * is protected by the osp_object::opo_lock.
77 * Not all OSP objects have an attributes cache because maintaining
78 * the cache requires some resources. Currently, the OSP object
79 * attributes cache will be initialized when the attributes or the
80 * extended attributes are pre-fetched via osp_declare_attr_get()
81 * or osp_declare_xattr_get(). That is usually for LFSCK purpose,
82 * but it also can be shared by others.
85 * XXX: Do NOT prepare the OUT RPC for remote transactions in the declare
86 * phase (please refer to the comment of osp_trans_create() for remote
87 * transactions).
88 * According to our current transaction/dt_object_lock framework (to make
89 * the cross-MDTs modification for DNE1 to be workable), the transaction
90 * sponsor will start the transaction firstly, then try to acquire related
91 * dt_object_lock if needed. Under such rules, if we want to prepare the
92 * OUT RPC in the transaction declare phase, then related attr/xattr
93 * should be known without dt_object_lock. But such condition maybe not
94 * true for some remote transaction case. For example:
96 * For linkEA repairing (by LFSCK) case, before the LFSCK thread obtained
97 * the dt_object_lock on the target MDT-object, it cannot know whether
98 * the MDT-object has a linkEA or not, nor whether the linkEA is valid.
100 * Since the LFSCK thread cannot hold dt_object_lock before the remote
101 * transaction start (otherwise there will be some potential deadlock),
102 * it cannot prepare related OUT RPC for repairing during the declare
103 * phase as other normal transactions do.
105 * To resolve the trouble, we will make OSP to prepare related OUT RPC
106 * after remote transaction started, and trigger the remote updating
107 * (send RPC) when trans_stop. Then the up layer users, such as LFSCK,
108 * can follow the general rule to handle trans_start/dt_object_lock
109 * for repairing linkEA inconsistency without distinguishing remote
112 * In fact, above solution for remote transaction should be the normal
113 * model without considering DNE1. The trouble brought by DNE1 will be
114 * resolved in DNE2. At that time, this patch can be removed.
117 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
118 * Author: Mikhail Pershin <mike.tappro@intel.com>
121 #define DEBUG_SUBSYSTEM S_MDS
123 #include <lustre_obdo.h>
124 #include <lustre_swab.h>
126 #include "osp_internal.h"
128 static inline __u32 osp_dev2node(struct osp_device *osp)
130 return osp->opd_storage->dd_lu_dev.ld_site->ld_seq_site->ss_node_id;
133 static inline bool is_ost_obj(struct lu_object *lo)
135 return !lu2osp_dev(lo->lo_dev)->opd_connect_mdt;
139 * Assign FID to the OST object.
141 * This function will assign the FID to the OST object of a striped file.
143 * \param[in] env pointer to the thread context
144 * \param[in] d pointer to the OSP device
145 * \param[in] o pointer to the OSP object that the FID will be
148 static void osp_object_assign_fid(const struct lu_env *env,
149 struct osp_device *d, struct osp_object *o)
151 struct osp_thread_info *osi = osp_env_info(env);
153 LASSERT(fid_is_zero(lu_object_fid(&o->opo_obj.do_lu)));
154 LASSERT(o->opo_reserved);
157 osp_precreate_get_fid(env, d, &osi->osi_fid);
159 lu_object_assign_fid(env, &o->opo_obj.do_lu, &osi->osi_fid);
163 * Find the named extended attribute in the OSP object attributes cache.
165 * The caller should take the osp_object::opo_lock before calling
168 * \param[in] obj pointer to the OSP object
169 * \param[in] name the name of the extended attribute
170 * \param[in] namelen the name length of the extended attribute
172 * \retval pointer to the found extended attribute entry
173 * \retval NULL if the specified extended attribute is not
176 static struct osp_xattr_entry *
177 osp_oac_xattr_find_locked(struct osp_object *obj, const char *name,
180 struct osp_xattr_entry *oxe;
182 list_for_each_entry(oxe, &obj->opo_xattr_list, oxe_list) {
183 if (namelen == oxe->oxe_namelen &&
184 strncmp(name, oxe->oxe_buf, namelen) == 0)
192 * Find the named extended attribute in the OSP object attributes cache.
194 * Call osp_oac_xattr_find_locked() with the osp_object::opo_lock held.
196 * \param[in] obj pointer to the OSP object
197 * \param[in] name the name of the extended attribute
198 * \param[in] unlink true if the extended attribute entry is to be removed
201 * \retval pointer to the found extended attribute entry
202 * \retval NULL if the specified extended attribute is not
205 static struct osp_xattr_entry *osp_oac_xattr_find(struct osp_object *obj,
206 const char *name, bool unlink)
208 struct osp_xattr_entry *oxe = NULL;
210 spin_lock(&obj->opo_lock);
211 oxe = osp_oac_xattr_find_locked(obj, name, strlen(name));
214 list_del_init(&oxe->oxe_list);
216 atomic_inc(&oxe->oxe_ref);
218 spin_unlock(&obj->opo_lock);
224 * Find the named extended attribute in the OSP object attributes cache.
226 * If it is not in the cache, then add an empty entry (that will be
227 * filled later) to cache with the given name.
229 * \param[in] obj pointer to the OSP object
230 * \param[in] name the name of the extended attribute
231 * \param[in] len the length of the extended attribute value
233 * \retval pointer to the found or new-created extended
235 * \retval NULL if the specified extended attribute is not in the
236 * cache or fail to add new empty entry to the cache.
238 static struct osp_xattr_entry *
239 osp_oac_xattr_find_or_add(struct osp_object *obj, const char *name, size_t len)
241 struct osp_xattr_entry *oxe;
242 struct osp_xattr_entry *tmp = NULL;
243 size_t namelen = strlen(name);
244 size_t size = sizeof(*oxe) + namelen + 1 + len;
246 oxe = osp_oac_xattr_find(obj, name, false);
250 OBD_ALLOC(oxe, size);
251 if (unlikely(oxe == NULL))
254 INIT_LIST_HEAD(&oxe->oxe_list);
255 oxe->oxe_buflen = size;
256 oxe->oxe_namelen = namelen;
257 memcpy(oxe->oxe_buf, name, namelen);
258 oxe->oxe_value = oxe->oxe_buf + namelen + 1;
259 /* One ref is for the caller, the other is for the entry on the list. */
260 atomic_set(&oxe->oxe_ref, 2);
262 spin_lock(&obj->opo_lock);
263 tmp = osp_oac_xattr_find_locked(obj, name, namelen);
265 list_add_tail(&oxe->oxe_list, &obj->opo_xattr_list);
267 atomic_inc(&tmp->oxe_ref);
268 spin_unlock(&obj->opo_lock);
279 * Add the given extended attribute to the OSP object attributes cache.
281 * If there is an old extended attributed entry with the same name,
282 * remove it from the cache and return it via the parameter \a poxe.
284 * \param[in] obj pointer to the OSP object
285 * \param[in,out] poxe double pointer to the OSP object extended attribute
286 * entry: the new extended attribute entry is transferred
287 * via such pointer target, and if old the extended
288 * attribute entry exists, then it will be returned back
289 * via such pointer target.
290 * \param[in] len the length of the (new) extended attribute value
292 * \retval pointer to the new extended attribute entry
293 * \retval NULL for failure cases.
295 static struct osp_xattr_entry *
296 osp_oac_xattr_replace(struct osp_object *obj,
297 struct osp_xattr_entry **poxe, size_t len)
299 struct osp_xattr_entry *oxe;
300 size_t namelen = (*poxe)->oxe_namelen;
301 size_t size = sizeof(*oxe) + namelen + 1 + len;
303 OBD_ALLOC(oxe, size);
304 if (unlikely(oxe == NULL))
307 INIT_LIST_HEAD(&oxe->oxe_list);
308 oxe->oxe_buflen = size;
309 oxe->oxe_namelen = namelen;
310 memcpy(oxe->oxe_buf, (*poxe)->oxe_buf, namelen);
311 oxe->oxe_value = oxe->oxe_buf + namelen + 1;
312 /* One ref is for the caller, the other is for the entry on the list. */
313 atomic_set(&oxe->oxe_ref, 2);
315 spin_lock(&obj->opo_lock);
316 *poxe = osp_oac_xattr_find_locked(obj, oxe->oxe_buf, namelen);
317 LASSERT(*poxe != NULL);
319 list_del_init(&(*poxe)->oxe_list);
320 list_add_tail(&oxe->oxe_list, &obj->opo_xattr_list);
321 spin_unlock(&obj->opo_lock);
327 * Release reference from the OSP object extended attribute entry.
329 * If it is the last reference, then free the entry.
331 * \param[in] oxe pointer to the OSP object extended attribute entry.
333 static inline void osp_oac_xattr_put(struct osp_xattr_entry *oxe)
335 if (atomic_dec_and_test(&oxe->oxe_ref)) {
336 LASSERT(list_empty(&oxe->oxe_list));
338 OBD_FREE(oxe, oxe->oxe_buflen);
343 * Parse the OSP object attribute from the RPC reply.
345 * If the attribute is valid, then it will be added to the OSP object
348 * \param[in] env pointer to the thread context
349 * \param[in] reply pointer to the RPC reply
350 * \param[in] req pointer to the RPC request
351 * \param[out] attr pointer to buffer to hold the output attribute
352 * \param[in] obj pointer to the OSP object
353 * \param[in] index the index of the attribute buffer in the reply
355 * \retval 0 for success
356 * \retval negative error number on failure
358 static int osp_get_attr_from_reply(const struct lu_env *env,
359 struct object_update_reply *reply,
360 struct ptlrpc_request *req,
361 struct lu_attr *attr,
362 struct osp_object *obj, int index)
364 struct osp_thread_info *osi = osp_env_info(env);
365 struct lu_buf *rbuf = &osi->osi_lb2;
366 struct obdo *lobdo = &osi->osi_obdo;
370 rc = object_update_result_data_get(reply, rbuf, index);
374 wobdo = rbuf->lb_buf;
375 if (rbuf->lb_len != sizeof(*wobdo))
378 LASSERT(req != NULL);
379 if (ptlrpc_req_need_swab(req))
380 lustre_swab_obdo(wobdo);
382 lustre_get_wire_obdo(NULL, lobdo, wobdo);
383 spin_lock(&obj->opo_lock);
384 la_from_obdo(&obj->opo_attr, lobdo, lobdo->o_valid);
386 *attr = obj->opo_attr;
387 spin_unlock(&obj->opo_lock);
393 * Interpreter function for getting OSP object attribute asynchronously.
395 * Called to interpret the result of an async mode RPC for getting the
396 * OSP object attribute.
398 * \param[in] env pointer to the thread context
399 * \param[in] reply pointer to the RPC reply
400 * \param[in] req pointer to the RPC request
401 * \param[in] obj pointer to the OSP object
402 * \param[out] data pointer to buffer to hold the output attribute
403 * \param[in] index the index of the attribute buffer in the reply
404 * \param[in] rc the result for handling the RPC
406 * \retval 0 for success
407 * \retval negative error number on failure
409 static int osp_attr_get_interpterer(const struct lu_env *env,
410 struct object_update_reply *reply,
411 struct ptlrpc_request *req,
412 struct osp_object *obj,
413 void *data, int index, int rc)
415 struct lu_attr *attr = data;
418 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
419 obj->opo_non_exist = 0;
421 return osp_get_attr_from_reply(env, reply, req, NULL, obj,
425 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
426 obj->opo_non_exist = 1;
429 spin_lock(&obj->opo_lock);
431 spin_unlock(&obj->opo_lock);
438 * Implement OSP layer dt_object_operations::do_declare_attr_get() interface.
440 * Declare that the caller will get attribute from the specified OST object.
442 * This function adds an Object Unified Target (OUT) sub-request to the per-OSP
443 * based shared asynchronous request queue. The osp_attr_get_interpterer()
444 * is registered as the interpreter function to handle the result of this
447 * \param[in] env pointer to the thread context
448 * \param[in] dt pointer to the OSP layer dt_object
450 * \retval 0 for success
451 * \retval negative error number on failure
453 static int osp_declare_attr_get(const struct lu_env *env, struct dt_object *dt)
455 struct osp_object *obj = dt2osp_obj(dt);
456 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
459 mutex_lock(&osp->opd_async_requests_mutex);
460 rc = osp_insert_async_request(env, OUT_ATTR_GET, obj, 0, NULL, NULL,
461 &obj->opo_attr, sizeof(struct obdo),
462 osp_attr_get_interpterer);
463 mutex_unlock(&osp->opd_async_requests_mutex);
469 * Implement OSP layer dt_object_operations::do_attr_get() interface.
471 * Get attribute from the specified MDT/OST object.
473 * If the attribute is in the OSP object attributes cache, then return
474 * the cached attribute directly. Otherwise it will trigger an OUT RPC
475 * to the peer to get the attribute synchronously, if successful, add it
476 * to the OSP attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
478 * \param[in] env pointer to the thread context
479 * \param[in] dt pointer to the OSP layer dt_object
480 * \param[out] attr pointer to the buffer to hold the output attribute
482 * \retval 0 for success
483 * \retval negative error number on failure
485 int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
486 struct lu_attr *attr)
488 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
489 struct osp_object *obj = dt2osp_obj(dt);
490 struct dt_device *dev = &osp->opd_dt_dev;
491 struct osp_update_request *update;
492 struct object_update_reply *reply;
493 struct ptlrpc_request *req = NULL;
497 if (is_ost_obj(&dt->do_lu) && obj->opo_non_exist)
500 spin_lock(&obj->opo_lock);
501 if (obj->opo_attr.la_valid != 0 && !obj->opo_stale) {
502 *attr = obj->opo_attr;
503 spin_unlock(&obj->opo_lock);
507 spin_unlock(&obj->opo_lock);
509 update = osp_update_request_create(dev);
511 RETURN(PTR_ERR(update));
513 rc = osp_update_rpc_pack(env, attr_get, update, OUT_ATTR_GET,
514 lu_object_fid(&dt->do_lu));
516 CERROR("%s: Insert update error "DFID": rc = %d\n",
517 dev->dd_lu_dev.ld_obd->obd_name,
518 PFID(lu_object_fid(&dt->do_lu)), rc);
523 rc = osp_remote_sync(env, osp, update, &req);
526 osp2lu_obj(obj)->lo_header->loh_attr &= ~LOHA_EXISTS;
527 obj->opo_non_exist = 1;
529 CERROR("%s:osp_attr_get update error "DFID": rc = %d\n",
530 dev->dd_lu_dev.ld_obd->obd_name,
531 PFID(lu_object_fid(&dt->do_lu)), rc);
537 osp2lu_obj(obj)->lo_header->loh_attr |= LOHA_EXISTS;
538 obj->opo_non_exist = 0;
539 reply = req_capsule_server_sized_get(&req->rq_pill,
540 &RMF_OUT_UPDATE_REPLY,
541 OUT_UPDATE_REPLY_SIZE);
542 if (reply == NULL || reply->ourp_magic != UPDATE_REPLY_MAGIC)
543 GOTO(out, rc = -EPROTO);
545 rc = osp_get_attr_from_reply(env, reply, req, attr, obj, 0);
549 spin_lock(&obj->opo_lock);
551 spin_unlock(&obj->opo_lock);
557 ptlrpc_req_finished(req);
559 osp_update_request_destroy(env, update);
565 * Implement OSP layer dt_object_operations::do_declare_attr_set() interface.
567 * If the transaction is not remote one, then declare the credits that will
568 * be used for the subsequent llog record for the object's attributes.
570 * \param[in] env pointer to the thread context
571 * \param[in] dt pointer to the OSP layer dt_object
572 * \param[in] attr pointer to the attribute to be set
573 * \param[in] th pointer to the transaction handler
575 * \retval 0 for success
576 * \retval negative error number on failure
578 static int osp_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
579 const struct lu_attr *attr, struct thandle *th)
581 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
582 struct osp_object *o = dt2osp_obj(dt);
585 if (is_only_remote_trans(th))
586 return osp_md_declare_attr_set(env, dt, attr, th);
588 * Usually we don't allow server stack to manipulate size
589 * but there is a special case when striping is created
590 * late, after stripeless file got truncated to non-zero.
592 * In this case we do the following:
594 * 1) grab id in declare - this can lead to leaked OST objects
595 * but we don't currently have proper mechanism and the only
596 * options we have are to do truncate RPC holding transaction
597 * open (very bad) or to grab id in declare at cost of leaked
598 * OST object in same very rare unfortunate case (just bad)
599 * notice 1.6-2.0 do assignment outside of running transaction
600 * all the time, meaning many more chances for leaked objects.
602 * 2) send synchronous truncate RPC with just assigned id
605 /* there are few places in MDD code still passing NULL
606 * XXX: to be fixed soon */
610 if (attr->la_valid & LA_SIZE && attr->la_size > 0 &&
611 fid_is_zero(lu_object_fid(&o->opo_obj.do_lu))) {
612 LASSERT(!dt_object_exists(dt));
613 osp_object_assign_fid(env, d, o);
614 rc = osp_object_truncate(env, dt, attr->la_size);
619 if (!(attr->la_valid & (LA_UID | LA_GID)))
622 /* track all UID/GID changes via llog */
623 rc = osp_sync_declare_add(env, o, MDS_SETATTR64_REC, th);
629 * Implement OSP layer dt_object_operations::do_attr_set() interface.
631 * Set attribute to the specified OST object.
633 * If the transaction is a remote one, then add OUT_ATTR_SET sub-request
634 * in the OUT RPC that will be flushed when the remote transaction stop.
635 * Otherwise, it will generate a MDS_SETATTR64_REC record in the llog that
636 * will be handled by a dedicated thread asynchronously.
638 * If the attribute entry exists in the OSP object attributes cache,
639 * then update the cached attribute according to given attribute.
641 * \param[in] env pointer to the thread context
642 * \param[in] dt pointer to the OSP layer dt_object
643 * \param[in] attr pointer to the attribute to be set
644 * \param[in] th pointer to the transaction handler
646 * \retval 0 for success
647 * \retval negative error number on failure
649 static int osp_attr_set(const struct lu_env *env, struct dt_object *dt,
650 const struct lu_attr *attr, struct thandle *th)
652 struct osp_object *o = dt2osp_obj(dt);
656 /* we're interested in uid/gid changes only */
657 if (!(attr->la_valid & (LA_UID | LA_GID)))
660 if (!is_only_remote_trans(th)) {
661 rc = osp_sync_add(env, o, MDS_SETATTR64_REC, th, attr);
662 /* XXX: send new uid/gid to OST ASAP? */
666 /* It is for OST-object attr_set directly without updating
667 * local MDT-object attribute. It is usually used by LFSCK. */
668 rc = osp_md_attr_set(env, dt, attr, th);
669 CDEBUG(D_INFO, "(1) set attr "DFID": rc = %d\n",
670 PFID(&dt->do_lu.lo_header->loh_fid), rc);
675 /* Update the OSP object attributes cache. */
677 spin_lock(&o->opo_lock);
678 if (attr->la_valid & LA_UID) {
679 la->la_uid = attr->la_uid;
680 la->la_valid |= LA_UID;
683 if (attr->la_valid & LA_GID) {
684 la->la_gid = attr->la_gid;
685 la->la_valid |= LA_GID;
687 spin_unlock(&o->opo_lock);
694 * Interpreter function for getting OSP object extended attribute asynchronously
696 * Called to interpret the result of an async mode RPC for getting the
697 * OSP object extended attribute.
699 * \param[in] env pointer to the thread context
700 * \param[in] reply pointer to the RPC reply
701 * \param[in] req pointer to the RPC request
702 * \param[in] obj pointer to the OSP object
703 * \param[out] data pointer to OSP object attributes cache
704 * \param[in] index the index of the attribute buffer in the reply
705 * \param[in] rc the result for handling the RPC
707 * \retval 0 for success
708 * \retval negative error number on failure
710 static int osp_xattr_get_interpterer(const struct lu_env *env,
711 struct object_update_reply *reply,
712 struct ptlrpc_request *req,
713 struct osp_object *obj,
714 void *data, int index, int rc)
716 struct osp_xattr_entry *oxe = data;
717 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
720 size_t len = sizeof(*oxe) + oxe->oxe_namelen + 1;
722 rc = object_update_result_data_get(reply, rbuf, index);
723 if (rc < 0 || rbuf->lb_len > (oxe->oxe_buflen - len)) {
724 spin_lock(&obj->opo_lock);
726 spin_unlock(&obj->opo_lock);
727 osp_oac_xattr_put(oxe);
729 return rc < 0 ? rc : -ERANGE;
732 spin_lock(&obj->opo_lock);
733 oxe->oxe_vallen = rbuf->lb_len;
734 memcpy(oxe->oxe_value, rbuf->lb_buf, rbuf->lb_len);
737 spin_unlock(&obj->opo_lock);
738 } else if (rc == -ENOENT || rc == -ENODATA) {
739 spin_lock(&obj->opo_lock);
742 spin_unlock(&obj->opo_lock);
744 spin_lock(&obj->opo_lock);
746 spin_unlock(&obj->opo_lock);
749 osp_oac_xattr_put(oxe);
755 * Implement OSP dt_object_operations::do_declare_xattr_get() interface.
757 * Declare that the caller will get extended attribute from the specified
760 * This function will add an OUT_XATTR_GET sub-request to the per OSP
761 * based shared asynchronous request queue with the interpreter function:
762 * osp_xattr_get_interpterer().
764 * \param[in] env pointer to the thread context
765 * \param[in] dt pointer to the OSP layer dt_object
766 * \param[out] buf pointer to the lu_buf to hold the extended attribute
767 * \param[in] name the name for the expected extended attribute
769 * \retval 0 for success
770 * \retval negative error number on failure
772 static int osp_declare_xattr_get(const struct lu_env *env, struct dt_object *dt,
773 struct lu_buf *buf, const char *name)
775 struct osp_object *obj = dt2osp_obj(dt);
776 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
777 struct osp_xattr_entry *oxe;
778 __u16 namelen = strlen(name);
781 LASSERT(buf != NULL);
782 LASSERT(name != NULL);
784 /* If only for xattr size, return directly. */
785 if (unlikely(buf->lb_len == 0))
788 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
792 mutex_lock(&osp->opd_async_requests_mutex);
793 rc = osp_insert_async_request(env, OUT_XATTR_GET, obj, 1,
794 &namelen, (const void **)&name,
796 osp_xattr_get_interpterer);
798 mutex_unlock(&osp->opd_async_requests_mutex);
799 osp_oac_xattr_put(oxe);
801 struct osp_update_request *our;
802 struct osp_update_request_sub *ours;
804 /* XXX: Currently, we trigger the batched async OUT
805 * RPC via dt_declare_xattr_get(). It is not
806 * perfect solution, but works well now.
808 * We will improve it in the future. */
809 our = osp->opd_async_requests;
810 ours = osp_current_object_update_request(our);
811 if (ours != NULL && ours->ours_req != NULL &&
812 ours->ours_req->ourq_count > 0) {
813 osp->opd_async_requests = NULL;
814 mutex_unlock(&osp->opd_async_requests_mutex);
815 rc = osp_unplug_async_request(env, osp, our);
817 mutex_unlock(&osp->opd_async_requests_mutex);
825 * Implement OSP layer dt_object_operations::do_xattr_get() interface.
827 * Get extended attribute from the specified MDT/OST object.
829 * If the extended attribute is in the OSP object attributes cache, then
830 * return the cached extended attribute directly. Otherwise it will get
831 * the extended attribute synchronously, if successful, add it to the OSP
832 * attributes cache. (\see lustre/osp/osp_trans.c for OUT RPC.)
834 * There is a race condition: some other thread has added the named extended
835 * attributed entry to the OSP object attributes cache during the current
836 * OUT_XATTR_GET handling. If such case happens, the OSP will replace the
837 * (just) existing extended attribute entry with the new replied one.
839 * \param[in] env pointer to the thread context
840 * \param[in] dt pointer to the OSP layer dt_object
841 * \param[out] buf pointer to the lu_buf to hold the extended attribute
842 * \param[in] name the name for the expected extended attribute
844 * \retval 0 for success
845 * \retval negative error number on failure
847 int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
848 struct lu_buf *buf, const char *name)
850 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
851 struct osp_object *obj = dt2osp_obj(dt);
852 struct dt_device *dev = &osp->opd_dt_dev;
853 struct lu_buf *rbuf = &osp_env_info(env)->osi_lb2;
854 struct osp_update_request *update = NULL;
855 struct ptlrpc_request *req = NULL;
856 struct object_update_reply *reply;
857 struct osp_xattr_entry *oxe = NULL;
858 const char *dname = dt->do_lu.lo_dev->ld_obd->obd_name;
862 LASSERT(buf != NULL);
863 LASSERT(name != NULL);
865 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NETWORK) &&
866 osp->opd_index == cfs_fail_val) {
867 if (is_ost_obj(&dt->do_lu)) {
868 if (osp_dev2node(osp) == cfs_fail_val)
871 if (strcmp(name, XATTR_NAME_LINK) == 0)
876 if (unlikely(obj->opo_non_exist))
879 oxe = osp_oac_xattr_find(obj, name, false);
881 spin_lock(&obj->opo_lock);
882 if (oxe->oxe_ready) {
884 GOTO(unlock, rc = -ENODATA);
886 if (buf->lb_buf == NULL)
887 GOTO(unlock, rc = oxe->oxe_vallen);
889 if (buf->lb_len < oxe->oxe_vallen)
890 GOTO(unlock, rc = -ERANGE);
892 memcpy(buf->lb_buf, oxe->oxe_value,
895 GOTO(unlock, rc = oxe->oxe_vallen);
898 spin_unlock(&obj->opo_lock);
899 osp_oac_xattr_put(oxe);
903 spin_unlock(&obj->opo_lock);
905 update = osp_update_request_create(dev);
907 GOTO(out, rc = PTR_ERR(update));
909 rc = osp_update_rpc_pack(env, xattr_get, update, OUT_XATTR_GET,
910 lu_object_fid(&dt->do_lu), name, buf->lb_len);
912 CERROR("%s: Insert update error "DFID": rc = %d\n",
913 dname, PFID(lu_object_fid(&dt->do_lu)), rc);
917 rc = osp_remote_sync(env, osp, update, &req);
920 dt->do_lu.lo_header->loh_attr &= ~LOHA_EXISTS;
921 obj->opo_non_exist = 1;
925 oxe = osp_oac_xattr_find_or_add(obj, name, buf->lb_len);
928 CWARN("%s: Fail to add xattr (%s) to cache for "
929 DFID" (1): rc = %d\n", dname, name,
930 PFID(lu_object_fid(&dt->do_lu)), rc);
935 spin_lock(&obj->opo_lock);
936 if (rc == -ENOENT || rc == -ENODATA) {
942 spin_unlock(&obj->opo_lock);
947 reply = req_capsule_server_sized_get(&req->rq_pill,
948 &RMF_OUT_UPDATE_REPLY,
949 OUT_UPDATE_REPLY_SIZE);
950 if (reply->ourp_magic != UPDATE_REPLY_MAGIC) {
951 CERROR("%s: Wrong version %x expected %x "DFID": rc = %d\n",
952 dname, reply->ourp_magic, UPDATE_REPLY_MAGIC,
953 PFID(lu_object_fid(&dt->do_lu)), -EPROTO);
955 GOTO(out, rc = -EPROTO);
958 rc = object_update_result_data_get(reply, rbuf, 0);
962 if (buf->lb_buf == NULL)
965 if (unlikely(buf->lb_len < rbuf->lb_len))
966 GOTO(out, rc = -ERANGE);
968 memcpy(buf->lb_buf, rbuf->lb_buf, rbuf->lb_len);
971 oxe = osp_oac_xattr_find_or_add(obj, name, rbuf->lb_len);
973 CWARN("%s: Fail to add xattr (%s) to "
974 "cache for "DFID" (2): rc = %d\n",
975 dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
981 if (oxe->oxe_buflen - oxe->oxe_namelen - 1 < rbuf->lb_len) {
982 struct osp_xattr_entry *old = oxe;
983 struct osp_xattr_entry *tmp;
985 tmp = osp_oac_xattr_replace(obj, &old, rbuf->lb_len);
986 osp_oac_xattr_put(oxe);
989 CWARN("%s: Fail to update xattr (%s) to "
990 "cache for "DFID": rc = %d\n",
991 dname, name, PFID(lu_object_fid(&dt->do_lu)), rc);
992 spin_lock(&obj->opo_lock);
994 spin_unlock(&obj->opo_lock);
999 /* Drop the ref for entry on list. */
1000 osp_oac_xattr_put(old);
1003 spin_lock(&obj->opo_lock);
1004 oxe->oxe_vallen = rbuf->lb_len;
1005 memcpy(oxe->oxe_value, rbuf->lb_buf, rbuf->lb_len);
1008 spin_unlock(&obj->opo_lock);
1014 ptlrpc_req_finished(req);
1016 if (update != NULL && !IS_ERR(update))
1017 osp_update_request_destroy(env, update);
1020 osp_oac_xattr_put(oxe);
/**
 * Implement OSP layer dt_object_operations::do_declare_xattr_set() interface.
 *
 * Declare that the caller will set extended attribute to the specified
 * MDT/OST object.
 *
 * This function creates the OUT update request for the transaction; the
 * OUT_XATTR_SET sub-request itself is packed later by osp_xattr_set().
 *
 * \param[in] env	pointer to the thread context
 * \param[in] dt	pointer to the OSP layer dt_object
 * \param[in] buf	pointer to the lu_buf to hold the extended attribute
 * \param[in] name	the name of the extended attribute to be set
 * \param[in] flag	to indicate the detailed set operation: LU_XATTR_CREATE
 *			or LU_XATTR_REPLACE or others
 * \param[in] th	pointer to the transaction handler
 *
 * \retval		0 for success
 * \retval		negative error number on failure
 */
int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
			  const struct lu_buf *buf, const char *name,
			  int flag, struct thandle *th)
{
	return osp_trans_update_request_create(th);
}
1056 * Implement OSP layer dt_object_operations::do_xattr_set() interface.
1058 * Set extended attribute to the specified MDT/OST object.
1060 * Add an OUT_XATTR_SET sub-request into the OUT RPC that will be flushed in
1061 * the transaction stop. And if the OSP attributes cache is initialized, then
1062 * check whether the name extended attribute entry exists in the cache or not.
1063 * If yes, replace it; otherwise, add the extended attribute to the cache.
1065 * \param[in] env pointer to the thread context
1066 * \param[in] dt pointer to the OSP layer dt_object
1067 * \param[in] buf pointer to the lu_buf to hold the extended attribute
1068 * \param[in] name the name of the extended attribute to be set
1069 * \param[in] fl to indicate the detailed set operation: LU_XATTR_CREATE
1070 * or LU_XATTR_REPLACE or others
1071 * \param[in] th pointer to the transaction handler
1073 * \retval 0 for success
1074 * \retval negative error number on failure
1076 int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
1077 const struct lu_buf *buf, const char *name, int fl,
1080 struct osp_object *o = dt2osp_obj(dt);
1081 struct osp_update_request *update;
1082 struct osp_xattr_entry *oxe;
/* The OUT update request is carried by the transaction handle; it must
 * already have been created by the matching declare phase. */
1086 update = thandle_to_osp_update_request(th);
1087 LASSERT(update != NULL);
1089 CDEBUG(D_INODE, DFID" set xattr '%s' with size %zd\n",
1090 PFID(lu_object_fid(&dt->do_lu)), name, buf->lb_len);
/* Queue the OUT_XATTR_SET sub-request; it is sent when the transaction
 * stops, not here. */
1092 rc = osp_update_rpc_pack(env, xattr_set, update, OUT_XATTR_SET,
1093 lu_object_fid(&dt->do_lu), buf, name, fl);
/* Mirror the new value into the local xattr cache so later
 * xattr_get() calls can be served without an extra RPC.  A cache
 * failure is only warned about — the RPC above is authoritative. */
1097 oxe = osp_oac_xattr_find_or_add(o, name, buf->lb_len);
1099 CWARN("%s: cannot cache xattr '%s' of "DFID"\n",
1100 dt->do_lu.lo_dev->ld_obd->obd_name,
1101 name, PFID(lu_object_fid(&dt->do_lu)));
/* Existing cache entry is too small to hold the new value (buflen
 * covers "name\0value"): allocate a replacement entry. */
1106 if (oxe->oxe_buflen - oxe->oxe_namelen - 1 < buf->lb_len) {
1107 struct osp_xattr_entry *old = oxe;
1108 struct osp_xattr_entry *tmp;
1110 tmp = osp_oac_xattr_replace(o, &old, buf->lb_len);
/* Drop the lookup reference taken by find_or_add above; 'old'
 * still names the list entry for the final put below. */
1111 osp_oac_xattr_put(oxe);
1114 CWARN("%s: cannot update cached xattr '%s' of "DFID"\n",
1115 dt->do_lu.lo_dev->ld_obd->obd_name,
1116 name, PFID(lu_object_fid(&dt->do_lu)));
1117 spin_lock(&o->opo_lock);
1119 spin_unlock(&o->opo_lock);
1124 /* Drop the ref for entry on list. */
1125 osp_oac_xattr_put(old);
/* Copy the value into the (possibly replacement) entry under
 * opo_lock so concurrent cache readers see a consistent pair of
 * (oxe_vallen, oxe_value). */
1128 spin_lock(&o->opo_lock);
1129 oxe->oxe_vallen = buf->lb_len;
1130 memcpy(oxe->oxe_value, buf->lb_buf, buf->lb_len);
1133 spin_unlock(&o->opo_lock);
1134 osp_oac_xattr_put(oxe);
1140 * Implement OSP layer dt_object_operations::do_declare_xattr_del() interface.
1142 * Declare that the caller will delete extended attribute on the specified
1145 * If it is non-remote transaction, it will add an OUT_XATTR_DEL sub-request
1146 * to the OUT RPC that will be flushed when the transaction start. And if the
1147 * name extended attribute entry exists in the OSP attributes cache, then remove
1148 * it from the cache.
1150 * \param[in] env pointer to the thread context
1151 * \param[in] dt pointer to the OSP layer dt_object
1152 * \param[in] name the name of the extended attribute to be set
1153 * \param[in] th pointer to the transaction handler
1155 * \retval 0 for success
1156 * \retval negative error number on failure
1158 int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
1159 const char *name, struct thandle *th)
/* Only ensure the OUT update request exists on the transaction; the
 * actual OUT_XATTR_DEL packing happens in osp_xattr_del(). */
1161 return osp_trans_update_request_create(th);
1165 * Implement OSP layer dt_object_operations::do_xattr_del() interface.
1167 * Delete extended attribute on the specified MDT/OST object.
1169 * If it is remote transaction, it will add an OUT_XATTR_DEL sub-request into
1170 * the OUT RPC that will be flushed when the transaction stop. And if the name
1171 * extended attribute entry exists in the OSP attributes cache, then remove it
1174 * \param[in] env pointer to the thread context
1175 * \param[in] dt pointer to the OSP layer dt_object
1176 * \param[in] name the name of the extended attribute to be deleted
1177 * \param[in] th pointer to the transaction handler
1179 * \retval 0 for success
1180 * \retval negative error number on failure
1182 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
1183 const char *name, struct thandle *th)
1185 struct osp_update_request *update;
1186 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1187 struct osp_object *o = dt2osp_obj(dt);
1188 struct osp_xattr_entry *oxe;
1191 update = thandle_to_osp_update_request(th);
1192 LASSERT(update != NULL);
/* Queue the OUT_XATTR_DEL sub-request for the peer. */
1194 rc = osp_update_rpc_pack(env, xattr_del, update, OUT_XATTR_DEL,
/* Unlink the cached entry (if any) so a later xattr_get() does not
 * return the stale value; the find with 'true' removes it from the
 * list and the put drops the list reference. */
1199 oxe = osp_oac_xattr_find(o, name, true);
1201 /* Drop the ref for entry on list. */
1202 osp_oac_xattr_put(oxe);
1208 * Implement OSP layer dt_object_operations::do_invalidate() interface.
1210 * Invalidate attributes cached on the specified MDT/OST object.
1212 * \param[in] env pointer to the thread context
1213 * \param[in] dt pointer to the OSP layer dt_object
1215 * \retval 0 for success
1216 * \retval negative error number on failure
1218 int osp_invalidate(const struct lu_env *env, struct dt_object *dt)
1220 struct osp_object *obj = dt2osp_obj(dt);
1221 struct osp_xattr_entry *oxe;
1222 struct osp_xattr_entry *tmp;
/* Under opo_lock, drop every cached xattr entry (the put releases the
 * list's reference) and mark the cached basic attributes invalid so
 * the next attr_get() refetches from the peer. */
1225 spin_lock(&obj->opo_lock);
1226 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
1228 list_del_init(&oxe->oxe_list);
1229 osp_oac_xattr_put(oxe);
1231 obj->opo_attr.la_valid = 0;
1233 spin_unlock(&obj->opo_lock);
1239 * Implement OSP layer dt_object_operations::do_declare_create() interface.
1241 * Declare that the caller will create the OST object.
1243 * If the transaction is a remote transaction and the FID for the OST-object
1244 * has been assigned already, then handle it as creating (remote) MDT object
1245 * via osp_md_declare_object_create(). This function is usually used for LFSCK
1246 * to re-create the lost OST object. Otherwise, if it is not replay case, the
1247 * OSP will reserve pre-created object for the subsequent create operation;
1248 * if the MDT side cached pre-created objects are less than some threshold,
1249 * then it will wakeup the pre-create thread.
1251 * \param[in] env pointer to the thread context
1252 * \param[in] dt pointer to the OSP layer dt_object
1253 * \param[in] attr the attribute for the object to be created
1254 * \param[in] hint pointer to the hint for creating the object, such as
1256 * \param[in] dof pointer to the dt_object_format for help the creation
1257 * \param[in] th pointer to the transaction handler
1259 * \retval 0 for success
1260 * \retval negative error number on failure
1262 static int osp_declare_object_create(const struct lu_env *env,
1263 struct dt_object *dt,
1264 struct lu_attr *attr,
1265 struct dt_allocation_hint *hint,
1266 struct dt_object_format *dof,
1269 struct osp_thread_info *osi = osp_env_info(env);
1270 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1271 struct osp_object *o = dt2osp_obj(dt);
1272 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
1273 struct thandle *local_th;
/* LFSCK path: remote transaction with a pre-assigned FID is handled
 * as an MDT-object creation. */
1278 if (is_only_remote_trans(th) && !fid_is_zero(fid)) {
1279 LASSERT(fid_is_sane(fid));
1281 rc = osp_md_declare_object_create(env, dt, attr, hint, dof, th);
1286 /* should happen to non-0 OSP only so that at least one object
1287 * has been already declared in the scenario and LOD should
1289 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OSC_CREATE_FAIL) && d->opd_index == 1)
1292 LASSERT(d->opd_last_used_oid_file);
1295 * There can be gaps in precreated ids and record to unlink llog
1296 * XXX: we do not handle gaps yet, implemented before solution
1297 * was found to be racy, so we disabled that. there is no
1298 * point in making useless but expensive llog declaration.
1300 /* rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th); */
/* Updates of the local last-used-OID file go through a separate
 * storage (local) transaction handle. */
1302 local_th = osp_get_storage_thandle(env, th, d);
1303 if (IS_ERR(local_th))
1304 RETURN(PTR_ERR(local_th));
1306 if (unlikely(!fid_is_zero(fid))) {
1307 /* replay case: caller knows fid */
1308 osi->osi_off = sizeof(osi->osi_id) * d->opd_index;
1309 osi->osi_lb.lb_len = sizeof(osi->osi_id);
1310 osi->osi_lb.lb_buf = NULL;
1312 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1313 &osi->osi_lb, osi->osi_off,
1319 * in declaration we need to reserve object so that we don't block
1320 * awaiting precreation RPC to complete
1322 rc = osp_precreate_reserve(env, d);
1324 * we also need to declare update to local "last used id" file for
1325 * recovery if object isn't used for a reason, we need to release
1326 * reservation, this can be made in osd_object_release()
1329 /* mark id is reserved: in create we don't want to talk
1331 LASSERT(o->opo_reserved == 0);
1332 o->opo_reserved = 1;
1334 /* common for all OSPs file historically */
1335 osi->osi_off = sizeof(osi->osi_id) * d->opd_index;
1336 osi->osi_lb.lb_len = sizeof(osi->osi_id);
1337 osi->osi_lb.lb_buf = NULL;
1338 rc = dt_declare_record_write(env, d->opd_last_used_oid_file,
1339 &osi->osi_lb, osi->osi_off,
1342 /* not needed in the cache anymore */
1343 set_bit(LU_OBJECT_HEARD_BANSHEE,
1344 &dt->do_lu.lo_header->loh_flags);
1350 * Implement OSP layer dt_object_operations::do_create() interface.
1352 * Create the OST object.
1354 * If the transaction is a remote transaction and the FID for the OST-object
1355 * has been assigned already, then handle it as handling MDT object via the
1356 * osp_md_object_create(). For other cases, the OSP will assign FID to the
1357 * object to be created, and update last_used Object ID (OID) file.
1359 * \param[in] env pointer to the thread context
1360 * \param[in] dt pointer to the OSP layer dt_object
1361 * \param[in] attr the attribute for the object to be created
1362 * \param[in] hint pointer to the hint for creating the object, such as
1364 * \param[in] dof pointer to the dt_object_format for help the creation
1365 * \param[in] th pointer to the transaction handler
1367 * \retval 0 for success
1368 * \retval negative error number on failure
1370 static int osp_object_create(const struct lu_env *env, struct dt_object *dt,
1371 struct lu_attr *attr,
1372 struct dt_allocation_hint *hint,
1373 struct dt_object_format *dof, struct thandle *th)
1375 struct osp_thread_info *osi = osp_env_info(env);
1376 struct osp_device *d = lu2osp_dev(dt->do_lu.lo_dev);
1377 struct osp_object *o = dt2osp_obj(dt);
1379 struct lu_fid *fid = &osi->osi_fid;
1380 struct thandle *local_th;
/* LFSCK path: remote transaction with a pre-assigned FID is created
 * as an MDT object; mark the stub as existing on success. */
1383 if (is_only_remote_trans(th) &&
1384 !fid_is_zero(lu_object_fid(&dt->do_lu))) {
1385 LASSERT(fid_is_sane(lu_object_fid(&dt->do_lu)));
1387 rc = osp_md_object_create(env, dt, attr, hint, dof, th);
1389 o->opo_non_exist = 0;
1394 o->opo_non_exist = 0;
1395 if (o->opo_reserved) {
1396 /* regular case, fid is assigned holding transaction open */
1397 osp_object_assign_fid(env, d, o);
1400 memcpy(fid, lu_object_fid(&dt->do_lu), sizeof(*fid));
1402 LASSERTF(fid_is_sane(fid), "fid for osp_object %p is insane"DFID"!\n",
1405 if (!o->opo_reserved) {
1406 /* special case, id was assigned outside of transaction
1407 * see comments in osp_declare_attr_set */
1408 LASSERT(d->opd_pre != NULL);
1409 spin_lock(&d->opd_pre_lock);
1410 osp_update_last_fid(d, fid);
1411 spin_unlock(&d->opd_pre_lock);
1414 CDEBUG(D_INODE, "fid for osp_object %p is "DFID"\n", o, PFID(fid));
1416 /* If the precreate ends, it means it will be ready to rollover to
1417 * the new sequence soon, all the creation should be synchronized,
1418 * otherwise during replay, the replay fid will be inconsistent with
1419 * last_used/create fid */
1420 if (osp_precreate_end_seq(env, d) && osp_is_fid_client(d))
/* The last-used-OID file lives on local storage; write it through a
 * local storage transaction handle. */
1423 local_th = osp_get_storage_thandle(env, th, d);
1424 if (IS_ERR(local_th))
1425 RETURN(PTR_ERR(local_th));
1427 * it's OK if the import is inactive by this moment - id was created
1428 * by OST earlier, we just need to maintain it consistently on the disk
1429 * once import is reconnected, OSP will claim this and other objects
1430 * used and OST either keep them, if they exist or recreate
1433 /* we might have lost precreated objects */
/* NOTE(review): unlikely() wraps only opd_gap_count, so the '> 0'
 * compares the macro's truth value; equivalent to opd_gap_count != 0
 * here, and re-checked under opd_pre_lock just below — but the
 * parenthesization looks unintended.  Confirm against upstream. */
1434 if (unlikely(d->opd_gap_count) > 0) {
1435 LASSERT(d->opd_pre != NULL);
1436 spin_lock(&d->opd_pre_lock);
1437 if (d->opd_gap_count > 0) {
1438 int count = d->opd_gap_count;
1440 ostid_set_id(&osi->osi_oi,
1441 fid_oid(&d->opd_gap_start_fid));
1442 d->opd_gap_count = 0;
1443 spin_unlock(&d->opd_pre_lock);
1445 CDEBUG(D_HA, "Writing gap "DFID"+%d in llog\n",
1446 PFID(&d->opd_gap_start_fid), count);
1447 /* real gap handling is disabled until ORI-692 will be
1448 * fixed, now we only report gaps */
1450 spin_unlock(&d->opd_pre_lock);
1454 /* Only need update last_used oid file, seq file will only be updated
1455 * during seq rollover */
1456 osp_objid_buf_prep(&osi->osi_lb, &osi->osi_off,
1457 &d->opd_last_used_fid.f_oid, d->opd_index);
1459 rc = dt_record_write(env, d->opd_last_used_oid_file, &osi->osi_lb,
1460 &osi->osi_off, local_th);
1462 CDEBUG(D_HA, "%s: Wrote last used FID: "DFID", index %d: %d\n",
1463 d->opd_obd->obd_name, PFID(fid), d->opd_index, rc);
1469 * Implement OSP layer dt_object_operations::do_declare_destroy() interface.
1471 * Declare that the caller will destroy the specified OST object.
1473 * The OST object destroy will be handled via llog asynchronously. This
1474 * function will declare the credits for generating MDS_UNLINK64_REC llog.
1476 * \param[in] env pointer to the thread context
1477 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1478 * \param[in] th pointer to the transaction handler
1480 * \retval 0 for success
1481 * \retval negative error number on failure
1483 int osp_declare_object_destroy(const struct lu_env *env,
1484 struct dt_object *dt, struct thandle *th)
1486 struct osp_object *o = dt2osp_obj(dt);
1487 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Destroy is only supported toward OSTs here; declare llog credits for
 * the asynchronous MDS_UNLINK64_REC record. */
1492 LASSERT(!osp->opd_connect_mdt);
1493 rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
1499 * Implement OSP layer dt_object_operations::do_destroy() interface.
1501 * Destroy the specified OST object.
1503 * The OSP generates a MDS_UNLINK64_REC record in the llog. There
1504 * will be some dedicated thread to handle the llog asynchronously.
1506 * It also marks the object as non-cached.
1508 * \param[in] env pointer to the thread context
1509 * \param[in] dt pointer to the OSP layer dt_object to be destroyed
1510 * \param[in] th pointer to the transaction handler
1512 * \retval 0 for success
1513 * \retval negative error number on failure
1515 static int osp_object_destroy(const struct lu_env *env, struct dt_object *dt,
1518 struct osp_object *o = dt2osp_obj(dt);
1519 struct osp_device *osp = lu2osp_dev(dt->do_lu.lo_dev);
/* Mark the stub non-existent immediately; the real destroy is carried
 * by the llog record queued below. */
1524 o->opo_non_exist = 1;
1526 LASSERT(!osp->opd_connect_mdt);
1527 /* once transaction is committed put proper command on
1528 * the queue going to our OST. */
1529 rc = osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
1533 /* not needed in cache any more */
1534 set_bit(LU_OBJECT_HEARD_BANSHEE, &dt->do_lu.lo_header->loh_flags);
/* dio_lookup handler for the orphan index (see osp_orphan_index_ops);
 * implementation body not fully visible in this chunk. */
1539 static int osp_orphan_index_lookup(const struct lu_env *env,
1540 struct dt_object *dt,
1542 const struct dt_key *key)
/* dio_declare_insert handler for the orphan index (see
 * osp_orphan_index_ops); body not fully visible in this chunk. */
1547 static int osp_orphan_index_declare_insert(const struct lu_env *env,
1548 struct dt_object *dt,
1549 const struct dt_rec *rec,
1550 const struct dt_key *key,
1551 struct thandle *handle)
/* dio_insert handler for the orphan index (see osp_orphan_index_ops);
 * body not fully visible in this chunk. */
1556 static int osp_orphan_index_insert(const struct lu_env *env,
1557 struct dt_object *dt,
1558 const struct dt_rec *rec,
1559 const struct dt_key *key,
1560 struct thandle *handle,
/* dio_declare_delete handler for the orphan index (see
 * osp_orphan_index_ops); body not fully visible in this chunk. */
1566 static int osp_orphan_index_declare_delete(const struct lu_env *env,
1567 struct dt_object *dt,
1568 const struct dt_key *key,
1569 struct thandle *handle)
/* dio_delete handler for the orphan index (see osp_orphan_index_ops);
 * body not fully visible in this chunk. */
1574 static int osp_orphan_index_delete(const struct lu_env *env,
1575 struct dt_object *dt,
1576 const struct dt_key *key,
1577 struct thandle *handle)
1583 * Initialize the OSP layer index iteration.
1585 * \param[in] env pointer to the thread context
1586 * \param[in] dt pointer to the index object to be iterated
1587 * \param[in] attr attributes passed through to the peer via ii_attrs
1589 * \retval pointer to the iteration structure
1590 * \retval negative error number on failure
1592 struct dt_it *osp_it_init(const struct lu_env *env, struct dt_object *dt,
1599 return ERR_PTR(-ENOMEM);
/* -1 means "before the first entry"; advanced by the next() methods. */
1601 it->ooi_pos_ent = -1;
1603 it->ooi_attr = attr;
1605 return (struct dt_it *)it;
1609 * Finalize the OSP layer index iteration.
1611 * \param[in] env pointer to the thread context
1612 * \param[in] di pointer to the iteration structure
1614 void osp_it_fini(const struct lu_env *env, struct dt_it *di)
1616 struct osp_it *it = (struct osp_it *)di;
1617 struct page **pages = it->ooi_pages;
1618 int npages = it->ooi_total_npages;
/* Release any pages still cached from the last fetch; the page that
 * is currently mapped (ooi_cur_page) is unmapped before freeing. */
1621 if (pages != NULL) {
1622 for (i = 0; i < npages; i++) {
1623 if (pages[i] != NULL) {
1624 if (pages[i] == it->ooi_cur_page) {
1626 it->ooi_cur_page = NULL;
1628 __free_page(pages[i]);
1631 OBD_FREE(pages, npages * sizeof(*pages));
1637 * Get more records for the iteration from peer.
1639 * The new records will be filled in an array of pages. The OSP side
1640 * allows 1MB bulk data to be transferred.
1642 * \param[in] env pointer to the thread context
1643 * \param[in] it pointer to the iteration structure
1645 * \retval 0 for success
1646 * \retval negative error number on failure
1648 static int osp_it_fetch(const struct lu_env *env, struct osp_it *it)
1650 struct lu_device *dev = it->ooi_obj->do_lu.lo_dev;
1651 struct osp_device *osp = lu2osp_dev(dev);
1652 struct page **pages;
1653 struct lu_device *top_device;
1654 struct ptlrpc_request *req = NULL;
1655 struct ptlrpc_bulk_desc *desc;
1656 struct idx_info *ii;
/* Cap one fetch at min(OFD_MAX_BRW_SIZE, 1MB) worth of pages. */
1663 npages = min_t(unsigned int, OFD_MAX_BRW_SIZE, 1 << 20);
1664 npages /= PAGE_CACHE_SIZE;
1666 OBD_ALLOC(pages, npages * sizeof(*pages));
1670 it->ooi_pages = pages;
1671 it->ooi_total_npages = npages;
1672 for (i = 0; i < npages; i++) {
1673 pages[i] = alloc_page(GFP_IOFS);
1674 if (pages[i] == NULL)
/* Build the OBD_IDX_READ request against the peer import. */
1678 req = ptlrpc_request_alloc(osp->opd_obd->u.cli.cl_import,
1683 rc = ptlrpc_request_pack(req, LUSTRE_OBD_VERSION, OBD_IDX_READ);
1685 ptlrpc_request_free(req);
1689 /* Let's allow this request during recovery, otherwise
1690 * if the remote target is also in recovery status,
1691 * it might cause deadlock */
1692 top_device = dev->ld_site->ls_top_dev;
1693 if (top_device->ld_obd->obd_recovering)
1694 req->rq_allow_replay = 1;
1696 req->rq_request_portal = OUT_PORTAL;
1697 ii = req_capsule_client_get(&req->rq_pill, &RMF_IDX_INFO);
1698 memset(ii, 0, sizeof(*ii));
1699 if (fid_is_last_id(lu_object_fid(&it->ooi_obj->do_lu))) {
1700 /* LFSCK will iterate orphan object[FID_SEQ_LAYOUT_BTREE,
1701 * ost_index, 0] with LAST_ID FID, so it needs to replace
1702 * the FID with orphan FID here */
1703 ii->ii_fid.f_seq = FID_SEQ_LAYOUT_RBTREE;
1704 ii->ii_fid.f_oid = osp->opd_index;
1705 ii->ii_fid.f_ver = 0;
1706 ii->ii_flags = II_FL_NOHASH;
1707 ii->ii_attrs = osp_dev2node(osp);
1709 ii->ii_fid = *lu_object_fid(&it->ooi_obj->do_lu);
1710 ii->ii_flags = II_FL_NOHASH | II_FL_NOKEY | II_FL_VARKEY |
1712 ii->ii_attrs = it->ooi_attr;
1714 ii->ii_magic = IDX_INFO_MAGIC;
1715 ii->ii_count = npages * LU_PAGE_COUNT;
/* Resume from where the previous fetch stopped. */
1716 ii->ii_hash_start = it->ooi_next;
1718 ptlrpc_at_set_req_timeout(req);
/* Attach the pages as a bulk sink for the reply data. */
1720 desc = ptlrpc_prep_bulk_imp(req, npages, 1,
1721 PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
1723 &ptlrpc_bulk_kiov_pin_ops);
1725 ptlrpc_request_free(req);
1729 for (i = 0; i < npages; i++)
1730 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
1733 ptlrpc_request_set_replen(req);
1734 rc = ptlrpc_queue_wait(req);
1738 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
1739 req->rq_bulk->bd_nob_transferred);
/* Validate the reply header and how many pages it actually filled. */
1744 ii = req_capsule_server_get(&req->rq_pill, &RMF_IDX_INFO);
1745 if (ii->ii_magic != IDX_INFO_MAGIC)
1746 GOTO(out, rc = -EPROTO);
1748 npages = (ii->ii_count + LU_PAGE_COUNT - 1) >>
1749 (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT);
1750 if (npages > it->ooi_total_npages) {
1751 CERROR("%s: returned more pages than expected, %u > %u\n",
1752 osp->opd_obd->obd_name, npages, it->ooi_total_npages);
1753 GOTO(out, rc = -EINVAL);
1756 it->ooi_valid_npages = npages;
1757 if (ptlrpc_rep_need_swab(req))
1760 it->ooi_next = ii->ii_hash_end;
1763 ptlrpc_req_finished(req);
1769 * Move the iteration cursor to the next lu_page.
1771 * One system page (PAGE_SIZE) may contain multiple lu_page (4KB),
1772 * that depends on the LU_PAGE_COUNT. If it is not the last lu_page
1773 * in current system page, then move the iteration cursor to the next
1774 * lu_page in current system page. Otherwise, if there are more system
1775 * pages in the cache, then move the iteration cursor to the next system
1776 * page. If all the cached records (pages) have been iterated, then fetch
1777 * more records via osp_it_fetch().
1779 * \param[in] env pointer to the thread context
1780 * \param[in] di pointer to the iteration structure
1782 * \retval positive for end of the directory
1783 * \retval 0 for success
1784 * \retval negative error number on failure
1786 int osp_it_next_page(const struct lu_env *env, struct dt_it *di)
1788 struct osp_it *it = (struct osp_it *)di;
1789 struct lu_idxpage *idxpage;
1790 struct page **pages;
/* Fast path: entries remain in the current lu_page. */
1796 idxpage = it->ooi_cur_idxpage;
1797 if (idxpage != NULL) {
1798 if (idxpage->lip_nr == 0)
1801 if (it->ooi_pos_ent < idxpage->lip_nr) {
1802 CDEBUG(D_INFO, "ooi_pos %d nr %d\n",
1803 (int)it->ooi_pos_ent, (int)idxpage->lip_nr);
1806 it->ooi_cur_idxpage = NULL;
1807 it->ooi_pos_lu_page++;
/* Advance to the next lu_page within the current system page. */
1810 if (it->ooi_pos_lu_page < LU_PAGE_COUNT) {
1811 it->ooi_cur_idxpage = (void *)it->ooi_cur_page +
1812 LU_PAGE_SIZE * it->ooi_pos_lu_page;
1814 lustre_swab_lip_header(it->ooi_cur_idxpage);
1815 if (it->ooi_cur_idxpage->lip_magic != LIP_MAGIC) {
1816 struct osp_device *osp =
1817 lu2osp_dev(it->ooi_obj->do_lu.lo_dev);
1819 CERROR("%s: invalid magic (%x != %x) for page "
1820 "%d/%d while read layout orphan index\n",
1821 osp->opd_obd->obd_name,
1822 it->ooi_cur_idxpage->lip_magic,
1823 LIP_MAGIC, it->ooi_pos_page,
1824 it->ooi_pos_lu_page);
1825 /* Skip this lu_page next time. */
1826 it->ooi_pos_ent = idxpage->lip_nr - 1;
1829 it->ooi_pos_ent = -1;
/* Current system page exhausted: unmap it and move on. */
1833 kunmap(it->ooi_cur_page);
1834 it->ooi_cur_page = NULL;
1838 pages = it->ooi_pages;
1839 if (it->ooi_pos_page < it->ooi_valid_npages) {
1840 it->ooi_cur_page = kmap(pages[it->ooi_pos_page]);
1841 it->ooi_pos_lu_page = 0;
/* All cached pages consumed: free the cache and reset counters
 * before deciding whether to fetch more from the peer. */
1845 for (i = 0; i < it->ooi_total_npages; i++) {
1846 if (pages[i] != NULL)
1847 __free_page(pages[i]);
1849 OBD_FREE(pages, it->ooi_total_npages * sizeof(*pages));
1851 it->ooi_pos_page = 0;
1852 it->ooi_total_npages = 0;
1853 it->ooi_valid_npages = 0;
1856 it->ooi_cur_page = NULL;
1857 it->ooi_cur_idxpage = NULL;
1858 it->ooi_pages = NULL;
/* II_END_OFF from the peer means iteration is complete. */
1861 if (it->ooi_next == II_END_OFF)
1864 rc = osp_it_fetch(env, it);
1872 * Move the iteration cursor to the next record.
1874 * If there are more records in the lu_page, then move the iteration
1875 * cursor to the next record directly. Otherwise, move the iteration
1876 * cursor to the record in the next lu_page via osp_it_next_page()
1878 * \param[in] env pointer to the thread context
1879 * \param[in] di pointer to the iteration structure
1881 * \retval positive for end of the directory
1882 * \retval 0 for success
1883 * \retval negative error number on failure
1885 static int osp_orphan_it_next(const struct lu_env *env, struct dt_it *di)
1887 struct osp_it *it = (struct osp_it *)di;
1888 struct lu_idxpage *idxpage;
1893 idxpage = it->ooi_cur_idxpage;
1894 if (idxpage != NULL) {
1895 if (idxpage->lip_nr == 0)
/* Still entries left in the current lu_page: point ooi_ent at
 * the next lu_orphan_ent, swabbing it if the reply needed it. */
1899 if (it->ooi_pos_ent < idxpage->lip_nr) {
1901 (struct lu_orphan_ent *)idxpage->lip_entries +
1904 lustre_swab_orphan_ent(it->ooi_ent);
/* Otherwise advance to the next lu_page (may trigger a fetch). */
1909 rc = osp_it_next_page(env, di);
/* dt_it_ops::get handler; the key is ignored for this iterator type
 * (body not fully visible in this chunk). */
1916 int osp_it_get(const struct lu_env *env, struct dt_it *di,
1917 const struct dt_key *key)
/* dt_it_ops::put handler; nothing to release per get() above. */
1922 void osp_it_put(const struct lu_env *env, struct dt_it *di)
/* Return the key (FID) of the current orphan entry, or NULL-ish when
 * the iterator has no current entry (fall-through not visible here). */
1926 static struct dt_key *osp_orphan_it_key(const struct lu_env *env,
1927 const struct dt_it *di)
1929 struct osp_it *it = (struct osp_it *)di;
1930 struct lu_orphan_ent *ent = (struct lu_orphan_ent *)it->ooi_ent;
1932 if (likely(ent != NULL))
1933 return (struct dt_key *)(&ent->loe_key);
/* Orphan index keys are always FIDs, so the key size is fixed. */
1938 static int osp_orphan_it_key_size(const struct lu_env *env,
1939 const struct dt_it *di)
1941 return sizeof(struct lu_fid);
/* Copy the current orphan record into the caller's buffer; error path
 * for a missing current entry is not visible in this chunk. */
1944 static int osp_orphan_it_rec(const struct lu_env *env, const struct dt_it *di,
1945 struct dt_rec *rec, __u32 attr)
1947 struct osp_it *it = (struct osp_it *)di;
1948 struct lu_orphan_ent *ent = (struct lu_orphan_ent *)it->ooi_ent;
1950 if (likely(ent != NULL)) {
1951 *(struct lu_orphan_rec *)rec = ent->loe_rec;
/* Return the iteration cookie (the next hash position to fetch) so the
 * caller can later resume via the load() method. */
1958 __u64 osp_it_store(const struct lu_env *env, const struct dt_it *di)
1960 struct osp_it *it = (struct osp_it *)di;
1962 return it->ooi_next;
1966 * Locate the iteration cursor to the specified position (cookie).
1968 * \param[in] env pointer to the thread context
1969 * \param[in] di pointer to the iteration structure
1970 * \param[in] hash the specified position
1972 * \retval positive number for locating to the exactly position
1974 * \retval 0 for arriving at the end of the iteration
1975 * \retval negative error number on failure
1977 int osp_orphan_it_load(const struct lu_env *env, const struct dt_it *di,
1980 struct osp_it *it = (struct osp_it *)di;
/* Seed the cookie, then step once so the cursor sits on a record. */
1983 it->ooi_next = hash;
1984 rc = osp_orphan_it_next(env, (struct dt_it *)di);
/* dt_it_ops::key_rec handler (body not visible in this chunk). */
1994 int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
/* Index operations used for the orphan (layout rbtree) index; selected
 * by osp_index_try() for LAST_ID/IDIF objects. */
2000 static const struct dt_index_operations osp_orphan_index_ops = {
2001 .dio_lookup = osp_orphan_index_lookup,
2002 .dio_declare_insert = osp_orphan_index_declare_insert,
2003 .dio_insert = osp_orphan_index_insert,
2004 .dio_declare_delete = osp_orphan_index_declare_delete,
2005 .dio_delete = osp_orphan_index_delete,
2007 .init = osp_it_init,
2008 .fini = osp_it_fini,
2009 .next = osp_orphan_it_next,
2012 .key = osp_orphan_it_key,
2013 .key_size = osp_orphan_it_key_size,
2014 .rec = osp_orphan_it_rec,
2015 .store = osp_it_store,
2016 .load = osp_orphan_it_load,
2017 .key_rec = osp_it_key_rec,
2022 * Implement OSP layer dt_object_operations::do_index_try() interface.
2024 * Negotiate the index type.
2026 * If the target index is an IDIF object, then use osp_orphan_index_ops.
2027 * Otherwise, assign osp_md_index_ops to the dt_object::do_index_ops.
2028 * (\see lustre/include/lustre_fid.h for IDIF.)
2030 * \param[in] env pointer to the thread context
2031 * \param[in] dt pointer to the OSP layer dt_object
2032 * \param[in] feat unused
2034 * \retval 0 for success
2036 static int osp_index_try(const struct lu_env *env,
2037 struct dt_object *dt,
2038 const struct dt_index_features *feat)
2040 const struct lu_fid *fid = lu_object_fid(&dt->do_lu);
/* LAST_ID object in IDIF space is iterated as the orphan index. */
2042 if (fid_is_last_id(fid) && fid_is_idif(fid))
2043 dt->do_index_ops = &osp_orphan_index_ops;
2045 dt->do_index_ops = &osp_md_index_ops;
/* dt_object operations used for OST objects (selected in
 * osp_object_init() when is_ost_obj() is true). */
2049 static struct dt_object_operations osp_obj_ops = {
2050 .do_declare_attr_get = osp_declare_attr_get,
2051 .do_attr_get = osp_attr_get,
2052 .do_declare_attr_set = osp_declare_attr_set,
2053 .do_attr_set = osp_attr_set,
2054 .do_declare_xattr_get = osp_declare_xattr_get,
2055 .do_xattr_get = osp_xattr_get,
2056 .do_declare_xattr_set = osp_declare_xattr_set,
2057 .do_xattr_set = osp_xattr_set,
2058 .do_declare_create = osp_declare_object_create,
2059 .do_create = osp_object_create,
2060 .do_declare_destroy = osp_declare_object_destroy,
2061 .do_destroy = osp_object_destroy,
2062 .do_index_try = osp_index_try,
2066 * Implement OSP layer lu_object_operations::loo_object_init() interface.
2068 * Initialize the object.
2070 * If it is a remote MDT object, then call do_attr_get() to fetch
2071 * the attribute from the peer.
2073 * \param[in] env pointer to the thread context
2074 * \param[in] o pointer to the OSP layer lu_object
2075 * \param[in] conf may carry LOC_F_NEW to skip the remote attr fetch
2077 * \retval 0 for success
2078 * \retval negative error number on failure
2080 static int osp_object_init(const struct lu_env *env, struct lu_object *o,
2081 const struct lu_object_conf *conf)
2083 struct osp_object *po = lu2osp_obj(o);
2087 spin_lock_init(&po->opo_lock);
2088 o->lo_header->loh_attr |= LOHA_REMOTE;
2089 INIT_LIST_HEAD(&po->opo_xattr_list);
2090 INIT_LIST_HEAD(&po->opo_invalidate_cb_list);
/* OST objects use osp_obj_ops; MDT objects additionally get body ops
 * and have their attributes fetched eagerly from the peer. */
2092 if (is_ost_obj(o)) {
2093 po->opo_obj.do_ops = &osp_obj_ops;
2095 struct lu_attr *la = &osp_env_info(env)->osi_attr;
2097 po->opo_obj.do_ops = &osp_md_obj_ops;
2098 po->opo_obj.do_body_ops = &osp_md_body_ops;
/* A to-be-created object cannot exist remotely yet: skip the
 * attr_get RPC and mark it non-existent. */
2100 if (conf != NULL && conf->loc_flags & LOC_F_NEW) {
2101 po->opo_non_exist = 1;
2103 rc = po->opo_obj.do_ops->do_attr_get(env, lu2dt_obj(o),
2106 o->lo_header->loh_attr |=
2107 LOHA_EXISTS | (la->la_mode & S_IFMT);
/* -ENOENT is not an error here: it just means the remote
 * object does not exist (yet). */
2108 if (rc == -ENOENT) {
2109 po->opo_non_exist = 1;
2113 init_rwsem(&po->opo_sem);
2119 * Implement OSP layer lu_object_operations::loo_object_free() interface.
2121 * Finalize the object.
2123 * If the OSP object has attributes cache, then destroy the cache.
2124 * Free the object finally.
2126 * \param[in] env pointer to the thread context
2127 * \param[in] o pointer to the OSP layer lu_object
2129 static void osp_object_free(const struct lu_env *env, struct lu_object *o)
2131 struct osp_object *obj = lu2osp_obj(o);
2132 struct lu_object_header *h = o->lo_header;
2133 struct osp_xattr_entry *oxe;
2134 struct osp_xattr_entry *tmp;
2137 dt_object_fini(&obj->opo_obj);
2138 lu_object_header_fini(h);
/* At free time only the list itself may hold a reference on each
 * cached xattr entry — assert that and release the memory directly. */
2139 list_for_each_entry_safe(oxe, tmp, &obj->opo_xattr_list, oxe_list) {
2140 list_del(&oxe->oxe_list);
2141 count = atomic_read(&oxe->oxe_ref);
2142 LASSERTF(count == 1,
2143 "Still has %d users on the xattr entry %.*s\n",
2144 count-1, (int)oxe->oxe_namelen, oxe->oxe_buf);
2146 OBD_FREE(oxe, oxe->oxe_buflen);
2148 OBD_SLAB_FREE_PTR(obj, osp_object_kmem);
2152 * Implement OSP layer lu_object_operations::loo_object_release() interface.
2154 * Cleanup (not free) the object.
2156 * If it is a reserved object but failed to be created, or it is an OST
2157 * object, then mark the object as non-cached.
2159 * \param[in] env pointer to the thread context
2160 * \param[in] o pointer to the OSP layer lu_object
2162 static void osp_object_release(const struct lu_env *env, struct lu_object *o)
2164 struct osp_object *po = lu2osp_obj(o);
2165 struct osp_device *d = lu2osp_dev(o->lo_dev);
2170 * release reservation if object was declared but not created
2171 * this may require lu_object_put() in LOD
2173 if (unlikely(po->opo_reserved)) {
2174 LASSERT(d->opd_pre != NULL);
2175 LASSERT(d->opd_pre_reserved > 0);
2176 spin_lock(&d->opd_pre_lock);
2177 d->opd_pre_reserved--;
2178 spin_unlock(&d->opd_pre_lock);
2180 /* not needed in cache any more */
2181 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
2185 /* XXX: Currently, NOT cache OST-object on MDT because:
2186 * 1. it is not often accessed on MDT.
2187 * 2. avoid up layer (such as LFSCK) to load too many
2188 * once-used OST-objects. */
2189 set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
/* lu_object_operations::loo_object_print handler: emit a one-line
 * debug representation of the OSP object. */
2194 static int osp_object_print(const struct lu_env *env, void *cookie,
2195 lu_printer_t p, const struct lu_object *l)
2197 const struct osp_object *o = lu2osp_obj((struct lu_object *)l);
2199 return (*p)(env, cookie, LUSTRE_OSP_NAME"-object@%p", o);
/* lu_object_operations::loo_object_invariant handler (body not visible
 * in this chunk). */
2202 static int osp_object_invariant(const struct lu_object *o)
2207 struct lu_object_operations osp_lu_obj_ops = {
2208 .loo_object_init = osp_object_init,
2209 .loo_object_free = osp_object_free,
2210 .loo_object_release = osp_object_release,
2211 .loo_object_print = osp_object_print,
2212 .loo_object_invariant = osp_object_invariant