4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osp/osp_internal.h
34 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
37 #ifndef _OSP_INTERNAL_H
38 #define _OSP_INTERNAL_H
41 #include <obd_class.h>
42 #include <dt_object.h>
43 #include <md_object.h>
44 #include <lustre_fid.h>
45 #include <lustre_update.h>
46 #include <lu_target.h>
49 * Infrastructure to support tracking of last committed llog record
51 struct osp_id_tracker {
54 __u64 otr_committed_id;
55 /* callback is register once per diskfs -- that's the whole point */
56 struct dt_txn_callback otr_tx_cb;
57 /* single node can run many clusters */
58 struct list_head otr_wakeup_list;
59 struct list_head otr_list;
60 /* underlying shared device */
61 struct dt_device *otr_dev;
62 /* how many users of this tracker */
63 atomic_t otr_refcount;
66 struct osp_precreate {
71 /* last fid to assign in creation */
72 struct lu_fid osp_pre_used_fid;
73 /* last created id OST reported, next-created - available id's */
74 struct lu_fid osp_pre_last_created_fid;
75 /* how many ids are reserved in declare, we shouldn't block in create */
76 __u64 osp_pre_reserved;
77 /* consumers (who needs new ids) wait here */
78 wait_queue_head_t osp_pre_user_waitq;
79 /* current precreation status: working, failed, stopping? */
81 /* how many objects to precreate next time */
82 int osp_pre_create_count;
83 int osp_pre_min_create_count;
84 int osp_pre_max_create_count;
85 /* whether to increase precreation window next time or not */
86 int osp_pre_create_slow;
87 /* cleaning up orphans or recreating missing objects */
88 int osp_pre_recovering;
91 struct osp_update_request_sub {
92 struct object_update_request *ours_req; /* may be vmalloc'd */
94 /* Linked to osp_update_request->our_req_list */
95 struct list_head ours_list;
98 struct osp_update_request {
100 /* update request result */
103 /* List of osp_update_request_sub */
104 struct list_head our_req_list;
108 struct list_head our_cb_items;
109 struct list_head our_invalidate_cb_list;
111 /* points to thandle if this update request belongs to one */
112 struct osp_thandle *our_th;
115 __u64 our_generation;
116 /* protect our_list and flag */
117 spinlock_t our_list_lock;
118 /* linked to the list(ou_list) in osp_updates */
119 struct list_head our_list;
121 __u32 our_req_ready:1;
126 struct list_head ou_list;
128 wait_queue_head_t ou_waitq;
130 /* The next rpc version which supposed to be sent in
131 * osp_send_update_thread().*/
132 __u64 ou_rpc_version;
134 /* The rpc version assigned to the osp thandle during (osp_md_write()),
135 * which will be sent by this order. Note: the osp_thandle has be sent
136 * by this order to make sure the remote update log will follow the
137 * llog format rule. XXX: these probably should be removed once we
138 * invent new llog format */
141 /* The generation of current osp update RPC, which is used to make sure
142 * those stale RPC(with older generation) will not be sent, otherwise it
143 * will cause update llog corruption */
147 struct osp_rpc_lock {
148 /** Lock protecting in-flight RPC concurrency. */
149 struct mutex rpcl_mutex;
150 /** Used for MDS/RPC load testing purposes. */
151 unsigned int rpcl_fakes;
155 struct dt_device opd_dt_dev;
156 /* corresponded OST index */
159 /* corresponding MDT index, which will be used when connecting to OST
160 * for validating the connection (see ofd_parse_connect_data) */
162 /* device used to store persistent state (llogs, last ids) */
163 struct obd_export *opd_storage_exp;
164 struct dt_device *opd_storage;
165 struct dt_object *opd_last_used_oid_file;
166 struct dt_object *opd_last_used_seq_file;
168 /* stored persistently in LE format, updated directly to/from disk
169 * and required le64_to_cpu() conversion before use.
170 * Protected by opd_pre_lock */
171 struct lu_fid opd_last_used_fid;
172 /* on disk copy last_used_fid.f_oid or idif */
174 struct lu_fid opd_gap_start_fid;
176 /* connection to OST */
177 struct osp_rpc_lock opd_rpc_lock;
178 struct obd_device *opd_obd;
179 struct obd_export *opd_exp;
180 struct obd_uuid opd_cluuid;
181 struct obd_connect_data *opd_connect_data;
183 /* connection status. */
184 unsigned int opd_new_connection:1,
185 opd_got_disconnected:1,
188 opd_imp_seen_connected:1,
191 /* whether local recovery is completed:
192 * reported via ->ldo_recovery_complete() */
193 int opd_recovery_completed;
195 /* precreate structure for OSP */
196 struct osp_precreate *opd_pre;
197 /* dedicate precreate thread */
198 struct ptlrpc_thread opd_pre_thread;
199 spinlock_t opd_pre_lock;
200 /* thread waits for signals about pool going empty */
201 wait_queue_head_t opd_pre_waitq;
203 /* send update thread */
204 struct osp_updates *opd_update;
205 /* dedicate update thread */
206 struct ptlrpc_thread opd_update_thread;
209 * OST synchronization thread
211 spinlock_t opd_sync_lock;
212 /* unique generation, to recognize start of new records in the llog */
213 struct llog_gen opd_sync_generation;
214 /* number of changes to sync, used to wake up sync thread */
215 atomic_t opd_sync_changes;
216 /* processing of changes from previous mount is done? */
217 int opd_sync_prev_done;
219 struct ptlrpc_thread opd_sync_thread;
220 wait_queue_head_t opd_sync_waitq;
221 /* list of in flight rpcs */
222 struct list_head opd_sync_in_flight_list;
223 /* list of remotely committed rpc */
224 struct list_head opd_sync_committed_there;
225 /* number of RPCs in flight - flow control */
226 atomic_t opd_sync_rpcs_in_flight;
227 int opd_sync_max_rpcs_in_flight;
228 /* number of RPC in processing (including non-committed by OST) */
229 atomic_t opd_sync_rpcs_in_progress;
230 int opd_sync_max_rpcs_in_progress;
231 /* osd api's commit cb control structure */
232 struct dt_txn_callback opd_sync_txn_cb;
233 /* last used change number -- semantically similar to transno */
234 unsigned long opd_sync_last_used_id;
235 /* last committed change number -- semantically similar to
237 __u64 opd_sync_last_committed_id;
238 /* last processed catalog index */
239 int opd_sync_last_catalog_idx;
240 /* number of processed records */
241 atomic64_t opd_sync_processed_recs;
242 /* stop processing new requests until barrier=0 */
243 atomic_t opd_sync_barrier;
244 wait_queue_head_t opd_sync_barrier_waitq;
245 /* last generated id */
246 ktime_t opd_sync_next_commit_cb;
247 atomic_t opd_commits_registered;
250 * statfs related fields: OSP maintains it on its own
252 struct obd_statfs opd_statfs;
253 ktime_t opd_statfs_fresh_till;
254 struct timer_list opd_statfs_timer;
255 int opd_statfs_update_in_progress;
256 /* how often to update statfs data */
257 time64_t opd_statfs_maxage;
259 struct dentry *opd_debugfs;
261 /* If the caller wants to do some idempotent async operations on
262 * remote server, it can append the async remote requests on the
263 * osp_device::opd_async_requests via declare() functions, these
264 * requests can be packed together and sent to the remote server
265 * via single OUT RPC later. */
266 struct osp_update_request *opd_async_requests;
267 /* Protect current operations on opd_async_requests. */
268 struct mutex opd_async_requests_mutex;
269 struct list_head opd_async_updates;
270 struct rw_semaphore opd_async_updates_rwsem;
271 atomic_t opd_async_updates_count;
274 * Limit the object allocation using ENOSPC for opd_pre_status
276 int opd_reserved_mb_high;
277 int opd_reserved_mb_low;
280 #define opd_pre_used_fid opd_pre->osp_pre_used_fid
281 #define opd_pre_last_created_fid opd_pre->osp_pre_last_created_fid
282 #define opd_pre_reserved opd_pre->osp_pre_reserved
283 #define opd_pre_user_waitq opd_pre->osp_pre_user_waitq
284 #define opd_pre_status opd_pre->osp_pre_status
285 #define opd_pre_create_count opd_pre->osp_pre_create_count
286 #define opd_pre_min_create_count opd_pre->osp_pre_min_create_count
287 #define opd_pre_max_create_count opd_pre->osp_pre_max_create_count
288 #define opd_pre_create_slow opd_pre->osp_pre_create_slow
289 #define opd_pre_recovering opd_pre->osp_pre_recovering
291 extern struct kmem_cache *osp_object_kmem;
293 /* The first part of oxe_buf is xattr name, and is '\0' terminated.
294 * The left part is for value, binary mode. */
295 struct osp_xattr_entry {
296 struct list_head oxe_list;
302 unsigned int oxe_exist:1,
307 /* this is a top object */
309 struct lu_object_header opo_header;
310 struct dt_object opo_obj;
311 unsigned int opo_reserved:1,
315 /* read/write lock for md osp object */
316 struct rw_semaphore opo_sem;
317 const struct lu_env *opo_owner;
318 struct lu_attr opo_attr;
319 struct list_head opo_xattr_list;
320 struct list_head opo_invalidate_cb_list;
321 /* Protect opo_ooa. */
323 /* to implement in-flight invalidation */
324 atomic_t opo_invalidate_seq;
325 struct rw_semaphore opo_invalidate_sem;
328 extern struct lu_object_operations osp_lu_obj_ops;
329 extern const struct dt_device_operations osp_dt_ops;
330 extern struct dt_object_operations osp_md_obj_ops;
331 extern struct dt_body_operations osp_md_body_ops;
333 struct osp_thread_info {
334 struct lu_buf osi_lb;
335 struct lu_buf osi_lb2;
336 struct lu_fid osi_fid;
337 struct lu_attr osi_attr;
338 struct ost_id osi_oi;
339 struct ost_id osi_oi2;
342 struct llog_rec_hdr osi_hdr;
343 struct llog_unlink64_rec osi_unlink;
344 struct llog_setattr64_rec_v2 osi_setattr;
345 struct llog_gen_rec osi_gen;
347 struct llog_cookie osi_cookie;
348 struct llog_catid osi_cid;
349 struct lu_seq_range osi_seq;
350 struct ldlm_res_id osi_resid;
351 struct obdo osi_obdo;
354 /* Iterator for OSP */
357 __u32 ooi_pos_lu_page;
361 int ooi_total_npages;
362 int ooi_valid_npages;
363 unsigned int ooi_swab:1;
365 struct dt_object *ooi_obj;
367 struct page *ooi_cur_page;
368 struct lu_idxpage *ooi_cur_idxpage;
369 struct page **ooi_pages;
372 #define OSP_THANDLE_MAGIC 0x20141214
374 struct thandle ot_super;
376 /* OSP will use this thandle to update last oid*/
377 struct thandle *ot_storage_th;
379 struct list_head ot_commit_dcb_list;
380 struct list_head ot_stop_dcb_list;
381 struct osp_update_request *ot_our;
382 atomic_t ot_refcount;
385 static inline struct osp_thandle *
386 thandle_to_osp_thandle(struct thandle *th)
388 return container_of(th, struct osp_thandle, ot_super);
391 static inline struct osp_update_request *
392 thandle_to_osp_update_request(struct thandle *th)
394 struct osp_thandle *oth;
396 oth = thandle_to_osp_thandle(th);
400 /* The transaction only include the updates on the remote node, and
401 * no local updates at all */
402 static inline bool is_only_remote_trans(struct thandle *th)
404 return th->th_top == NULL;
407 static inline void osp_objid_buf_prep(struct lu_buf *buf, loff_t *off,
408 __u64 *id, int index)
410 /* Note: through id is only 32 bits, it will also write 64 bits
411 * for oid to keep compatibility with the previous version. */
412 buf->lb_buf = (void *)id;
413 buf->lb_len = sizeof(u64);
414 *off = sizeof(u64) * index;
417 static inline void osp_objseq_buf_prep(struct lu_buf *buf, loff_t *off,
418 __u64 *seq, int index)
420 buf->lb_buf = (void *)seq;
421 buf->lb_len = sizeof(u64);
422 *off = sizeof(u64) * index;
425 static inline void osp_buf_prep(struct lu_buf *lb, void *buf, int buf_len)
428 lb->lb_len = buf_len;
431 extern struct lu_context_key osp_thread_key;
433 static inline struct osp_thread_info *osp_env_info(const struct lu_env *env)
435 struct osp_thread_info *info;
437 info = lu_context_key_get(&env->le_ctx, &osp_thread_key);
439 lu_env_refill((struct lu_env *)env);
440 info = lu_context_key_get(&env->le_ctx, &osp_thread_key);
446 struct osp_txn_info {
447 __u64 oti_current_id;
450 extern struct lu_context_key osp_txn_key;
452 static inline struct osp_txn_info *osp_txn_info(struct lu_context *ctx)
454 struct osp_txn_info *info;
456 info = lu_context_key_get(ctx, &osp_txn_key);
460 extern const struct lu_device_operations osp_lu_ops;
462 static inline int lu_device_is_osp(struct lu_device *d)
464 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &osp_lu_ops);
467 static inline struct osp_device *lu2osp_dev(struct lu_device *d)
469 LASSERT(lu_device_is_osp(d));
470 return container_of0(d, struct osp_device, opd_dt_dev.dd_lu_dev);
473 static inline struct lu_device *osp2lu_dev(struct osp_device *d)
475 return &d->opd_dt_dev.dd_lu_dev;
478 static inline struct osp_device *dt2osp_dev(struct dt_device *d)
480 LASSERT(lu_device_is_osp(&d->dd_lu_dev));
481 return container_of0(d, struct osp_device, opd_dt_dev);
484 static inline struct osp_object *lu2osp_obj(struct lu_object *o)
486 LASSERT(ergo(o != NULL, lu_device_is_osp(o->lo_dev)));
487 return container_of0(o, struct osp_object, opo_obj.do_lu);
490 static inline struct lu_object *osp2lu_obj(struct osp_object *obj)
492 return &obj->opo_obj.do_lu;
495 static inline struct osp_object *osp_obj(const struct lu_object *o)
497 LASSERT(lu_device_is_osp(o->lo_dev));
498 return container_of0(o, struct osp_object, opo_obj.do_lu);
501 static inline struct osp_object *dt2osp_obj(const struct dt_object *d)
503 return osp_obj(&d->do_lu);
506 static inline struct dt_object *osp_object_child(struct osp_object *o)
508 return container_of0(lu_object_next(osp2lu_obj(o)),
509 struct dt_object, do_lu);
512 static inline struct seq_server_site *osp_seq_site(struct osp_device *osp)
514 return osp->opd_dt_dev.dd_lu_dev.ld_site->ld_seq_site;
518 * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
520 * This mutex is used to implement execute-once semantics on the MDT.
521 * The MDT stores the last transaction ID and result for every client in
522 * its last_rcvd file. If the client doesn't get a reply, it can safely
523 * resend the request and the MDT will reconstruct the reply being aware
524 * that the request has already been executed. Without this lock,
525 * execution status of concurrent in-flight requests would be
528 * This implementation limits the extent to which we can keep a full pipeline
529 * of in-flight requests from a single client. This limitation can be
530 * overcome by allowing multiple slots per client in the last_rcvd file,
533 #define OSP_FAKE_RPCL_IT ((void *)0x2c0012bfUL)
535 static inline void osp_init_rpc_lock(struct osp_device *osp)
537 struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
539 mutex_init(&lck->rpcl_mutex);
543 static inline void osp_get_rpc_lock(struct osp_device *osp)
545 struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
547 /* This would normally block until the existing request finishes.
548 * If fail_loc is set it will block until the regular request is
549 * done, then increment rpcl_fakes. Once that is non-zero it
550 * will only be cleared when all fake requests are finished.
551 * Only when all fake requests are finished can normal requests
552 * be sent, to ensure they are recoverable again.
555 mutex_lock(&lck->rpcl_mutex);
557 if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM) ||
558 CFS_FAIL_CHECK_QUIET(OBD_FAIL_OSP_RPCS_SEM)) {
560 mutex_unlock(&lck->rpcl_mutex);
565 /* This will only happen when the CFS_FAIL_CHECK() was just turned
566 * off but there are still requests in progress. Wait until they
567 * finish. It doesn't need to be efficient in this extremely rare
568 * case, just have low overhead in the common case when it isn't true.
570 if (unlikely(lck->rpcl_fakes)) {
571 mutex_unlock(&lck->rpcl_mutex);
572 schedule_timeout(cfs_time_seconds(1) / 4);
578 static inline void osp_put_rpc_lock(struct osp_device *osp)
580 struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
582 if (lck->rpcl_fakes) { /* OBD_FAIL_OSP_RPCS_SEM */
583 mutex_lock(&lck->rpcl_mutex);
585 if (lck->rpcl_fakes) /* check again under lock */
589 mutex_unlock(&lck->rpcl_mutex);
592 static inline int osp_fid_diff(const struct lu_fid *fid1,
593 const struct lu_fid *fid2)
595 /* In 2.6+ ost_idx is packed into IDIF FID, while in 2.4 and 2.5 IDIF
596 * is always FID_SEQ_IDIF(0x100000000ULL), which does not include OST
597 * index in the seq. So we can not compare IDIF FID seq here */
598 if (fid_is_idif(fid1) && fid_is_idif(fid2)) {
599 __u32 ost_idx1 = fid_idif_ost_idx(fid1);
600 __u32 ost_idx2 = fid_idif_ost_idx(fid2);
602 LASSERTF(ost_idx1 == 0 || ost_idx2 == 0 || ost_idx1 == ost_idx2,
603 "fid1: "DFID", fid2: "DFID"\n", PFID(fid1),
606 return fid_idif_id(fid1->f_seq, fid1->f_oid, 0) -
607 fid_idif_id(fid2->f_seq, fid2->f_oid, 0);
610 LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
611 PFID(fid1), PFID(fid2));
613 return fid_oid(fid1) - fid_oid(fid2);
616 static inline void osp_fid_to_obdid(struct lu_fid *last_fid, u64 *osi_id)
618 if (fid_is_idif((last_fid)))
619 *osi_id = fid_idif_id(fid_seq(last_fid), fid_oid(last_fid),
622 *osi_id = fid_oid(last_fid);
625 static inline void osp_update_last_fid(struct osp_device *d, struct lu_fid *fid)
627 int diff = osp_fid_diff(fid, &d->opd_last_used_fid);
628 struct lu_fid *gap_start = &d->opd_gap_start_fid;
631 * we might have lost precreated objects due to VBR and precreate
632 * orphans, the gap in objid can be calculated properly only here
636 d->opd_gap_start_fid = d->opd_last_used_fid;
637 if (fid_oid(gap_start) == LUSTRE_DATA_SEQ_MAX_WIDTH) {
639 gap_start->f_oid = fid_is_idif(gap_start) ?
644 d->opd_gap_count = diff - 1;
645 CDEBUG(D_HA, "Gap in objids: start="DFID", count =%d\n",
646 PFID(&d->opd_gap_start_fid), d->opd_gap_count);
648 d->opd_last_used_fid = *fid;
649 osp_fid_to_obdid(fid, &d->opd_last_id);
653 static int osp_fid_end_seq(const struct lu_env *env, struct lu_fid *fid)
655 if (fid_is_idif(fid)) {
656 struct osp_thread_info *info = osp_env_info(env);
657 struct ost_id *oi = &info->osi_oi;
659 fid_to_ostid(fid, oi);
660 return ostid_id(oi) == IDIF_MAX_OID;
662 return fid_oid(fid) == LUSTRE_DATA_SEQ_MAX_WIDTH;
666 static inline int osp_precreate_end_seq_nolock(const struct lu_env *env,
667 struct osp_device *osp)
669 struct lu_fid *fid = &osp->opd_pre_last_created_fid;
671 return osp_fid_end_seq(env, fid);
674 static inline int osp_precreate_end_seq(const struct lu_env *env,
675 struct osp_device *osp)
679 spin_lock(&osp->opd_pre_lock);
680 rc = osp_precreate_end_seq_nolock(env, osp);
681 spin_unlock(&osp->opd_pre_lock);
685 static inline int osp_is_fid_client(struct osp_device *osp)
687 struct obd_import *imp = osp->opd_obd->u.cli.cl_import;
689 return imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_FID;
692 struct object_update *
693 update_buffer_get_update(struct object_update_request *request,
696 int osp_extend_update_buffer(const struct lu_env *env,
697 struct osp_update_request *our);
699 struct osp_update_request_sub *
700 osp_current_object_update_request(struct osp_update_request *our);
702 int osp_object_update_request_create(struct osp_update_request *our,
705 #define OSP_UPDATE_RPC_PACK(env, out_something_pack, our, ...) \
707 struct object_update *object_update; \
708 size_t max_update_length; \
709 struct osp_update_request_sub *ours; \
713 ours = osp_current_object_update_request(our); \
714 LASSERT(ours != NULL); \
715 max_update_length = ours->ours_req_size - \
716 object_update_request_size(ours->ours_req); \
718 object_update = update_buffer_get_update(ours->ours_req,\
719 ours->ours_req->ourq_count); \
720 ret = out_something_pack(env, object_update, \
721 &max_update_length, \
723 if (ret == -E2BIG) { \
725 /* Create new object update request */ \
726 rc1 = osp_object_update_request_create(our, \
727 max_update_length + \
728 offsetof(struct object_update_request, \
729 ourq_updates[0]) + 1); \
737 ours->ours_req->ourq_count++; \
738 (our)->our_update_nr++; \
739 object_update->ou_batchid = \
741 object_update->ou_flags |= \
750 static inline bool osp_send_update_thread_running(struct osp_device *osp)
752 return osp->opd_update_thread.t_flags & SVC_RUNNING;
755 static inline bool osp_send_update_thread_stopped(struct osp_device *osp)
757 return osp->opd_update_thread.t_flags & SVC_STOPPED;
760 typedef int (*osp_update_interpreter_t)(const struct lu_env *env,
761 struct object_update_reply *rep,
762 struct ptlrpc_request *req,
763 struct osp_object *obj,
764 void *data, int index, int rc);
767 void osp_update_last_id(struct osp_device *d, u64 objid);
770 int osp_insert_async_request(const struct lu_env *env, enum update_type op,
771 struct osp_object *obj, int count, __u16 *lens,
772 const void **bufs, void *data, __u32 repsize,
773 osp_update_interpreter_t interpreter);
775 int osp_unplug_async_request(const struct lu_env *env,
776 struct osp_device *osp,
777 struct osp_update_request *update);
778 int osp_trans_update_request_create(struct thandle *th);
779 struct thandle *osp_trans_create(const struct lu_env *env,
780 struct dt_device *d);
781 int osp_trans_start(const struct lu_env *env, struct dt_device *dt,
783 int osp_insert_update_callback(const struct lu_env *env,
784 struct osp_update_request *update,
785 struct osp_object *obj, void *data,
786 osp_update_interpreter_t interpreter);
788 struct osp_update_request *osp_update_request_create(struct dt_device *dt);
789 void osp_update_request_destroy(const struct lu_env *env,
790 struct osp_update_request *update);
792 int osp_send_update_thread(void *arg);
793 int osp_check_and_set_rpc_version(struct osp_thandle *oth,
794 struct osp_object *obj);
796 void osp_thandle_destroy(const struct lu_env *env, struct osp_thandle *oth);
797 static inline void osp_thandle_get(struct osp_thandle *oth)
799 atomic_inc(&oth->ot_refcount);
802 static inline void osp_thandle_put(const struct lu_env *env,
803 struct osp_thandle *oth)
805 if (atomic_dec_and_test(&oth->ot_refcount))
806 osp_thandle_destroy(env, oth);
809 int osp_prep_update_req(const struct lu_env *env, struct obd_import *imp,
810 struct osp_update_request *our,
811 struct ptlrpc_request **reqp);
812 int osp_remote_sync(const struct lu_env *env, struct osp_device *osp,
813 struct osp_update_request *update,
814 struct ptlrpc_request **reqp);
816 struct thandle *osp_get_storage_thandle(const struct lu_env *env,
818 struct osp_device *osp);
819 void osp_trans_callback(const struct lu_env *env,
820 struct osp_thandle *oth, int rc);
821 void osp_invalidate_request(struct osp_device *osp);
823 int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
824 struct lu_attr *attr);
825 int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
826 struct lu_buf *buf, const char *name);
827 int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
828 const struct lu_buf *buf, const char *name,
829 int flag, struct thandle *th);
830 int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
831 const struct lu_buf *buf, const char *name, int fl,
833 int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
834 const char *name, struct thandle *th);
835 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
836 const char *name, struct thandle *th);
837 int osp_invalidate(const struct lu_env *env, struct dt_object *dt);
838 void osp_obj_invalidate_cache(struct osp_object *obj);
840 int osp_trans_stop(const struct lu_env *env, struct dt_device *dt,
842 int osp_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb);
844 struct dt_it *osp_it_init(const struct lu_env *env, struct dt_object *dt,
846 void osp_it_fini(const struct lu_env *env, struct dt_it *di);
847 int osp_it_get(const struct lu_env *env, struct dt_it *di,
848 const struct dt_key *key);
849 void osp_it_put(const struct lu_env *env, struct dt_it *di);
850 __u64 osp_it_store(const struct lu_env *env, const struct dt_it *di);
851 int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
853 int osp_it_next_page(const struct lu_env *env, struct dt_it *di);
854 /* osp_md_object.c */
855 int osp_md_declare_create(const struct lu_env *env, struct dt_object *dt,
856 struct lu_attr *attr, struct dt_allocation_hint *hint,
857 struct dt_object_format *dof, struct thandle *th);
858 int osp_md_create(const struct lu_env *env, struct dt_object *dt,
859 struct lu_attr *attr, struct dt_allocation_hint *hint,
860 struct dt_object_format *dof, struct thandle *th);
861 int osp_md_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
862 const struct lu_attr *attr, struct thandle *th);
863 int osp_md_attr_set(const struct lu_env *env, struct dt_object *dt,
864 const struct lu_attr *attr, struct thandle *th);
865 extern const struct dt_index_operations osp_md_index_ops;
867 /* osp_precreate.c */
868 int osp_init_precreate(struct osp_device *d);
869 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d);
870 __u64 osp_precreate_get_id(struct osp_device *d);
871 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
873 void osp_precreate_fini(struct osp_device *d);
874 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt, __u64);
875 void osp_pre_update_status(struct osp_device *d, int rc);
876 void osp_statfs_need_now(struct osp_device *d);
877 int osp_reset_last_used(const struct lu_env *env, struct osp_device *osp);
878 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
879 struct lu_fid *fid, int sync);
880 int osp_init_pre_fid(struct osp_device *osp);
881 int osp_init_statfs(struct osp_device *osp);
882 void osp_fini_statfs(struct osp_device *osp);
883 void osp_statfs_fini(struct osp_device *d);
886 void osp_tunables_init(struct osp_device *osp);
887 void osp_tunables_fini(struct osp_device *osp);
890 int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
891 enum llog_op_type type, struct thandle *th);
892 int osp_sync_add(const struct lu_env *env, struct osp_object *o,
893 enum llog_op_type type, struct thandle *th,
894 const struct lu_attr *attr);
895 int osp_sync_init(const struct lu_env *env, struct osp_device *d);
896 int osp_sync_fini(struct osp_device *d);
897 void osp_sync_check_for_work(struct osp_device *osp);
898 void osp_sync_force(const struct lu_env *env, struct osp_device *d);
899 int osp_sync_add_commit_cb_1s(const struct lu_env *env, struct osp_device *d,
903 extern struct obd_ops lwp_obd_device_ops;
904 extern struct lu_device_type lwp_device_type;
906 static inline struct lu_device *osp2top(const struct osp_device *osp)
908 return osp->opd_dt_dev.dd_lu_dev.ld_site->ls_top_dev;
911 static inline void osp_set_req_replay(const struct osp_device *osp,
912 struct ptlrpc_request *req)
914 struct obd_device *obd = osp2top(osp)->ld_obd;
916 /* The RPC must be recovery related for the cases:
918 * 1. sent during recovery, or
919 * 2. sent before the recovery thread target_recovery_thread() start,
920 * such as triggered by lod_sub_recovery_thread(). */
921 if (obd->obd_recovering || (obd->obd_replayable && obd->obd_no_conn))
922 req->rq_allow_replay = 1;