4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2012, 2016, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/osp/osp_internal.h
34 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
37 #ifndef _OSP_INTERNAL_H
38 #define _OSP_INTERNAL_H
41 #include <obd_class.h>
42 #include <dt_object.h>
43 #include <md_object.h>
44 #include <lustre_fid.h>
45 #include <lustre_update.h>
46 #include <lu_target.h>
47 #include <lustre_mdc.h>
50 * Infrastructure to support tracking of last committed llog record
52 struct osp_id_tracker {
/* last llog record id known committed to disk (updated by commit cb) */
55 __u64 otr_committed_id;
56 /* callback is register once per diskfs -- that's the whole point */
57 struct dt_txn_callback otr_tx_cb;
58 /* single node can run many clusters */
59 struct list_head otr_wakeup_list;
/* linkage into the global list of trackers, one per shared disk device */
60 struct list_head otr_list;
61 /* underlying shared device */
62 struct dt_device *otr_dev;
63 /* how many users of this tracker */
64 atomic_t otr_refcount;
67 struct osp_precreate {
/* protects the precreation state below (fids, counters, status) */
71 spinlock_t osp_pre_lock;
73 /* last fid to assign in creation */
74 struct lu_fid osp_pre_used_fid;
75 /* last created id OST reported, next-created - available id's */
76 struct lu_fid osp_pre_last_created_fid;
77 /* how many ids are reserved in declare, we shouldn't block in create */
78 __u64 osp_pre_reserved;
79 /* consumers (who needs new ids) wait here */
80 wait_queue_head_t osp_pre_user_waitq;
81 /* current precreation status: working, failed, stopping? */
/* NOTE(review): the status field itself (osp_pre_status) appears truncated
 * from this view -- confirm against the full source */
83 /* how many objects to precreate next time */
84 int osp_pre_create_count;
85 int osp_pre_min_create_count;
86 int osp_pre_max_create_count;
87 /* whether to increase precreation window next time or not */
88 int osp_pre_create_slow;
89 /* cleaning up orphans or recreating missing objects */
90 int osp_pre_recovering;
/* One buffer of an OUT update request; a single osp_update_request may
 * be built from several of these chained on our_req_list. */
93 struct osp_update_request_sub {
94 struct object_update_request *ours_req; /* may be vmalloc'd */
96 /* Linked to osp_update_request->our_req_list */
97 struct list_head ours_list;
/* An aggregated OUT RPC being built: update buffers, completion callbacks
 * and sender-ordering state. */
100 struct osp_update_request {
102 /* update request result */
105 /* List of osp_update_request_sub */
106 struct list_head our_req_list;
/* interpreter callbacks to run when the RPC completes */
110 struct list_head our_cb_items;
/* invalidate callbacks to run if the request is aborted */
111 struct list_head our_invalidate_cb_list;
113 /* points to thandle if this update request belongs to one */
114 struct osp_thandle *our_th;
/* generation of the OSP update state this request was built under;
 * stale-generation requests are not sent (see osp_updates comment) */
117 __u64 our_generation;
118 /* protect our_list and flag */
119 spinlock_t our_list_lock;
120 /* linked to the list(ou_list) in osp_updates */
121 struct list_head our_list;
/* set once the request is fully packed and ready to be sent */
123 __u32 our_req_ready:1;
128 struct list_head ou_list;
130 wait_queue_head_t ou_waitq;
132 /* The next rpc version which supposed to be sent in
133 * osp_send_update_thread().*/
134 __u64 ou_rpc_version;
136 /* The rpc version assigned to the osp thandle during (osp_md_write()),
137 * which will be sent by this order. Note: the osp_thandle has be sent
138 * by this order to make sure the remote update log will follow the
139 * llog format rule. XXX: these probably should be removed once we
140 * invent new llog format */
143 /* The generation of current osp update RPC, which is used to make sure
144 * those stale RPC(with older generation) will not be sent, otherwise it
145 * will cause update lllog corruption */
150 struct dt_device opd_dt_dev;
151 /* corresponded OST index */
154 /* corrsponded MDT index, which will be used when connecting to OST
155 * for validating the connection (see ofd_parse_connect_data) */
157 /* device used to store persistent state (llogs, last ids) */
158 struct obd_export *opd_storage_exp;
159 struct dt_device *opd_storage;
160 struct dt_object *opd_last_used_oid_file;
161 struct dt_object *opd_last_used_seq_file;
163 /* stored persistently in LE format, updated directly to/from disk
164 * and required le64_to_cpu() conversion before use.
165 * Protected by opd_pre_lock */
166 struct lu_fid opd_last_used_fid;
167 struct lu_fid opd_gap_start_fid;
169 /* connection to OST */
170 struct obd_device *opd_obd;
171 struct obd_export *opd_exp;
172 struct obd_uuid opd_cluuid;
173 struct obd_connect_data *opd_connect_data;
175 struct proc_dir_entry *opd_proc_entry;
176 struct lprocfs_stats *opd_stats;
177 /* connection status. */
178 unsigned int opd_new_connection:1,
179 opd_got_disconnected:1,
182 opd_imp_seen_connected:1,
185 /* whether local recovery is completed:
186 * reported via ->ldo_recovery_complete() */
187 int opd_recovery_completed;
189 /* precreate structure for OSP */
190 struct osp_precreate *opd_pre;
191 /* dedicate precreate thread */
192 struct ptlrpc_thread opd_pre_thread;
193 /* thread waits for signals about pool going empty */
194 wait_queue_head_t opd_pre_waitq;
196 /* send update thread */
197 struct osp_updates *opd_update;
198 /* dedicate update thread */
199 struct ptlrpc_thread opd_update_thread;
202 * OST synchronization
204 spinlock_t opd_syn_lock;
205 /* unique generation, to recognize start of new records in the llog */
206 struct llog_gen opd_syn_generation;
207 /* number of changes to sync, used to wake up sync thread */
208 atomic_t opd_syn_changes;
209 /* processing of changes from previous mount is done? */
210 int opd_syn_prev_done;
212 struct ptlrpc_thread opd_syn_thread;
213 wait_queue_head_t opd_syn_waitq;
214 /* list of inflight rpc */
215 struct list_head opd_syn_inflight_list;
216 /* list of remotely committed rpc */
217 struct list_head opd_syn_committed_there;
218 /* number of changes being under sync */
219 int opd_syn_sync_in_progress;
220 /* number of RPCs in flight - flow control */
221 atomic_t opd_syn_rpc_in_flight;
222 int opd_syn_max_rpc_in_flight;
223 /* number of RPC in processing (including non-committed by OST) */
224 atomic_t opd_syn_rpc_in_progress;
225 int opd_syn_max_rpc_in_progress;
226 /* osd api's commit cb control structure */
227 struct dt_txn_callback opd_syn_txn_cb;
228 /* last used change number -- semantically similar to transno */
229 __u64 opd_syn_last_used_id;
230 /* last committed change number -- semantically similar to
232 __u64 opd_syn_last_committed_id;
233 /* last processed (taken from llog) id */
234 volatile __u64 opd_syn_last_processed_id;
235 struct osp_id_tracker *opd_syn_tracker;
236 struct list_head opd_syn_ontrack;
237 /* stop processing new requests until barrier=0 */
238 atomic_t opd_syn_barrier;
239 wait_queue_head_t opd_syn_barrier_waitq;
242 * statfs related fields: OSP maintains it on its own
244 struct obd_statfs opd_statfs;
245 cfs_time_t opd_statfs_fresh_till;
246 struct timer_list opd_statfs_timer;
247 int opd_statfs_update_in_progress;
248 /* how often to update statfs data */
249 int opd_statfs_maxage;
251 struct proc_dir_entry *opd_symlink;
253 /* If the caller wants to do some idempotent async operations on
254 * remote server, it can append the async remote requests on the
255 * osp_device::opd_async_requests via declare() functions, these
256 * requests can be packed together and sent to the remote server
257 * via single OUT RPC later. */
258 struct osp_update_request *opd_async_requests;
259 /* Protect current operations on opd_async_requests. */
260 struct mutex opd_async_requests_mutex;
261 struct list_head opd_async_updates;
262 struct rw_semaphore opd_async_updates_rwsem;
263 atomic_t opd_async_updates_count;
266 * Limit the object allocation using ENOSPC for opd_pre_status
268 int opd_reserved_mb_high;
269 int opd_reserved_mb_low;
272 #define opd_pre_lock opd_pre->osp_pre_lock
273 #define opd_pre_used_fid opd_pre->osp_pre_used_fid
274 #define opd_pre_last_created_fid opd_pre->osp_pre_last_created_fid
275 #define opd_pre_reserved opd_pre->osp_pre_reserved
276 #define opd_pre_user_waitq opd_pre->osp_pre_user_waitq
277 #define opd_pre_status opd_pre->osp_pre_status
278 #define opd_pre_create_count opd_pre->osp_pre_create_count
279 #define opd_pre_min_create_count opd_pre->osp_pre_min_create_count
280 #define opd_pre_max_create_count opd_pre->osp_pre_max_create_count
281 #define opd_pre_create_slow opd_pre->osp_pre_create_slow
282 #define opd_pre_recovering opd_pre->osp_pre_recovering
284 extern struct kmem_cache *osp_object_kmem;
286 /* The first part of oxe_buf is xattr name, and is '\0' terminated.
287 * The left part is for value, binary mode. */
/* Cached extended attribute of a remote object. */
288 struct osp_xattr_entry {
/* linkage into osp_object->opo_xattr_list */
289 struct list_head oxe_list;
/* whether the xattr exists on the remote target */
295 unsigned int oxe_exist:1,
300 /* this is a top object */
302 struct lu_object_header opo_header;
303 struct dt_object opo_obj;
304 unsigned int opo_reserved:1,
308 /* read/write lock for md osp object */
309 struct rw_semaphore opo_sem;
310 const struct lu_env *opo_owner;
311 struct lu_attr opo_attr;
312 struct list_head opo_xattr_list;
313 struct list_head opo_invalidate_cb_list;
314 /* Protect opo_ooa. */
318 extern struct lu_object_operations osp_lu_obj_ops;
319 extern const struct dt_device_operations osp_dt_ops;
320 extern struct dt_object_operations osp_md_obj_ops;
321 extern struct dt_body_operations osp_md_body_ops;
/* Per-thread scratch space, obtained via osp_env_info()/osp_thread_key,
 * to avoid stack allocation of these frequently used temporaries. */
323 struct osp_thread_info {
324 struct lu_buf osi_lb;
325 struct lu_buf osi_lb2;
326 struct lu_fid osi_fid;
327 struct lu_attr osi_attr;
328 struct ost_id osi_oi;
329 struct ost_id osi_oi2;
/* scratch llog records used by the sync machinery */
333 struct llog_rec_hdr osi_hdr;
334 struct llog_unlink64_rec osi_unlink;
335 struct llog_setattr64_rec_v2 osi_setattr;
336 struct llog_gen_rec osi_gen;
338 struct llog_cookie osi_cookie;
339 struct llog_catid osi_cid;
340 struct lu_seq_range osi_seq;
341 struct ldlm_res_id osi_resid;
342 struct obdo osi_obdo;
345 /* Iterator for OSP */
348 __u32 ooi_pos_lu_page;
352 int ooi_total_npages;
353 int ooi_valid_npages;
354 unsigned int ooi_swab:1;
356 struct dt_object *ooi_obj;
358 struct page *ooi_cur_page;
359 struct lu_idxpage *ooi_cur_idxpage;
360 struct page **ooi_pages;
363 #define OSP_THANDLE_MAGIC 0x20141214
365 struct thandle ot_super;
367 /* OSP will use this thandle to update last oid*/
368 struct thandle *ot_storage_th;
370 struct list_head ot_commit_dcb_list;
371 struct list_head ot_stop_dcb_list;
372 struct osp_update_request *ot_our;
373 atomic_t ot_refcount;
376 static inline struct osp_thandle *
377 thandle_to_osp_thandle(struct thandle *th)
379 return container_of(th, struct osp_thandle, ot_super);
/* Return the osp_update_request attached to transaction handle \a th. */
382 static inline struct osp_update_request *
383 thandle_to_osp_update_request(struct thandle *th)
385 struct osp_thandle *oth;
387 oth = thandle_to_osp_thandle(th);
/* NOTE(review): the trailing "return oth->ot_our;" appears truncated from
 * this view -- confirm against the full source */
391 /* The transaction only include the updates on the remote node, and
392 * no local updates at all */
393 static inline bool is_only_remote_trans(struct thandle *th)
395 return th->th_top == NULL;
398 static inline void osp_objid_buf_prep(struct lu_buf *buf, loff_t *off,
399 __u32 *id, int index)
401 /* Note: through id is only 32 bits, it will also write 64 bits
402 * for oid to keep compatibility with the previous version. */
403 buf->lb_buf = (void *)id;
404 buf->lb_len = sizeof(u64);
405 *off = sizeof(u64) * index;
408 static inline void osp_objseq_buf_prep(struct lu_buf *buf, loff_t *off,
409 __u64 *seq, int index)
411 buf->lb_buf = (void *)seq;
412 buf->lb_len = sizeof(u64);
413 *off = sizeof(u64) * index;
/* Wrap a raw (buffer, length) pair into a struct lu_buf. */
416 static inline void osp_buf_prep(struct lu_buf *lb, void *buf, int buf_len)
/* NOTE(review): the assignment of lb->lb_buf appears truncated from this
 * view -- confirm against the full source */
419 lb->lb_len = buf_len;
422 extern struct lu_context_key osp_thread_key;
/* Fetch this thread's osp_thread_info from \a env, refilling the env
 * (and retrying the lookup) when the key is not yet populated. */
424 static inline struct osp_thread_info *osp_env_info(const struct lu_env *env)
426 struct osp_thread_info *info;
428 info = lu_context_key_get(&env->le_ctx, &osp_thread_key);
/* NOTE(review): the NULL-check conditional guarding this refill/retry path,
 * and the final "return info;", appear truncated from this view -- confirm */
430 lu_env_refill((struct lu_env *)env);
431 info = lu_context_key_get(&env->le_ctx, &osp_thread_key);
/* Per-transaction state tracked via osp_txn_key. */
437 struct osp_txn_info {
/* llog record id used by the current transaction */
438 __u64 oti_current_id;
441 extern struct lu_context_key osp_txn_key;
/* Fetch the osp_txn_info stored in context \a ctx. */
443 static inline struct osp_txn_info *osp_txn_info(struct lu_context *ctx)
445 struct osp_txn_info *info;
447 info = lu_context_key_get(ctx, &osp_txn_key);
/* NOTE(review): the final "return info;" appears truncated from this view
 * -- confirm against the full source */
451 extern const struct lu_device_operations osp_lu_ops;
453 static inline int lu_device_is_osp(struct lu_device *d)
455 return ergo(d != NULL && d->ld_ops != NULL, d->ld_ops == &osp_lu_ops);
458 static inline struct osp_device *lu2osp_dev(struct lu_device *d)
460 LASSERT(lu_device_is_osp(d));
461 return container_of0(d, struct osp_device, opd_dt_dev.dd_lu_dev);
464 static inline struct lu_device *osp2lu_dev(struct osp_device *d)
466 return &d->opd_dt_dev.dd_lu_dev;
469 static inline struct osp_device *dt2osp_dev(struct dt_device *d)
471 LASSERT(lu_device_is_osp(&d->dd_lu_dev));
472 return container_of0(d, struct osp_device, opd_dt_dev);
475 static inline struct osp_object *lu2osp_obj(struct lu_object *o)
477 LASSERT(ergo(o != NULL, lu_device_is_osp(o->lo_dev)));
478 return container_of0(o, struct osp_object, opo_obj.do_lu);
481 static inline struct lu_object *osp2lu_obj(struct osp_object *obj)
483 return &obj->opo_obj.do_lu;
486 static inline struct osp_object *osp_obj(const struct lu_object *o)
488 LASSERT(lu_device_is_osp(o->lo_dev));
489 return container_of0(o, struct osp_object, opo_obj.do_lu);
492 static inline struct osp_object *dt2osp_obj(const struct dt_object *d)
494 return osp_obj(&d->do_lu);
497 static inline struct dt_object *osp_object_child(struct osp_object *o)
499 return container_of0(lu_object_next(osp2lu_obj(o)),
500 struct dt_object, do_lu);
503 static inline struct seq_server_site *osp_seq_site(struct osp_device *osp)
505 return osp->opd_dt_dev.dd_lu_dev.ld_site->ld_seq_site;
508 #define osp_init_rpc_lock(lck) mdc_init_rpc_lock(lck)
510 static inline void osp_get_rpc_lock(struct osp_device *osp)
512 struct mdc_rpc_lock *rpc_lock = osp->opd_obd->u.cli.cl_rpc_lock;
514 mdc_get_rpc_lock(rpc_lock, NULL);
517 static inline void osp_put_rpc_lock(struct osp_device *osp)
519 struct mdc_rpc_lock *rpc_lock = osp->opd_obd->u.cli.cl_rpc_lock;
521 mdc_put_rpc_lock(rpc_lock, NULL);
/* Signed distance (in object ids) between two fids of the same sequence;
 * positive when \a fid1 is ahead of \a fid2. */
524 static inline int osp_fid_diff(const struct lu_fid *fid1,
525 const struct lu_fid *fid2)
527 /* In 2.6+ ost_idx is packed into IDIF FID, while in 2.4 and 2.5 IDIF
528 * is always FID_SEQ_IDIF(0x100000000ULL), which does not include OST
529 * index in the seq. So we can not compare IDIF FID seq here */
530 if (fid_is_idif(fid1) && fid_is_idif(fid2)) {
531 __u32 ost_idx1 = fid_idif_ost_idx(fid1);
532 __u32 ost_idx2 = fid_idif_ost_idx(fid2);
/* NOTE(review): the closing arguments of this LASSERTF (PFID(fid2)) appear
 * truncated from this view -- confirm against the full source */
534 LASSERTF(ost_idx1 == 0 || ost_idx2 == 0 || ost_idx1 == ost_idx2,
535 "fid1: "DFID", fid2: "DFID"\n", PFID(fid1),
/* compare IDIF fids by their packed 48-bit object id, ignoring ost index */
538 return fid_idif_id(fid1->f_seq, fid1->f_oid, 0) -
539 fid_idif_id(fid2->f_seq, fid2->f_oid, 0);
542 LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID
543 ", fid2:"DFID"\n", PFID(fid1), PFID(fid2));
545 return fid_oid(fid1) - fid_oid(fid2);
/* Record \a fid as the last-used fid on \a d, detecting and logging any
 * gap in object ids (lost precreated objects, e.g. after VBR). */
549 static inline void osp_update_last_fid(struct osp_device *d, struct lu_fid *fid)
551 int diff = osp_fid_diff(fid, &d->opd_last_used_fid);
553 * we might have lost precreated objects due to VBR and precreate
554 * orphans, the gap in objid can be calculated properly only here
/* NOTE(review): the "if (diff > 0)" guard around this gap-recording branch
 * appears truncated from this view -- confirm against the full source */
558 d->opd_gap_start_fid = d->opd_last_used_fid;
559 d->opd_gap_start_fid.f_oid++;
560 d->opd_gap_count = diff - 1;
561 CDEBUG(D_HA, "Gap in objids: start="DFID", count =%d\n",
562 PFID(&d->opd_gap_start_fid), d->opd_gap_count);
564 d->opd_last_used_fid = *fid;
568 static int osp_fid_end_seq(const struct lu_env *env, struct lu_fid *fid)
570 if (fid_is_idif(fid)) {
571 struct osp_thread_info *info = osp_env_info(env);
572 struct ost_id *oi = &info->osi_oi;
574 fid_to_ostid(fid, oi);
575 return ostid_id(oi) == IDIF_MAX_OID;
577 return fid_oid(fid) == LUSTRE_DATA_SEQ_MAX_WIDTH;
581 static inline int osp_precreate_end_seq_nolock(const struct lu_env *env,
582 struct osp_device *osp)
584 struct lu_fid *fid = &osp->opd_pre_last_created_fid;
586 return osp_fid_end_seq(env, fid);
589 static inline int osp_precreate_end_seq(const struct lu_env *env,
590 struct osp_device *osp)
594 spin_lock(&osp->opd_pre_lock);
595 rc = osp_precreate_end_seq_nolock(env, osp);
596 spin_unlock(&osp->opd_pre_lock);
600 static inline int osp_is_fid_client(struct osp_device *osp)
602 struct obd_import *imp = osp->opd_obd->u.cli.cl_import;
604 return imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_FID;
607 struct object_update *
608 update_buffer_get_update(struct object_update_request *request,
611 int osp_extend_update_buffer(const struct lu_env *env,
612 struct osp_update_request *our);
614 struct osp_update_request_sub *
615 osp_current_object_update_request(struct osp_update_request *our);
617 int osp_object_update_request_create(struct osp_update_request *our,
/*
 * osp_update_rpc_pack() - pack a single update of type \a name (packed by
 * out_<name>_pack()) into the current OUT request buffer of \a our; on
 * -E2BIG a larger request buffer is created and the pack is retried.
 * NOTE(review): several lines of this macro (variable declarations, the
 * retry branch body and the closing of the do/while) appear truncated from
 * this view -- confirm against the full source.
 */
620 #define osp_update_rpc_pack(env, name, our, op, ...) \
622 struct object_update *object_update; \
623 size_t max_update_length; \
624 struct osp_update_request_sub *ours; \
628 ours = osp_current_object_update_request(our); \
629 LASSERT(ours != NULL); \
630 max_update_length = ours->ours_req_size - \
631 object_update_request_size(ours->ours_req); \
633 object_update = update_buffer_get_update(ours->ours_req,\
634 ours->ours_req->ourq_count); \
635 ret = out_##name##_pack(env, object_update, \
636 &max_update_length, \
638 if (ret == -E2BIG) { \
640 /* Create new object update request */ \
641 rc1 = osp_object_update_request_create(our, \
642 max_update_length + \
643 offsetof(struct object_update_request, \
644 ourq_updates[0]) + 1); \
652 ours->ours_req->ourq_count++; \
653 (our)->our_update_nr++; \
654 object_update->ou_batchid = \
656 object_update->ou_flags |= \
665 static inline bool osp_send_update_thread_running(struct osp_device *osp)
667 return osp->opd_update_thread.t_flags & SVC_RUNNING;
670 static inline bool osp_send_update_thread_stopped(struct osp_device *osp)
672 return osp->opd_update_thread.t_flags & SVC_STOPPED;
675 typedef int (*osp_update_interpreter_t)(const struct lu_env *env,
676 struct object_update_reply *rep,
677 struct ptlrpc_request *req,
678 struct osp_object *obj,
679 void *data, int index, int rc);
682 void osp_update_last_id(struct osp_device *d, u64 objid);
683 extern struct llog_operations osp_mds_ost_orig_logops;
686 int osp_insert_async_request(const struct lu_env *env, enum update_type op,
687 struct osp_object *obj, int count, __u16 *lens,
688 const void **bufs, void *data, __u32 repsize,
689 osp_update_interpreter_t interpreter);
691 int osp_unplug_async_request(const struct lu_env *env,
692 struct osp_device *osp,
693 struct osp_update_request *update);
694 int osp_trans_update_request_create(struct thandle *th);
695 struct thandle *osp_trans_create(const struct lu_env *env,
696 struct dt_device *d);
697 int osp_trans_start(const struct lu_env *env, struct dt_device *dt,
699 int osp_insert_update_callback(const struct lu_env *env,
700 struct osp_update_request *update,
701 struct osp_object *obj, void *data,
702 osp_update_interpreter_t interpreter);
704 struct osp_update_request *osp_update_request_create(struct dt_device *dt);
705 void osp_update_request_destroy(const struct lu_env *env,
706 struct osp_update_request *update);
708 int osp_send_update_thread(void *arg);
709 int osp_check_and_set_rpc_version(struct osp_thandle *oth,
710 struct osp_object *obj);
712 void osp_thandle_destroy(const struct lu_env *env, struct osp_thandle *oth);
713 static inline void osp_thandle_get(struct osp_thandle *oth)
715 atomic_inc(&oth->ot_refcount);
718 static inline void osp_thandle_put(const struct lu_env *env,
719 struct osp_thandle *oth)
721 if (atomic_dec_and_test(&oth->ot_refcount))
722 osp_thandle_destroy(env, oth);
725 int osp_prep_update_req(const struct lu_env *env, struct obd_import *imp,
726 struct osp_update_request *our,
727 struct ptlrpc_request **reqp);
728 int osp_remote_sync(const struct lu_env *env, struct osp_device *osp,
729 struct osp_update_request *update,
730 struct ptlrpc_request **reqp);
732 struct thandle *osp_get_storage_thandle(const struct lu_env *env,
734 struct osp_device *osp);
735 void osp_trans_callback(const struct lu_env *env,
736 struct osp_thandle *oth, int rc);
737 void osp_invalidate_request(struct osp_device *osp);
739 int osp_attr_get(const struct lu_env *env, struct dt_object *dt,
740 struct lu_attr *attr);
741 int osp_xattr_get(const struct lu_env *env, struct dt_object *dt,
742 struct lu_buf *buf, const char *name);
743 int osp_declare_xattr_set(const struct lu_env *env, struct dt_object *dt,
744 const struct lu_buf *buf, const char *name,
745 int flag, struct thandle *th);
746 int osp_xattr_set(const struct lu_env *env, struct dt_object *dt,
747 const struct lu_buf *buf, const char *name, int fl,
749 int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
750 const char *name, struct thandle *th);
751 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
752 const char *name, struct thandle *th);
753 int osp_invalidate(const struct lu_env *env, struct dt_object *dt);
754 void osp_obj_invalidate_cache(struct osp_object *obj);
756 int osp_trans_stop(const struct lu_env *env, struct dt_device *dt,
758 int osp_trans_cb_add(struct thandle *th, struct dt_txn_commit_cb *dcb);
760 struct dt_it *osp_it_init(const struct lu_env *env, struct dt_object *dt,
762 void osp_it_fini(const struct lu_env *env, struct dt_it *di);
763 int osp_it_get(const struct lu_env *env, struct dt_it *di,
764 const struct dt_key *key);
765 void osp_it_put(const struct lu_env *env, struct dt_it *di);
766 __u64 osp_it_store(const struct lu_env *env, const struct dt_it *di);
767 int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
769 int osp_it_next_page(const struct lu_env *env, struct dt_it *di);
770 /* osp_md_object.c */
771 int osp_md_declare_object_create(const struct lu_env *env,
772 struct dt_object *dt,
773 struct lu_attr *attr,
774 struct dt_allocation_hint *hint,
775 struct dt_object_format *dof,
777 int osp_md_object_create(const struct lu_env *env, struct dt_object *dt,
778 struct lu_attr *attr, struct dt_allocation_hint *hint,
779 struct dt_object_format *dof, struct thandle *th);
780 int osp_md_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
781 const struct lu_attr *attr, struct thandle *th);
782 int osp_md_attr_set(const struct lu_env *env, struct dt_object *dt,
783 const struct lu_attr *attr, struct thandle *th);
784 extern const struct dt_index_operations osp_md_index_ops;
786 /* osp_precreate.c */
787 int osp_init_precreate(struct osp_device *d);
788 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d);
789 __u64 osp_precreate_get_id(struct osp_device *d);
790 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
792 void osp_precreate_fini(struct osp_device *d);
793 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt, __u64);
794 void osp_pre_update_status(struct osp_device *d, int rc);
795 void osp_statfs_need_now(struct osp_device *d);
796 int osp_reset_last_used(const struct lu_env *env, struct osp_device *osp);
797 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
798 struct lu_fid *fid, int sync);
799 int osp_init_pre_fid(struct osp_device *osp);
802 void osp_lprocfs_init(struct osp_device *osp);
805 int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
806 llog_op_type type, struct thandle *th);
807 int osp_sync_add(const struct lu_env *env, struct osp_object *o,
808 llog_op_type type, struct thandle *th,
809 const struct lu_attr *attr);
810 int osp_sync_init(const struct lu_env *env, struct osp_device *d);
811 int osp_sync_fini(struct osp_device *d);
812 void __osp_sync_check_for_work(struct osp_device *d);
815 extern struct obd_ops lwp_obd_device_ops;
816 extern struct lu_device_type lwp_device_type;
818 static inline struct lu_device *osp2top(const struct osp_device *osp)
820 return osp->opd_dt_dev.dd_lu_dev.ld_site->ls_top_dev;
823 static inline void osp_set_req_replay(const struct osp_device *osp,
824 struct ptlrpc_request *req)
826 struct obd_device *obd = osp2top(osp)->ld_obd;
828 /* The RPC must be recovery related for the cases:
830 * 1. sent during recovery, or
831 * 2. sent before the recovery thread target_recovery_thread() start,
832 * such as triggered by lod_sub_recovery_thread(). */
833 if (obd->obd_recovering || (obd->obd_replayable && obd->obd_no_conn))
834 req->rq_allow_replay = 1;