fs/lustre-release.git: lustre/osp/osp_precreate.c (commit cabc772c6046a637287ff58615fbd7e65f96afde)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osp/osp_precreate.c
32  *
33  * Lustre OST Proxy Device
34  *
35  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
36  * Author: Mikhail Pershin <mike.pershin@intel.com>
37  * Author: Di Wang <di.wang@intel.com>
38  */
39
40 #define DEBUG_SUBSYSTEM S_MDS
41
42 #include <linux/kthread.h>
43
44 #include <lustre_obdo.h>
45
46 #include "osp_internal.h"
47
48 /*
49  * there are two specific states to take care of:
50  *
51  * = import is disconnected =
52  *
53  * = import is inactive =
54  *   in this case osp_declare_create() returns an error
55  *
56  */
57
58 /**
59  * Check whether statfs data is expired
60  *
61  * The OSP device caches statfs data for the target; this function checks
62  * whether that data has expired.
63  *
64  * \param[in] d         OSP device
65  *
66  * \retval              0 - not expired, 1 - expired
67  */
68 static inline int osp_statfs_need_update(struct osp_device *d)
69 {
70         return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
71 }
72
73 /*
74  * OSP tries to maintain a pool of available objects so that calls to create
75  * objects don't block most of the time
76  *
77  * each time OSP gets connected to the OST, we should start with precreation cleanup
78  */
79 static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
80 {
81         struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
82
83         LASSERT(d);
84         /* invalidate statfs data so osp_precreate_thread() can refresh */
85         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
86         if (d->opd_pre_task)
87                 wake_up(&d->opd_pre_waitq);
88 }
89
90 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs);
91
92 /*
93  * The function updates the current precreation status if broken, and
94  * updates the cached statfs state if functional, then wakes up waiters.
95  * We don't clear opd_pre_status directly here, but rather leave this
96  * to osp_pre_update_msfs() to do if everything is OK so that we don't
97  * have a race to clear opd_pre_status and then set it to -ENOSPC again.
98  *
99  * \param[in] d         OSP device
100  * \param[in] msfs      statfs data
101  * \param[in] rc        new precreate status for device \a d
102  */
103 static void osp_pre_update_status_msfs(struct osp_device *d,
104                                        struct obd_statfs *msfs, int rc)
105 {
106         CDEBUG(D_INFO, "%s: Updating status = %d\n", d->opd_obd->obd_name, rc);
107         if (rc)
108                 d->opd_pre_status = rc;
109         else
110                 osp_pre_update_msfs(d, msfs);
111
112         wake_up_all(&d->opd_pre_user_waitq);
113 }
114
115 /* Pass in the old statfs data in case the limits have changed */
116 void osp_pre_update_status(struct osp_device *d, int rc)
117 {
118         osp_pre_update_status_msfs(d, &d->opd_statfs, rc);
119 }
120
121
122 /**
123  * RPC interpret callback for OST_STATFS RPC
124  *
125  * An interpretation callback invoked by ptlrpc when the target replies to an
126  * OST_STATFS RPC. It's used to maintain the statfs cache for the target.
127  * The function fills data from the reply if successful and schedules another
128  * update.
129  *
130  * \param[in] env       LU environment provided by the caller
131  * \param[in] req       RPC replied
132  * \param[in] aa        callback data
133  * \param[in] rc        RPC result
134  *
135  * \retval 0            on success
136  * \retval negative     negated errno on error
137  */
138 static int osp_statfs_interpret(const struct lu_env *env,
139                                 struct ptlrpc_request *req, void *args, int rc)
140 {
141         union ptlrpc_async_args *aa = args;
142         struct obd_import *imp = req->rq_import;
143         struct obd_statfs *msfs;
144         struct obd_statfs *sfs;
145         struct osp_device *d;
146         u64 maxage_ns;
147
148         ENTRY;
149
150         aa = ptlrpc_req_async_args(aa, req);
151         d = aa->pointer_arg[0];
152         LASSERT(d);
153
154         if (rc != 0)
155                 GOTO(out, rc);
156
157         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
158         if (msfs == NULL)
159                 GOTO(out, rc = -EPROTO);
160
161         if (d->opd_pre)
162                 osp_pre_update_status_msfs(d, msfs, 0);
163         else
164                 osp_pre_update_msfs(d, msfs);
165
166         /* schedule next update */
167         maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
168         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
169         mod_timer(&d->opd_statfs_timer,
170                   jiffies + cfs_time_seconds(d->opd_statfs_maxage));
171         d->opd_statfs_update_in_progress = 0;
172
173         sfs = &d->opd_statfs;
174         CDEBUG(D_CACHE,
175                "%s (%p): %llu blocks, %llu free, %llu avail, %u bsize, %u reserved mb low, %u reserved mb high, %u reserved ino low, %u reserved ino high, %llu files, %llu free files %#x\n",
176                d->opd_obd->obd_name, d, sfs->os_blocks, sfs->os_bfree,
177                sfs->os_bavail, sfs->os_bsize, d->opd_reserved_mb_low,
178                d->opd_reserved_mb_high, d->opd_reserved_ino_low,
179                d->opd_reserved_ino_high, sfs->os_files, sfs->os_ffree,
180                sfs->os_state);
181
182         RETURN(0);
183 out:
184         /* couldn't update statfs, try again with a small delay */
185         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
186         d->opd_statfs_update_in_progress = 0;
187         if (d->opd_pre && d->opd_pre_task)
188                 wake_up(&d->opd_pre_waitq);
189
190         if (req->rq_import_generation == imp->imp_generation)
191                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
192                        d->opd_obd->obd_name, rc);
193         RETURN(rc);
194 }
195
196 /**
197  * Send OST_STATFS RPC
198  *
199  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
200  * Also disables scheduled updates, as at times OSP may need to refresh
201  * statfs data before expiration. The function doesn't block; instead
202  * the interpretation callback osp_statfs_interpret() is used.
203  *
204  * \param[in] d         OSP device
205  */
206 static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
207 {
208         u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
209         struct ptlrpc_request   *req;
210         struct obd_import       *imp;
211         union ptlrpc_async_args *aa;
212         int rc;
213
214         ENTRY;
215
216         CDEBUG(D_CACHE, "going to update statfs\n");
217
218         imp = d->opd_obd->u.cli.cl_import;
219         LASSERT(imp);
220
221         req = ptlrpc_request_alloc(imp,
222                            d->opd_pre ? &RQF_OST_STATFS : &RQF_MDS_STATFS);
223         if (req == NULL)
224                 RETURN(-ENOMEM);
225
226         rc = ptlrpc_request_pack(req,
227                          d->opd_pre ? LUSTRE_OST_VERSION : LUSTRE_MDS_VERSION,
228                          d->opd_pre ? OST_STATFS : MDS_STATFS);
229         if (rc) {
230                 ptlrpc_request_free(req);
231                 RETURN(rc);
232         }
233         ptlrpc_request_set_replen(req);
234         if (d->opd_pre)
235                 req->rq_request_portal = OST_CREATE_PORTAL;
236         ptlrpc_at_set_req_timeout(req);
237
238         req->rq_interpret_reply = osp_statfs_interpret;
239         aa = ptlrpc_req_async_args(aa, req);
240         aa->pointer_arg[0] = d;
241
242         /*
243          * no updates till reply
244          */
245         timer_delete(&d->opd_statfs_timer);
246         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
247         d->opd_statfs_update_in_progress = 1;
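        /* note: opd_statfs_fresh_till is pushed far into the future here so
         * osp_statfs_need_update() stays false until osp_statfs_interpret()
         * reschedules the next refresh (or retries shortly on failure) */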
248
249         ptlrpcd_add_req(req);
250
251         /* we still want to sync changes if no new changes are coming */
252         if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
253                 GOTO(out, rc);
254
255         if (atomic_read(&d->opd_sync_changes)) {
256                 struct thandle *th;
257
258                 th = dt_trans_create(env, d->opd_storage);
259                 if (IS_ERR(th)) {
260                         CERROR("%s: can't sync\n", d->opd_obd->obd_name);
261                         GOTO(out, rc);
262                 }
263                 rc = dt_trans_start_local(env, d->opd_storage, th);
264                 if (rc == 0) {
265                         CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
266                                d->opd_obd->obd_name,
267                                atomic_read(&d->opd_sync_changes));
268                         osp_sync_add_commit_cb_1s(env, d, th);
269                 }
270                 dt_trans_stop(env, d->opd_storage, th);
271         }
272
273 out:
274         RETURN(0);
275 }
276
277 /**
278  * Schedule an immediate update for statfs data
279  *
280  * If the cached statfs data claims no free space, but OSP has got a request to
281  * destroy an object (thus probably releasing some space), then we may need to
282  * refresh the cached statfs data sooner than planned. The function checks that
283  * no statfs update is in progress and schedules an immediate update if so.
284  * XXX: there might be a case where removed object(s) do not add free space (empty
285  * object). If the number of such deletions is high, then we can start to update
286  * statfs too often, causing an RPC storm. Some throttling is needed...
287  *
288  * \param[in] d         OSP device where statfs data needs to be refreshed
289  */
290 void osp_statfs_need_now(struct osp_device *d)
291 {
292         if (!d->opd_statfs_update_in_progress) {
293                 /*
294                  * if current status is -ENOSPC (lack of free space on OST)
295                  * then we should poll OST immediately once object destroy
296                  * is replied
297                  */
298                 d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
299                 timer_delete(&d->opd_statfs_timer);
300                 wake_up(&d->opd_pre_waitq);
301         }
302 }
303
304 /**
305  * Return number of precreated objects
306  *
307  * A simple helper to calculate the number of precreated objects on the device.
308  *
309  * \param[in] env       LU environment provided by the caller
310  * \param[in] osp       OSP device
311  *
312  * \retval              the number of the precreated objects
313  */
314 static inline int osp_objs_precreated(const struct lu_env *env,
315                                       struct osp_device *osp)
316 {
317         return osp_fid_diff(&osp->opd_pre_last_created_fid,
318                             &osp->opd_pre_used_fid);
319 }
320
321 /**
322  * Check whether the pool of precreated objects is nearly empty
323  *
324  * We should not wait till the pool of precreated objects is exhausted,
325  * because then there will be a long period of OSP being unavailable for
326  * new creations due to the lengthy precreate RPC. Instead we ask for another
327  * precreation ahead and hopefully have it ready before the current pool is
328  * empty. Note that this function relies on external locking.
329  *
330  * \param[in] env       LU environment provided by the caller
331  * \param[in] d         OSP device
332  *
333  * \retval              0 - current pool is good enough, 1 - time to precreate
334  */
335 static inline int osp_precreate_near_empty_nolock(const struct lu_env *env,
336                                                   struct osp_device *d)
337 {
338         int window = osp_objs_precreated(env, d);
339
340         /* don't consider new precreation till OST is healthy and
341          * has free space */
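        /* the "window" is the number of precreated-but-unused objects; ask
         * for another batch once less than half of opd_pre_create_count
         * remains after reservations, or when creation is being forced */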
342         return ((window - d->opd_pre_reserved < d->opd_pre_create_count / 2 ||
343                  d->opd_force_creation) && (d->opd_pre_status == 0));
344 }
345
346 /**
347  * Check pool of precreated objects
348  *
349  * This is the protected version of osp_precreate_near_empty_nolock(); see that
350  * function for the details.
351  *
352  * \param[in] env       LU environment provided by the caller
353  * \param[in] d         OSP device
354  *
355  * \retval              0 - current pool is good enough, 1 - time to precreate
356  */
357 static inline int osp_precreate_near_empty(const struct lu_env *env,
358                                            struct osp_device *d)
359 {
360         int rc;
361
362         if (d->opd_pre == NULL)
363                 return 0;
364
365         /* XXX: do we really need locking here? */
366         spin_lock(&d->opd_pre_lock);
367         rc = osp_precreate_near_empty_nolock(env, d);
368         spin_unlock(&d->opd_pre_lock);
369         return rc;
370 }
371
372 /**
373  * Write FID into last_oid/last_seq files
374  *
375  * The function stores the sequence and the in-sequence id into two dedicated
376  * files. The sync argument can be used to request synchronous commit, so the
377  * function won't return until the updates are committed.
378  *
379  * \param[in] env       LU environment provided by the caller
380  * \param[in] osp       OSP device
381  * \param[in] fid       fid where sequence/id is taken
382  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
383  *
384  * \retval 0            on success
385  * \retval negative     negated errno on error
386  **/
387 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
388                                  struct lu_fid *fid, int sync)
389 {
390         struct osp_thread_info  *oti = osp_env_info(env);
391         struct lu_buf      *lb_oid = &oti->osi_lb;
392         struct lu_buf      *lb_oseq = &oti->osi_lb2;
393         loff_t             oid_off;
394         u64                oid;
395         loff_t             oseq_off;
396         struct thandle    *th;
397         int                   rc;
398         ENTRY;
399
400         if (osp->opd_storage->dd_rdonly)
401                 RETURN(0);
402
403         /* Note: though f_oid is only 32 bits, it will also write 64 bits
404          * for oid to keep compatibility with the previous version. */
405         oid = fid->f_oid;
406         osp_objid_buf_prep(lb_oid, &oid_off,
407                            &oid, osp->opd_index);
408
409         osp_objseq_buf_prep(lb_oseq, &oseq_off,
410                             &fid->f_seq, osp->opd_index);
411
412         th = dt_trans_create(env, osp->opd_storage);
413         if (IS_ERR(th))
414                 RETURN(PTR_ERR(th));
415
416         th->th_sync |= sync;
417         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
418                                      lb_oid, oid_off, th);
419         if (rc != 0)
420                 GOTO(out, rc);
421
422         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
423                                      lb_oseq, oseq_off, th);
424         if (rc != 0)
425                 GOTO(out, rc);
426
427         rc = dt_trans_start_local(env, osp->opd_storage, th);
428         if (rc != 0)
429                 GOTO(out, rc);
430
431         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
432                              &oid_off, th);
433         if (rc != 0) {
434                 CERROR("%s: can not write to last oid file: rc = %d\n",
435                         osp->opd_obd->obd_name, rc);
436                 GOTO(out, rc);
437         }
438         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
439                              &oseq_off, th);
440         if (rc) {
441                 CERROR("%s: can not write to last seq file: rc = %d\n",
442                         osp->opd_obd->obd_name, rc);
443                 GOTO(out, rc);
444         }
445 out:
446         dt_trans_stop(env, osp->opd_storage, th);
447         RETURN(rc);
448 }
449
450 static void osp_update_fldb_cache(const struct lu_env *env,
451                                   struct osp_device *osp,
452                                   struct lu_fid *fid)
453 {
454         struct lu_seq_range range = { 0 };
455         struct lu_server_fld *server_fld;
456         struct lu_site *site;
457
458         site = osp->opd_storage->dd_lu_dev.ld_site;
459         server_fld = lu_site2seq(site)->ss_server_fld;
460         if (!server_fld)
461                 return;
462
463         fld_range_set_type(&range, LU_SEQ_RANGE_ANY);
464         fld_server_lookup(env, server_fld, fid_seq(fid), &range);
465 }
466
467 /**
468  * Switch to another sequence
469  *
470  * When a current sequence has no available IDs left, OSP has to switch to
471  * another new sequence. OSP requests it using the regular FLDB protocol
472  * and stores synchronously before that is used in precreate. This is needed
473  * and stores it synchronously before it is used in precreate. This is needed
474  * basically to keep the sequences referenced (not orphaned); otherwise it's
475  * possible that the OST has some objects precreated and the clients have data
476  * written to them, but after MDT failover nobody refers to those objects and OSP
477  * has no idea that the sequence needs cleanup.
478  * While this is a very expensive operation, it's supposed to happen infrequently
479  * because a sequence has LUSTRE_DATA_SEQ_MAX_WIDTH=32M objects by default.
480  * \param[in] env       LU environment provided by the caller
481  * \param[in] osp       OSP device
482  *
483  * \retval 0            on success
484  * \retval negative     negated errno on error
485  */
486 static int osp_precreate_rollover_new_seq(struct lu_env *env,
487                                           struct osp_device *osp)
488 {
489         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
490         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
491         int             rc;
492         ENTRY;
493
494         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
495         if (rc != 0) {
496                 CERROR("%s: alloc fid error: rc = %d\n",
497                        osp->opd_obd->obd_name, rc);
498                 RETURN(rc);
499         }
500
501         fid->f_oid = 1;
502         fid->f_ver = 0;
503         LASSERTF(fid_seq(fid) != fid_seq(last_fid),
504                  "fid "DFID", last_fid "DFID"\n", PFID(fid),
505                  PFID(last_fid));
506
507         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
508         if (rc != 0) {
509                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
510                        osp->opd_obd->obd_name, rc);
511                 RETURN(rc);
512         }
513
514         LCONSOLE(D_INFO, "%s: update sequence from %#llx to %#llx\n",
515                  osp->opd_obd->obd_name, fid_seq(last_fid),
516                  fid_seq(fid));
517         /* Update last_xxx to the new seq */
518         spin_lock(&osp->opd_pre_lock);
519         osp->opd_last_used_fid = *fid;
520         osp_fid_to_obdid(fid, &osp->opd_last_id);
521         osp->opd_gap_start_fid = *fid;
522         osp->opd_pre_used_fid = *fid;
523         osp->opd_pre_last_created_fid = *fid;
524         spin_unlock(&osp->opd_pre_lock);
525
526         if (!rc)
527                 osp_update_fldb_cache(env, osp, fid);
528
529         RETURN(rc);
530 }
531
532 /**
533  * Find IDs available in current sequence
534  *
535  * The function calculates the highest possible ID and the number of IDs
536  * available in the current sequence OSP is using. The number is limited
537  * artificially by the caller (grow param) and naturally by the number of IDs
538  * available in the sequence. The function doesn't require external
539  * locking.
540  *
541  * \param[in] env       LU environment provided by the caller
542  * \param[in] osp       OSP device
543  * \param[in] fid       FID the caller wants to start with
544  * \param[in] grow      how many the caller wants
545  * \param[out] fid      the highest calculated FID
546  * \param[out] grow     the number of available IDs calculated
547  *
548  * \retval              0 on success, 1 - the sequence is empty
549  */
550 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
551                               struct lu_fid *fid, int *grow)
552 {
553         struct osp_thread_info *osi = osp_env_info(env);
554         __u64 seq_width = osp->opd_pre_seq_width;
555         __u64 end;
556         int i = 0;
557
558         if (fid_is_idif(fid)) {
559                 struct lu_fid   *last_fid;
560                 struct ost_id   *oi = &osi->osi_oi;
561                 int rc;
562
563                 spin_lock(&osp->opd_pre_lock);
564                 last_fid = &osp->opd_pre_last_created_fid;
565                 fid_to_ostid(last_fid, oi);
566                 end = min(ostid_id(oi) + *grow, min(IDIF_MAX_OID, seq_width));
567                 *grow = end - ostid_id(oi);
568                 rc = ostid_set_id(oi, ostid_id(oi) + *grow);
569                 spin_unlock(&osp->opd_pre_lock);
570
571                 if (*grow == 0 || rc)
572                         return 1;
573
574                 ostid_to_fid(fid, oi, osp->opd_index);
575                 return 0;
576         }
577
578         spin_lock(&osp->opd_pre_lock);
579         *fid = osp->opd_pre_last_created_fid;
580         end = fid->f_oid;
581         end = min((end + *grow), min(OBIF_MAX_OID, seq_width));
582         *grow = end - fid->f_oid;
583         fid->f_oid += end - fid->f_oid;
584         spin_unlock(&osp->opd_pre_lock);
585
586         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
587                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
588
589         return *grow > 0 ? 0 : 1;
590 }
591
592 /**
593  * Prepare and send precreate RPC
594  *
595  * The function finds how many objects should be precreated.  Then allocates,
596  * prepares and schedules precreate RPC synchronously. Upon reply the function
597  * wakes up the threads waiting for the new objects on this target. If the
598  * target wasn't able to create all the objects requested, then the next
599  * precreate will be asking for fewer objects (i.e. slow precreate down).
600  *
601  * \param[in] env       LU environment provided by the caller
602  * \param[in] d         OSP device
603  *
604  * \retval 0            on success
605  * \retval negative     negated errno on error
606  **/
607 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
608 {
609         struct osp_thread_info  *oti = osp_env_info(env);
610         struct ptlrpc_request   *req;
611         struct obd_import       *imp;
612         struct ost_body         *body;
613         int                      rc, grow, diff;
614         struct lu_fid           *fid = &oti->osi_fid;
615         ENTRY;
616
617         /* don't precreate new objects till OST healthy and has free space */
618         if (unlikely(d->opd_pre_status)) {
619                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
620                        d->opd_obd->obd_name, d->opd_pre_status);
621                 RETURN(0);
622         }
623
624         /*
625          * if connection/initialization is not completed, ignore
626          */
627         imp = d->opd_obd->u.cli.cl_import;
628         LASSERT(imp);
629
630         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
631         if (req == NULL)
632                 RETURN(-ENOMEM);
633         req->rq_request_portal = OST_CREATE_PORTAL;
634
635         /* We should not resend create request - anyway we will have delorphan
636          * and kill these objects.
637          * Only needed for MDS+OSS rolling upgrade interop with 2.16+older.
638          */
639         if (unlikely(!imp_connect_replay_create(imp)))
640                 req->rq_no_delay = req->rq_no_resend = 1;
641
642         /* Delorphan happens only with the first MDT-OST connect. resend/replay
643          * handles object creation on reconnects, no need to do delorphan
644          * in this case.
645          */
646
647         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
648         if (rc) {
649                 ptlrpc_request_free(req);
650                 RETURN(rc);
651         }
652
653         spin_lock(&d->opd_pre_lock);
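        /* size the batch: forced creation (e.g. reformatted OST) starts from
         * the minimum, otherwise never ask for more than half of the
         * configured maximum in a single create RPC */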
654         if (d->opd_force_creation)
655                 d->opd_pre_create_count = OST_MIN_PRECREATE;
656         else if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
657                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
658         grow = d->opd_pre_create_count;
659         spin_unlock(&d->opd_pre_lock);
660
661         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
662         LASSERT(body);
663
664         *fid = d->opd_pre_last_created_fid;
665         rc = osp_precreate_fids(env, d, fid, &grow);
666         if (rc == 1)
667                 /* Current seq has been used up */
668                 GOTO(out_req, rc = -ENOSPC);
669
670         if (!osp_is_fid_client(d)) {
671                 /* Non-FID client will always send seq 0 because of
672                  * compatibility */
673                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
674                 fid->f_seq = 0;
675         }
676
677         fid_to_ostid(fid, &body->oa.o_oi);
678         body->oa.o_valid = OBD_MD_FLGROUP;
679
680         ptlrpc_request_set_replen(req);
681
682         if (CFS_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
683                 GOTO(ready, rc = 0);
684
685         rc = ptlrpc_queue_wait(req);
686         if (rc) {
687                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
688                        rc);
689                 if (req->rq_net_err)
690                         /* make osp_precreate_reserve() wait and retry */
691                         rc = -ENOTCONN;
692                 GOTO(out_req, rc);
693         }
694
695         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
696         if (body == NULL)
697                 GOTO(out_req, rc = -EPROTO);
698
699         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
700
701 ready:
702         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
703                 CERROR("%s: precreate fid "DFID" <= local used fid "DFID
704                        ": rc = %d\n", d->opd_obd->obd_name,
705                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
706                 GOTO(out_req, rc = -ESTALE);
707         }
708
709         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
710
711         spin_lock(&d->opd_pre_lock);
712         if (diff < grow) {
713                 /* the OST has not managed to create all the
714                  * objects we asked for */
715                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
716                 d->opd_pre_create_slow = 1;
717         } else {
718                 /* the OST is able to keep up with the work,
719                  * we could consider increasing create_count
720                  * next time if needed */
721                 d->opd_pre_create_slow = 0;
722         }
723
724         if ((body->oa.o_valid & OBD_MD_FLSIZE) && body->oa.o_size)
725                 d->opd_pre_seq_width = body->oa.o_size;
726
727         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
728         fid_to_ostid(fid, &body->oa.o_oi);
729
730         d->opd_pre_last_created_fid = *fid;
731         d->opd_force_creation = false;
732         spin_unlock(&d->opd_pre_lock);
733
734         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
735                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
736                PFID(&d->opd_pre_last_created_fid));
737 out_req:
738         /* now we can wakeup all users awaiting for objects */
739         osp_pre_update_status(d, rc);
740
741         ptlrpc_req_finished(req);
742
743         if (!rc)
744                 osp_update_fldb_cache(env, d, fid);
745
746         RETURN(rc);
747 }
748
749 /**
750  * Get last precreated object from target (OST)
751  *
752  * Sends a synchronous RPC to the target (OST) to learn the last precreated
753  * object. This is later used to remove all unused objects (orphan cleanup
754  * procedure). Also, the next object after the one we got will be used as a
755  * starting point for the new precreates.
756  *
757  * \param[in] env       LU environment provided by the caller
758  * \param[in] d         OSP device
759  * \param[in] update    whether to update the last used fid
760  *
761  * \retval 0            on success
762  * \retval negative     negated errno on error
763  **/
764 static int osp_get_lastfid_from_ost(const struct lu_env *env,
765                                     struct osp_device *d, bool update)
766 {
767         struct ptlrpc_request   *req = NULL;
768         struct obd_import       *imp;
769         struct lu_fid           *last_fid;
770         char                    *tmp;
771         int                     rc;
772         ENTRY;
773
774         imp = d->opd_obd->u.cli.cl_import;
775         LASSERT(imp);
776
777         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
778         if (req == NULL)
779                 RETURN(-ENOMEM);
780
781         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
782                              sizeof(KEY_LAST_FID));
783
784         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
785         if (rc) {
786                 ptlrpc_request_free(req);
787                 RETURN(rc);
788         }
789
790         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
791         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
792
793         req->rq_no_delay = req->rq_no_resend = 1;
794         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
795         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
796
797         ptlrpc_request_set_replen(req);
798
799         rc = ptlrpc_queue_wait(req);
800         if (rc) {
801                 /* -EFAULT means reading LAST_FID failed (see ofd_get_info_hdl),
802                  * let the sysadmin sort this out.
803                  */
804                 if (rc == -EFAULT)
805                         ptlrpc_set_import_active(imp, 0);
806                 GOTO(out, rc);
807         }
808
809         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
810         if (last_fid == NULL) {
811                 CERROR("%s: Got last_fid failed.\n", d->opd_obd->obd_name);
812                 GOTO(out, rc = -EPROTO);
813         }
814
815         if (!fid_is_sane(last_fid)) {
816                 CERROR("%s: Got insane last_fid "DFID"\n",
817                        d->opd_obd->obd_name, PFID(last_fid));
818                 GOTO(out, rc = -EPROTO);
819         }
820
821         /* Only update the last used fid, if the OST has objects for
822          * this sequence, i.e. fid_oid > 0 */
823         if (fid_oid(last_fid) > 0 && update)
824                 d->opd_last_used_fid = *last_fid;
825
826         if (fid_seq(last_fid) == fid_seq(&d->opd_last_used_fid)) {
827                 if (fid_oid(last_fid) == 0 ||
828                     (fid_seq_is_norm(fid_seq(last_fid)) &&
829                      fid_oid(last_fid) == LUSTRE_FID_INIT_OID)) {
830                         /* reformatted OST, it requires creation request
831                          * to recreate objects
832                          */
833                         spin_lock(&d->opd_pre_lock);
834                         d->opd_force_creation = true;
835                         spin_unlock(&d->opd_pre_lock);
836                 }
837         }
838         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
839                PFID(last_fid));
840
841 out:
842         ptlrpc_req_finished(req);
843         RETURN(rc);
844 }
845
846 /**
847  * Cleanup orphans on OST
848  *
849  * This function is called in the context of a dedicated thread handling
850  * all the precreation stuff. The function waits till local recovery
851  * is complete, then identifies all the unreferenced objects (orphans)
852  * using the highest ID referenced locally and the highest object
853  * precreated by the target. The found range is subject to removal
854  * using a specially flagged RPC. During this process the OSP is marked
855  * unavailable for new objects.
856  *
857  * \param[in] env       LU environment provided by the caller
858  * \param[in] d         OSP device
859  *
860  * \retval 0            on success
861  * \retval negative     negated errno on error
862  */
863 static int osp_precreate_cleanup_orphans(struct lu_env *env,
864                                          struct osp_device *d)
865 {
866         struct osp_thread_info  *osi = osp_env_info(env);
867         struct lu_fid           *last_fid = &osi->osi_fid;
868         struct ptlrpc_request   *req = NULL;
869         struct obd_import       *imp = d->opd_obd->u.cli.cl_import;
870         struct ost_body         *body;
871         int                      update_status = 0;
872         int                      rc;
873         int                      diff;
874
875         ENTRY;
876
877         /*
878          * Do cleanup orphans only with a first connection, after that
879          * all precreate requests uses resend/replay flags to support OST
880          * failover/reconnect.
881          */
882         if (d->opd_cleanup_orphans_done && imp_connect_replay_create(imp)) {
883                 rc = osp_get_lastfid_from_ost(env, d, false);
884                 RETURN(0);
885         }
886         /*
887          * wait for local recovery to finish, so we can clean up orphans.
888          * orphans are all objects since "last used" (assigned), but
889          * there might be objects reserved and in some cases they won't
890          * be used. we can't clean them up till we're sure they won't be
891          * used. also we can't allow new reservations because they may
892          * end up getting the orphans being cleaned up below. so we block
893          * new reservations and wait till all reserved objects are either
894          * used or released.
895          */
896         spin_lock(&d->opd_pre_lock);
897         d->opd_pre_recovering = 1;
898         spin_unlock(&d->opd_pre_lock);
899         /*
900          * The locking above makes sure the opd_pre_reserved check below will
901          * catch all osp_precreate_reserve() calls who find
902          * "!opd_pre_recovering".
903          */
904         wait_event_idle(d->opd_pre_waitq,
905                         (!d->opd_pre_reserved && d->opd_recovery_completed) ||
906                         !d->opd_pre_task || d->opd_got_disconnected);
907         if (!d->opd_pre_task || d->opd_got_disconnected)
908                 GOTO(out, rc = -EAGAIN);
909
910         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
911                d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
912
913         CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_DELAY_DELORPHAN, cfs_fail_val);
914
915         *last_fid = d->opd_last_used_fid;
916         /* The OSP should already have a valid seq now */
917         LASSERT(!fid_is_zero(last_fid));
918         if (fid_oid(&d->opd_last_used_fid) < 2) {
919                 /* lastfid looks strange... ask OST */
920                 rc = osp_get_lastfid_from_ost(env, d, true);
921                 if (rc)
922                         GOTO(out, rc);
923         }
924
925         imp = d->opd_obd->u.cli.cl_import;
926         LASSERT(imp);
927
928         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
929         if (req == NULL)
930                 GOTO(out, rc = -ENOMEM);
931
932         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
933         if (rc) {
934                 ptlrpc_request_free(req);
935                 req = NULL;
936                 GOTO(out, rc);
937         }
938
939         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
940         if (body == NULL)
941                 GOTO(out, rc = -EPROTO);
942
943         body->oa.o_flags = OBD_FL_DELORPHAN;
944         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
945
946         fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
947
948         ptlrpc_request_set_replen(req);
949
950         /* Don't resend the delorphan req */
951         req->rq_no_resend = req->rq_no_delay = 1;
952
953         rc = ptlrpc_queue_wait(req);
954         if (rc) {
955                 update_status = 1;
956                 GOTO(out, rc);
957         }
958
959         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
960         if (body == NULL)
961                 GOTO(out, rc = -EPROTO);
962
963         /*
964          * OST provides us with the id the new pool starts from in body->oa.o_id
965          */
966         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
967
968         spin_lock(&d->opd_pre_lock);
969         diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
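        /* diff > 0 means the MDT's last used id is ahead of what the OST
         * reported back, so the next precreate must also cover that gap in
         * addition to the usual minimum batch */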
970         if (diff > 0) {
971                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
972                 d->opd_pre_last_created_fid = d->opd_last_used_fid;
973         } else {
974                 d->opd_pre_create_count = OST_MIN_PRECREATE;
975                 d->opd_pre_last_created_fid = *last_fid;
976         }
977         /*
978          * This empties the pre-creation pool and effectively blocks any new
979          * reservations.
980          */
981         LASSERTF(fid_oid(&d->opd_pre_last_created_fid) <= IDIF_MAX_OID,
982                  "%s: last_created_fid "DFID" > %llu\n",
983                  d->opd_obd->obd_name, PFID(&d->opd_pre_last_created_fid),
984                  IDIF_MAX_OID);
985         d->opd_pre_used_fid = d->opd_pre_last_created_fid;
986         d->opd_pre_create_slow = 0;
987         if ((body->oa.o_valid & OBD_MD_FLSIZE) && body->oa.o_size)
988                 d->opd_pre_seq_width = body->oa.o_size;
989         spin_unlock(&d->opd_pre_lock);
990
991         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
992                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
993                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
994 out:
995         if (req)
996                 ptlrpc_req_finished(req);
997
998
999         /*
1000          * If rc is zero, the pre-creation window should have been emptied.
1001          * Since waking up the herd would be useless without pre-created
1002          * objects, we defer the signal to osp_precreate_send() in that case.
1003          */
1004         if (rc != 0) {
1005                 if (update_status) {
1006                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
1007                                d->opd_obd->obd_name, rc);
1008                         /* we can't proceed from here, the OST seems to
1009                          * be in a bad shape, better to wait for
1010                          * a new instance of the server and repeat
1011                          * from the beginning. notify possible waiters
1012                          * this OSP isn't quite functional yet */
1013                         osp_pre_update_status(d, rc);
1014                 } else {
1015                         wake_up_all(&d->opd_pre_user_waitq);
1016                 }
1017         } else {
1018                 spin_lock(&d->opd_pre_lock);
1019                 d->opd_pre_recovering = 0;
1020                 spin_unlock(&d->opd_pre_lock);
1021                 d->opd_cleanup_orphans_done = true;
1022         }
1023
1024         RETURN(rc);
1025 }
1026
1027 /**
1028  * Update precreate status using statfs data
1029  *
1030  * The function decides whether this OSP should be used for new objects.
1031  * IOW, whether this OST is used up or has some free space. Cached statfs
1032  * data is used to make this decision. If the latest result of statfs
1033  * request (rc argument) is not success, then just mark OSP unavailable
1034  * request (rc argument) is not successful, then just mark the OSP unavailable
1035  *
1036  * The new statfs data is passed in \a msfs and needs to be stored into
1037  * opd_statfs, but only after the various flags in os_state are set, so
1038  * that the new statfs data is not visible without appropriate flags set.
1039  * As such, there is no need to clear the flags here, since this is called
1040  * with new statfs data, and they should not be cleared if sent from OST.
1041  *
1042  * Add a bit of hysteresis so this flag isn't continually flapping, and
1043  * ensure that new files don't get extremely fragmented due to only a
1044  * small amount of available space in the filesystem.  We want to set
1045  * the ENOSPC/ENOINO flags unconditionally when there is less than the
1046  * reserved size free, and still copy them from the old state when there
1047  * is less than 2*reserved size free space or inodes.
1048  *
1049  * \param[in] d         OSP device
1050  * \param[in] msfs      statfs data
1051  */
1052 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs)
1053 {
1054         u32 old_state = d->opd_statfs.os_state;
1055         u64 available_mb;
1056
1057         /* statfs structure not initialized yet */
1058         if (unlikely(!msfs->os_type))
1059                 return;
1060
1061         /* if the low and high watermarks have not been initialized yet */
1062         if (unlikely(d->opd_reserved_mb_high == 0 &&
1063                      d->opd_reserved_mb_low == 0)) {
1064                 /* Use ~0.1% by default to disable object allocation,
1065                  * and ~0.2% to enable, size in MB, set both watermarks
1066                  */
1067                 spin_lock(&d->opd_pre_lock);
1068                 if (d->opd_reserved_mb_high == 0 &&
1069                     d->opd_reserved_mb_low == 0) {
1070                         d->opd_reserved_mb_low = ((msfs->os_bsize >> 10) *
1071                                                   msfs->os_blocks) >> 20;
1072                         if (d->opd_reserved_mb_low < 1)
1073                                 d->opd_reserved_mb_low = 1;
1074                         d->opd_reserved_mb_high =
1075                                 (d->opd_reserved_mb_low << 1) + 1;
1076                 }
1077                 spin_unlock(&d->opd_pre_lock);
1078         }
1079
1080         if (unlikely(d->opd_reserved_ino_high == 0 &&
1081                      d->opd_reserved_ino_low == 0)) {
1082                 /* Use ~0.0001% by default to disallow distributed transactions,
1083                  * and ~0.0002% to allow, set both watermarks
1084                  */
1085                 spin_lock(&d->opd_pre_lock);
1086                 if (d->opd_reserved_ino_high == 0 &&
1087                     d->opd_reserved_ino_low == 0) {
1088                         d->opd_reserved_ino_low = msfs->os_ffree >> 20;
1089                         if (d->opd_reserved_ino_low < 32)
1090                                 d->opd_reserved_ino_low = 32;
1091                         d->opd_reserved_ino_high =
1092                                 (d->opd_reserved_ino_low << 1) + 1;
1093                 }
1094                 spin_unlock(&d->opd_pre_lock);
1095         }
1096
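        /* illustrative example (not from the source): on a 1 TiB OST the
         * defaults above give opd_reserved_mb_low ~= 1024 MB and
         * opd_reserved_mb_high ~= 2049 MB, so ENOSPC is newly set below
         * ~1 GiB available, only preserved between ~1 GiB and ~2 GiB, and
         * effectively cleared once a fresh statfs reports more than that */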
1097         available_mb = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
1098         if (msfs->os_ffree < d->opd_reserved_ino_low)
1099                 msfs->os_state |= OS_STATFS_ENOINO;
1100         else if (msfs->os_ffree <= d->opd_reserved_ino_high)
1101                 msfs->os_state |= old_state & OS_STATFS_ENOINO;
1102         /* else don't clear flags in new msfs->os_state sent from OST */
1103
1104         if (available_mb < d->opd_reserved_mb_low)
1105                 msfs->os_state |= OS_STATFS_ENOSPC;
1106         else if (available_mb <= d->opd_reserved_mb_high)
1107                 msfs->os_state |= old_state & OS_STATFS_ENOSPC;
1108         /* else don't clear flags in new msfs->os_state sent from OST */
1109
1110         CDEBUG(D_INFO,
1111                "%s: blocks=%llu free=%llu avail=%llu avail_mb=%llu hwm_mb=%u files=%llu ffree=%llu state=%x: rc = %d\n",
1112                d->opd_obd->obd_name, msfs->os_blocks, msfs->os_bfree,
1113                msfs->os_bavail, available_mb, d->opd_reserved_mb_high,
1114                msfs->os_files, msfs->os_ffree, msfs->os_state,
1115                d->opd_pre ? d->opd_pre_status : 0);
1116
1117         if (!d->opd_pre)
1118                 goto update;
1119
1120         if (msfs->os_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1121                 d->opd_pre_status = -ENOSPC;
1122                 if (!(old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)))
1123                         CDEBUG(D_INFO, "%s: full: state=%x: rc = %x\n",
1124                                d->opd_obd->obd_name, msfs->os_state,
1125                                d->opd_pre_status);
1126                 CDEBUG(D_INFO, "uncommitted changes=%u in_progress=%u\n",
1127                        atomic_read(&d->opd_sync_changes),
1128                        atomic_read(&d->opd_sync_rpcs_in_progress));
1129         } else if (old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1130                 d->opd_pre_status = 0;
1131                 spin_lock(&d->opd_pre_lock);
1132                 d->opd_pre_create_slow = 0;
1133                 d->opd_pre_create_count = OST_MIN_PRECREATE;
1134                 spin_unlock(&d->opd_pre_lock);
1135                 wake_up(&d->opd_pre_waitq);
1136
1137                 CDEBUG(D_INFO,
1138                        "%s: available: state=%x: rc = %d\n",
1139                        d->opd_obd->obd_name, msfs->os_state,
1140                        d->opd_pre_status);
1141         } else {
1142                 /* we only get here if rc == 0 in the caller */
1143                 d->opd_pre_status = 0;
1144         }
1145
1146         /* Object precreation skipped on OST if manually disabled */
1147         if (d->opd_pre_max_create_count == 0)
1148                 msfs->os_state |= OS_STATFS_NOPRECREATE;
1149         /* else don't clear flags in new msfs->os_state sent from OST */
1150
1151 update:
1152         /* copy only new statfs state to make it visible to MDS threads */
1153         if (&d->opd_statfs != msfs)
1154                 d->opd_statfs = *msfs;
1155 }
1156
1157 /**
1158  * Initialize FID for precreation
1159  *
1160  * For a just created new target, a new sequence should be taken.
1161  * The function checks that no IDIF is in use (if the target was
1162  * added with an older version of Lustre), then requests a new
1163  * sequence from FLDB using the regular protocol. Then this new
1164  * sequence is stored on persistent storage synchronously to prevent
1165  * possible object leakage (for details see the description of
1166  * osp_precreate_rollover_new_seq()).
1167  *
1168  * \param[in] osp       OSP device
1169  *
1170  * \retval 0            on success
1171  * \retval negative     negated errno on error
1172  */
1173 int osp_init_pre_fid(struct lu_env *env, struct osp_device *osp)
1174 {
1175         struct osp_thread_info  *osi;
1176         struct lu_client_seq    *cli_seq;
1177         struct lu_fid           *last_fid;
1178         int                     rc;
1179         ENTRY;
1180
1181         LASSERT(osp->opd_pre != NULL);
1182
1183         /* Let's check if the current last_seq/fid is valid,
1184          * otherwise request new sequence from the controller */
1185         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1186                 /* Non-MDT0 can only use normal sequence for
1187                  * OST objects */
1188                 if (fid_is_norm(&osp->opd_last_used_fid))
1189                         RETURN(0);
1190         } else {
1191                 /* Initially MDT0 will start with IDIF, after
1192                  * that it will request new sequence from the
1193                  * controller */
1194                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1195                     fid_is_norm(&osp->opd_last_used_fid))
1196                         RETURN(0);
1197         }
1198
1199         if (!fid_is_zero(&osp->opd_last_used_fid))
1200                 CWARN("%s: invalid last used fid "DFID
1201                       ", try to get new sequence.\n",
1202                       osp->opd_obd->obd_name,
1203                       PFID(&osp->opd_last_used_fid));
1204
1205         osi = osp_env_info(env);
1206         last_fid = &osi->osi_fid;
1207         fid_zero(last_fid);
1208         /* For a fresh fs, it will allocate a new sequence first */
1209         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1210                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1211                 rc = seq_client_get_seq(env, cli_seq, &last_fid->f_seq);
1212                 if (rc != 0) {
1213                         CERROR("%s: alloc fid error: rc = %d\n",
1214                                osp->opd_obd->obd_name, rc);
1215                         GOTO(out, rc);
1216                 }
1217         } else {
1218                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1219         }
1220         last_fid->f_oid = 1;
1221         last_fid->f_ver = 0;
1222
1223         spin_lock(&osp->opd_pre_lock);
1224         osp->opd_last_used_fid = *last_fid;
1225         osp->opd_pre_used_fid = *last_fid;
1226         osp->opd_pre_last_created_fid = *last_fid;
1227         spin_unlock(&osp->opd_pre_lock);
1228         rc = osp_write_last_oid_seq_files(env, osp, last_fid, 1);
1229         if (rc != 0) {
1230                 CERROR("%s: write fid error: rc = %d\n",
1231                        osp->opd_obd->obd_name, rc);
1232                 GOTO(out, rc);
1233         }
1234 out:
1235         RETURN(rc);
1236 }
1237
1238 struct opt_args {
1239         struct osp_device       *opta_dev;
1240         struct lu_env           opta_env;
1241         struct completion       *opta_started;
1242 };
1243 /**
1244  * The core of precreate functionality
1245  *
1246  * The function implements the main precreation loop. Basically it
1247  * involves connecting to the target, precreate FID initialization,
1248  * identifying and removing orphans, then serving precreation. As
1249  * part of the latter, the thread is responsible for statfs data
1250  * updates. The precreation is mostly driven by other threads
1251  * asking for new OST objects - those askers wake the thread when
1252  * the number of precreated objects reaches the low watermark.
1253  * After a disconnect, the sequence above repeats. This keeps going
1254  * until the thread is requested to stop.
1255  *
1256  * \param[in] _args     private data for the thread (OSP device to handle)
1257  *
1258  * \retval 0            on success
1259  * \retval negative     negated errno on error
1260  */
1261 static int osp_precreate_thread(void *_args)
1262 {
1263         struct opt_args         *args = _args;
1264         struct osp_device       *d = args->opta_dev;
1265         struct lu_env           *env = &args->opta_env;
1266         int                      rc;
1267
1268         ENTRY;
1269
1270         complete(args->opta_started);
1271
1272         /* wait for connection from the layers above */
1273         wait_event_idle(d->opd_pre_waitq,
1274                         kthread_should_stop() ||
1275                         d->opd_obd->u.cli.cl_seq->lcs_exp != NULL);
1276
1277         while (!kthread_should_stop()) {
1278                 /*
1279                  * need to be connected to OST
1280                  */
1281                 while (!kthread_should_stop()) {
1282                         if ((d->opd_pre == NULL || d->opd_pre_recovering) &&
1283                             d->opd_imp_connected &&
1284                             !d->opd_got_disconnected)
1285                                 break;
1286                         wait_event_idle(d->opd_pre_waitq,
1287                                         kthread_should_stop() ||
1288                                         d->opd_new_connection);
1289
1290                         if (!d->opd_new_connection)
1291                                 continue;
1292
1293                         CFS_FAIL_TIMEOUT(OBD_FAIL_OSP_CON_EVENT_DELAY,
1294                                          cfs_fail_val);
1295                         d->opd_new_connection = 0;
1296                         d->opd_got_disconnected = 0;
1297                         break;
1298                 }
1299
1300                 if (kthread_should_stop())
1301                         break;
1302
1303                 if (d->opd_pre) {
1304                         LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1305                         LASSERT(d->opd_obd->u.cli.cl_seq->lcs_exp != NULL);
1306
1307                         /* Init fid for osp_precreate if necessary */
1308                         rc = osp_init_pre_fid(env, d);
1309                         if (rc != 0) {
1310                                 class_export_put(d->opd_exp);
1311                                 d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
1312                                 CERROR("%s: init pre fid error: rc = %d\n",
1313                                                 d->opd_obd->obd_name, rc);
1314                                 continue;
1315                         }
1316                 }
1317
1318                 if (osp_statfs_update(env, d)) {
1319                         if (wait_event_idle_timeout(d->opd_pre_waitq,
1320                                                     kthread_should_stop(),
1321                                                     cfs_time_seconds(5)) == 0)
1322                                 l_wait_event_abortable(
1323                                         d->opd_pre_waitq,
1324                                         kthread_should_stop());
1325                         continue;
1326                 }
1327
1328                 if (d->opd_pre) {
1329                         /*
1330                          * Clean up orphans or recreate missing objects.
1331                          */
1332                         rc = osp_precreate_cleanup_orphans(env, d);
1333                         if (rc != 0) {
1334                                 schedule_timeout_interruptible(cfs_time_seconds(1));
1335                                 continue;
1336                         }
1337                 }
1338
1339                 /*
1340                  * connected, can handle precreates now
1341                  */
1342                 while (!kthread_should_stop()) {
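                        /* sleep until: asked to stop, the pool is nearly
                         * empty (and we are not stuck at the end of a
                         * sequence with objects still unconsumed), statfs
                         * needs a refresh, or the import got disconnected */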
1343                         wait_event_idle(d->opd_pre_waitq,
1344                                         kthread_should_stop() ||
1345                                         (osp_precreate_near_empty(env, d) &&
1346                                          !(osp_precreate_end_seq(env, d) &&
1347                                            osp_objs_precreated(env, d) != 0)) ||
1348                                         osp_statfs_need_update(d) ||
1349                                         d->opd_got_disconnected);
1350
1351                         if (kthread_should_stop())
1352                                 break;
1353
1354                         /* something happened to the connection
1355                          * have to start from the beginning */
1356                         if (d->opd_got_disconnected)
1357                                 break;
1358
1359                         if (osp_statfs_need_update(d))
1360                                 if (osp_statfs_update(env, d))
1361                                         break;
1362
1363                         if (d->opd_pre == NULL)
1364                                 continue;
1365
1366                         /* To avoid handling different sequences in precreate/orphan
1367                          * cleanup, hold off precreation until the current sequence
1368                          * is used up. */
1369                         if (unlikely(osp_precreate_end_seq(env, d))) {
1370                                 if (osp_objs_precreated(env, d) == 0) {
1371                                         rc = osp_precreate_rollover_new_seq(env, d);
1372                                         if (rc)
1373                                                 continue;
1374                                 } else {
1375                                         continue;
1376                                 }
1377                         }
1378
1379                         if (osp_precreate_near_empty(env, d)) {
1380                                 rc = osp_precreate_send(env, d);
1381                                 /* osp_precreate_send() sets opd_pre_status
1382                          * in case of error, which prevents further use
1383                          * of the failed device. */
1384                                 if (rc < 0 && rc != -ENOSPC &&
1385                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1386                                         CERROR("%s: cannot precreate objects:"
1387                                                " rc = %d\n",
1388                                                d->opd_obd->obd_name, rc);
1389                         }
1390                 }
1391         }
1392
1393         lu_env_fini(env);
1394         OBD_FREE_PTR(args);
1395
1396         RETURN(0);
1397 }
1398
1399 /**
1400  * Check when to stop waiting for precreated objects.
1401  *
1402  * The caller wanting a new OST object can't wait indefinitely. The
1403  * function checks a few conditions, including the availability of new
1404  * OST objects, a disconnected OST, lack of space with no pending
1405  * destroys, etc. In other words, it checks whether the current OSP
1406  * state is worth waiting on or whether it's better to give up.
1407  *
1408  * \param[in] env       LU environment provided by the caller
1409  * \param[in] d         OSP device
1410  *
1411  * \retval              0 - keep waiting, 1 - stop waiting
1412  */
1413 static int osp_precreate_ready_condition(const struct lu_env *env,
1414                                          struct osp_device *d)
1415 {
1416         /* Bail out if I/O to the OST fails */
1417         if (d->opd_pre_status != 0 &&
1418             d->opd_pre_status != -EAGAIN &&
1419             d->opd_pre_status != -ENODEV &&
1420             d->opd_pre_status != -ENOTCONN &&
1421             d->opd_pre_status != -ENOSPC) {
1422                 /* DEBUG LU-3230 */
1423                 if (d->opd_pre_status != -EIO)
1424                         CERROR("%s: precreate failed opd_pre_status %d\n",
1425                                d->opd_obd->obd_name, d->opd_pre_status);
1426                 return 1;
1427         }
1428
1429         if (d->opd_pre_recovering || d->opd_force_creation)
1430                 return 0;
1431
1432         /* ready if there are enough precreated objects: leave room for */
1433         /* others' reservations (opd_pre_reserved) plus our own object (+1) */
1434         if (d->opd_pre_reserved + 1 < osp_objs_precreated(env, d))
1435                 return 1;
1436
1437         /* ready if OST reported no space and no destroys in progress */
1438         if (atomic_read(&d->opd_sync_changes) +
1439             atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
1440             d->opd_pre_status == -ENOSPC)
1441                 return 1;
1442
1443         return 0;
1444 }
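
/*
 * A minimal stand-alone sketch of the readiness arithmetic above (the
 * helper name is hypothetical): the waiter may proceed only while the
 * pool still holds more precreated objects than the reservations already
 * handed out (opd_pre_reserved) plus the one object the waiter itself
 * needs. For example, precreated = 8 and reserved = 3 gives 3 + 1 < 8,
 * so the waiter proceeds; precreated = 8 and reserved = 7 gives
 * 7 + 1 < 8 being false, so it keeps waiting.
 *
 *	static inline bool example_pre_ready(__u64 reserved, int precreated)
 *	{
 *		return reserved + 1 < precreated;
 *	}
 */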
1445
1446 /**
1447  * Reserve object in precreate pool
1448  *
1449  * When the caller wants to create a new object on this target (the
1450  * target represented by the given OSP), it should declare this intention
1451  * using the regular ->dt_declare_create() method. Then OSP tries to
1452  * reserve an object from the existing precreated pool or waits up to
1453  * obd_timeout for an available object to appear in the pool (a dedicated
1454  * thread does the real precreation in the background). The object can be
1455  * consumed later with osp_precreate_get_fid() or released with a call to
1456  * lu_object_put(). Note that the function doesn't reserve a specific ID,
1457  * just some ID; the actual ID assignment happens in osp_precreate_get_fid().
1458  * If space on the target is short and there is a pending object destroy,
1459  * the function forces a local commit to speed up space release (see
1460  * osp_sync.c for the details).
1461  *
1462  * \param[in] env       LU environment provided by the caller
1463  * \param[in] d         OSP device
1464  *
1465  * \retval              0 on success
1466  * \retval              -ENOSPC when no space on OST
1467  * \retval              -EAGAIN try later, slow precreation in progress
1468  * \retval              -EIO when no access to OST
1469  */
1470 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d,
1471                           bool can_block)
1472 {
1473         time64_t expire = ktime_get_seconds() + obd_timeout;
1474         int precreated, rc, synced = 0;
1475
1476         ENTRY;
1477
1478         LASSERTF(osp_objs_precreated(env, d) >= 0, "Last created FID "DFID
1479                  " Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1480                  PFID(&d->opd_pre_used_fid));
1481
1482         /* opd_pre_max_create_count == 0 means this OST is not to be used */
1483         if (d->opd_pre_max_create_count == 0)
1484                 RETURN(-ENOBUFS);
1485
1486         /*
1487          * wait till:
1488          *  - preallocation is done
1489          *  - no free space expected soon
1490          *  - can't connect to OST for too long (obd_timeout)
1491          *  - OST can allocate fid sequence.
1492          */
1493         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1494                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1495
1496                 /*
1497                  * increase number of precreations
1498                  */
1499                 precreated = osp_objs_precreated(env, d);
1500                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1501                     d->opd_pre_create_slow == 0 &&
1502                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1503                         spin_lock(&d->opd_pre_lock);
1504                         d->opd_pre_create_slow = 1;
1505                         d->opd_pre_create_count *= 2;
1506                         spin_unlock(&d->opd_pre_lock);
1507                 }
1508
1509                 spin_lock(&d->opd_pre_lock);
1510                 precreated = osp_objs_precreated(env, d);
1511                 if (!d->opd_pre_recovering && !d->opd_force_creation) {
1512                         if (precreated > d->opd_pre_reserved) {
1513                                 d->opd_pre_reserved++;
1514                                 spin_unlock(&d->opd_pre_lock);
1515                                 rc = 0;
1516
1517                                 /*
1518                                  * XXX: don't wake up if precreation
1519                                  * is in progress
1520                                  */
1521                                 if (osp_precreate_near_empty_nolock(env, d) &&
1522                                    !osp_precreate_end_seq_nolock(env, d))
1523                                         wake_up(&d->opd_pre_waitq);
1524
1525                                 break;
1526                         } else if (unlikely(precreated &&
1527                                    osp_precreate_end_seq_nolock(env, d))) {
1528                                 /*
1529                                  * precreate pool is reaching the end of the
1530                                  * current seq, and doesn't have enough objects
1531                                  */
1532                                 rc = -ENOSPC;
1533                                 spin_unlock(&d->opd_pre_lock);
1534                                 break;
1535                         }
1536                 }
1537                 spin_unlock(&d->opd_pre_lock);
1538
1539                 /*
1540                  * all precreated objects have been used and the no-space
1541                  * status leaves us no chance to succeed very soon, but if
1542                  * there is a destroy in progress, then we should wait till
1543                  * that is done - some space might be released
1544                  */
1545                 if (unlikely(rc == -ENOSPC)) {
1546                         if (atomic_read(&d->opd_sync_changes) && synced == 0) {
1547                                 /* force local commit to release space */
1548                                 dt_commit_async(env, d->opd_storage);
1549                                 osp_sync_check_for_work(d);
1550                                 synced = 1;
1551                         }
1552                         if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
1553                                 /* just wait till destroys are done
1554                                  * see wait_event_idle_timeout() below
1555                                  */
1556                         }
1557                         if (atomic_read(&d->opd_sync_changes) +
1558                             atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
1559                                 /* no hope for free space */
1560                                 break;
1561                         }
1562                 }
1563
1564                 /* XXX: don't wake up if precreation is in progress */
1565                 wake_up(&d->opd_pre_waitq);
1566
1567                 if (ktime_get_seconds() >= expire) {
1568                         rc = -ETIMEDOUT;
1569                         break;
1570                 }
1571
1572                 if (!can_block) {
1573                         LASSERT(d->opd_pre);
1574                         rc = -ENOBUFS;
1575                         break;
1576                 }
1577
1578                 CDEBUG(D_INFO, "%s: Sleeping on objects\n",
1579                        d->opd_obd->obd_name);
1580                 if (wait_event_idle_timeout(
1581                             d->opd_pre_user_waitq,
1582                             osp_precreate_ready_condition(env, d),
1583                             cfs_time_seconds(obd_timeout)) == 0) {
1584                         CDEBUG(D_HA,
1585                                "%s: slow creates, last="DFID", next="DFID", "
1586                                "reserved=%llu, sync_changes=%u, "
1587                                "sync_rpcs_in_progress=%d, status=%d\n",
1588                                d->opd_obd->obd_name,
1589                                PFID(&d->opd_pre_last_created_fid),
1590                                PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1591                                atomic_read(&d->opd_sync_changes),
1592                                atomic_read(&d->opd_sync_rpcs_in_progress),
1593                                d->opd_pre_status);
1594                 } else {
1595                         CDEBUG(D_INFO, "%s: Woken up, status=%d\n",
1596                                d->opd_obd->obd_name, d->opd_pre_status);
1597                 }
1598         }
1599
1600         RETURN(rc);
1601 }
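
/*
 * A minimal caller-side sketch of the reserve-then-assign pattern
 * described above; the helper name and the simplified error handling
 * are hypothetical, only osp_precreate_reserve() and
 * osp_precreate_get_fid() are the real entry points:
 *
 *	static int example_alloc_ost_object(const struct lu_env *env,
 *					    struct osp_device *d,
 *					    struct lu_fid *fid)
 *	{
 *		int rc;
 *
 *		rc = osp_precreate_reserve(env, d, true);
 *		if (rc != 0)
 *			return rc;
 *
 *		return osp_precreate_get_fid(env, d, fid);
 *	}
 *
 * In the real code the FID assignment is expected to happen only once
 * the transaction has been started, see osp_precreate_get_fid() below.
 */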
1602
1603 /**
1604  * Get a FID from precreation pool
1605  *
1606  * The function is a companion to osp_precreate_reserve() - it assigns
1607  * a specific FID from the precreate pool. The function should be called
1608  * only if the call to osp_precreate_reserve() was successful. The
1609  * function updates the local storage to remember the highest object ID
1610  * referenced by the node in the given sequence.
1611  *
1612  * A very important detail: this is supposed to be called once the
1613  * transaction is started, so the on-disk update will be atomic with the
1614  * data (like the LOV EA) referring to this object. Then the object won't
1615  * be leaked: either it's referenced by the committed transaction or it's
1616  * subject to the orphan cleanup procedure.
1617  *
1618  * \param[in] env       LU environment provided by the caller
1619  * \param[in] d         OSP device
1620  * \param[out] fid      generated FID
1621  *
1622  * \retval 0            on success
1623  * \retval negative     negated errno on error
1624  */
1625 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1626                           struct lu_fid *fid)
1627 {
1628         struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
1629
1630         /* grab next id from the pool */
1631         spin_lock(&d->opd_pre_lock);
1632
1633         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1634                              &d->opd_pre_last_created_fid) < 0,
1635                  "next fid "DFID" > last created fid "DFID"\n",
1636                  PFID(&d->opd_pre_used_fid),
1637                  PFID(&d->opd_pre_last_created_fid));
1638
1639         /* Non-IDIF FIDs shouldn't get here with OID == OBIF_MAX_OID. For IDIF,
1640          * f_oid wraps and "f_seq" (holding high 16 bits of ID) needs increment
1641          */
1642         if (fid_is_idif(pre_used_fid) &&
1643             unlikely(fid_oid(pre_used_fid) == OBIF_MAX_OID)) {
1644                 struct ost_id oi;
1645                 __u32 idx = fid_idif_ost_idx(pre_used_fid);
1646
1647                 fid_to_ostid(pre_used_fid, &oi);
1648                 oi.oi.oi_id++;
1649                 ostid_to_fid(pre_used_fid, &oi, idx);
1650         } else {
1651                 pre_used_fid->f_oid++;
1652         }
1653
1654         memcpy(fid, pre_used_fid, sizeof(*fid));
1655         d->opd_pre_reserved--;
1656         /*
1657          * last_used_id must be updated along with getting a new id, otherwise
1658          * we might miscalculate the gap, causing object loss or leak
1659          */
1660         osp_update_last_fid(d, fid);
1661         spin_unlock(&d->opd_pre_lock);
1662
1663         /*
1664          * the main thread has probably suspended orphan cleanup until
1665          * all reservations are released; see the comment in
1666          * osp_precreate_thread() just before orphan cleanup
1667          */
1668         if (unlikely(d->opd_pre_reserved == 0 &&
1669                      (d->opd_pre_recovering || d->opd_pre_status)))
1670                 wake_up(&d->opd_pre_waitq);
1671
1672         return 0;
1673 }
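
/*
 * A simplified model of the IDIF wrap handled above, assuming the usual
 * IDIF layout where the 48-bit OST object ID is split into a 16-bit high
 * part (kept in the low bits of f_seq) and a 32-bit low part (f_oid);
 * the names hi16/oid32 are illustrative only, the real conversions are
 * done by fid_to_ostid()/ostid_to_fid():
 *
 *	__u64 id      = ((__u64)hi16 << 32) | oid32;
 *	__u64 next    = id + 1;
 *	__u32 new_oid = (__u32)(next & 0xffffffff);
 *	__u16 new_hi  = (__u16)(next >> 32);
 *
 * When oid32 was OBIF_MAX_OID, new_oid becomes 0 and the carry increments
 * new_hi, which is what the ostid-based increment above achieves.
 */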
1674
1675 /*
1676  * Set the size attribute on an object
1677  *
1678  * When striping is created late, it's possible that the size has already
1679  * been set on the file. Then the new striping should inherit the size
1680  * from the file. The function sets the size on the object using the
1681  * regular protocol (OST_PUNCH).
1682  * XXX: should be re-implemented using OUT ?
1683  *
1684  * \param[in] env       LU environment provided by the caller
1685  * \param[in] dt        object
1686  * \param[in] size      size to set.
1687  *
1688  * \retval 0            on success
1689  * \retval negative     negated errno on error
1690  */
1691 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1692                         __u64 size)
1693 {
1694         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1695         struct ptlrpc_request   *req = NULL;
1696         struct obd_import       *imp;
1697         struct ost_body         *body;
1698         struct obdo             *oa = NULL;
1699         int                      rc;
1700
1701         ENTRY;
1702
1703         imp = d->opd_obd->u.cli.cl_import;
1704         LASSERT(imp);
1705
1706         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1707         if (req == NULL)
1708                 RETURN(-ENOMEM);
1709
1710         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1711         if (rc) {
1712                 ptlrpc_request_free(req);
1713                 RETURN(rc);
1714         }
1715
1716         /*
1717          * XXX: decide how to handle resend here:
1718          * if we don't resend, then the client may see a wrong file size;
1719          * if we do resend, then an MDS thread can get stuck for quite long;
1720          * and if we don't resend, then the client will also get -EAGAIN
1721          * (see LU-7975 and sanity/test_27F use cases).
1722          * But let's decide not to resend/delay this truncate request to the
1723          * OST and allow the client to decide to resend, in a less aggressive
1724          * way from after_reply(), by returning -EINPROGRESS instead of
1725          * -EAGAIN/-EWOULDBLOCK upon return from ptlrpc_queue_wait() at the
1726          * end of this routine
1727          */
1728         req->rq_no_resend = req->rq_no_delay = 1;
1729
1730         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1731         ptlrpc_at_set_req_timeout(req);
1732
1733         OBD_ALLOC_PTR(oa);
1734         if (oa == NULL)
1735                 GOTO(out, rc = -ENOMEM);
1736
1737         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1738         LASSERT(rc == 0);
1739         oa->o_size = size;
1740         oa->o_blocks = OBD_OBJECT_EOF;
1741         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1742                       OBD_MD_FLID | OBD_MD_FLGROUP;
1743
1744         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1745         LASSERT(body);
1746         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1747
1748         /* XXX: capa support? */
1749         /* osc_pack_capa(req, body, capa); */
1750
1751         ptlrpc_request_set_replen(req);
1752
1753         rc = ptlrpc_queue_wait(req);
1754         if (rc) {
1755                 /* -EAGAIN/-EWOULDBLOCK means the OST is unreachable at the moment
1756                  * since we have decided not to resend/delay, but this could lead
1757                  * to a wrong size being seen on the client side, and even to a
1758                  * process trying to open the file failing if it does not handle
1759                  * -EAGAIN itself. So it is better to return -EINPROGRESS instead
1760                  * and leave the decision to resend to the client in after_reply()
1761                  */
1762                 if (rc == -EAGAIN) {
1763                         rc = -EINPROGRESS;
1764                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1765                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1766                                "resend\n");
1767                 } else {
1768                         CERROR("can't punch object: %d\n", rc);
1769                 }
1770         }
1771 out:
1772         ptlrpc_req_finished(req);
1773         if (oa)
1774                 OBD_FREE_PTR(oa);
1775         RETURN(rc);
1776 }
1777
1778 /**
1779  * Initialize precreation functionality of OSP
1780  *
1781  * Prepares the internal structures; the thread itself starts in osp_init_statfs()
1782  *
1783  * \param[in] d         OSP device
1784  *
1785  * \retval 0            on success
1786  * \retval negative     negated errno on error
1787  */
1788 int osp_init_precreate(struct osp_device *d)
1789 {
1790         ENTRY;
1791
1792         OBD_ALLOC_PTR(d->opd_pre);
1793         if (d->opd_pre == NULL)
1794                 RETURN(-ENOMEM);
1795
1796         /* initially precreation isn't ready */
1797         init_waitqueue_head(&d->opd_pre_user_waitq);
1798         d->opd_pre_status = -EAGAIN;
1799         fid_zero(&d->opd_pre_used_fid);
1800         d->opd_pre_used_fid.f_oid = 1;
1801         fid_zero(&d->opd_pre_last_created_fid);
1802         d->opd_pre_last_created_fid.f_oid = 1;
1803         d->opd_last_id = 0;
1804         d->opd_pre_reserved = 0;
1805         d->opd_pre_seq_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
1806         d->opd_got_disconnected = 1;
1807         d->opd_pre_create_slow = 0;
1808         d->opd_pre_create_count = OST_MIN_PRECREATE;
1809         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1810         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1811         d->opd_reserved_mb_high = 0;
1812         d->opd_reserved_mb_low = 0;
1813         d->opd_cleanup_orphans_done = false;
1814         d->opd_force_creation = false;
1815
1816         RETURN(0);
1817 }
1818
1819 /**
1820  * Finish precreate functionality of OSP
1821  *
1822  *
1823  * Releases the precreation state; the precreate thread and the statfs
1824  * update timer are stopped separately in osp_statfs_fini().
1825  *
1826  * \param[in] d         OSP device
1827  */
1828 void osp_precreate_fini(struct osp_device *d)
1829 {
1830         ENTRY;
1831
1832         if (d->opd_pre == NULL)
1833                 RETURN_EXIT;
1834
1835         OBD_FREE_PTR(d->opd_pre);
1836         d->opd_pre = NULL;
1837
1838         EXIT;
1839 }
1840
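/**
 * Initialize statfs machinery and start the precreate thread
 *
 * Sets up the statfs cache state and its refresh timer, then (unless the
 * backing device is read-only) starts the background thread that handles
 * precreation and statfs updates.
 *
 * \param[in] d         OSP device
 *
 * \retval 0            on success
 * \retval negative     negated errno on error
 */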
1841 int osp_init_statfs(struct osp_device *d)
1842 {
1843         struct task_struct      *task;
1844         struct opt_args         *args;
1845         DECLARE_COMPLETION_ONSTACK(started);
1846         int                     rc;
1847
1848         ENTRY;
1849
1850         spin_lock_init(&d->opd_pre_lock);
1851         init_waitqueue_head(&d->opd_pre_waitq);
1852
1853         /*
1854          * Initialize statfs-related things
1855          */
1856         d->opd_statfs_maxage = 5; /* default update interval */
1857         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
1858                                                 1000 * NSEC_PER_SEC);
1859         CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
1860                ktime_get_ns(),
1861                ktime_to_ns(d->opd_statfs_fresh_till));
1862         cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
1863                         (unsigned long)d, 0);
1864
1865         if (d->opd_storage->dd_rdonly)
1866                 RETURN(0);
1867
1868         OBD_ALLOC_PTR(args);
1869         if (!args)
1870                 RETURN(0);
1871         args->opta_dev = d;
1872         args->opta_started = &started;
1873         rc = lu_env_init(&args->opta_env,
1874                          d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1875         if (rc) {
1876                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1877                        rc);
1878                 OBD_FREE_PTR(args);
1879                 RETURN(0);
1880         }
1881
1882         /*
1883          * start thread handling precreation and statfs updates
1884          */
1885         task = kthread_create(osp_precreate_thread, args,
1886                               "osp-pre-%u-%u", d->opd_index, d->opd_group);
1887         if (IS_ERR(task)) {
1888                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1889                 lu_env_fini(&args->opta_env);
1890                 OBD_FREE_PTR(args);
1891                 RETURN(PTR_ERR(task));
1892         }
1893         d->opd_pre_task = task;
1894         wake_up_process(task);
1895         wait_for_completion(&started);
1896
1897         RETURN(0);
1898 }
1899
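/**
 * Stop statfs and precreate activity
 *
 * Deletes the statfs refresh timer and stops the precreate thread.
 *
 * \param[in] d         OSP device
 */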
1900 void osp_statfs_fini(struct osp_device *d)
1901 {
1902         struct task_struct *task = d->opd_pre_task;
1903         ENTRY;
1904
1905         timer_delete(&d->opd_statfs_timer);
1906
1907         d->opd_pre_task = NULL;
1908         if (task)
1909                 kthread_stop(task);
1910
1911         EXIT;
1912 }