1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osp/osp_precreate.c
32  *
33  * Lustre OST Proxy Device
34  *
35  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
36  * Author: Mikhail Pershin <mike.pershin@intel.com>
37  * Author: Di Wang <di.wang@intel.com>
38  */
39
40 #define DEBUG_SUBSYSTEM S_MDS
41
42 #include <linux/kthread.h>
43
44 #include <lustre_obdo.h>
45
46 #include "osp_internal.h"
47
48 /*
49  * there are two specific states to take care of:
50  *
51  * = import is disconnected =
52  *
53  * = import is inactive =
54  *   in this case osp_declare_create() returns an error
55  *
56  */
57
58 /**
59  * Check whether statfs data is expired
60  *
61  * The OSP device caches statfs data for the target; the function checks
62  * whether the data has expired or not.
63  *
64  * \param[in] d         OSP device
65  *
66  * \retval              0 - not expired, 1 - expired
67  */
68 static inline int osp_statfs_need_update(struct osp_device *d)
69 {
70         return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
71 }
72
73 /*
74  * OSP tries to maintain a pool of available objects so that calls to create
75  * objects don't block most of the time
76  *
77  * each time OSP gets connected to an OST, we should start with precreation cleanup
78  */
79 static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
80 {
81         struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
82
83         LASSERT(d);
84         /* invalidate statfs data so osp_precreate_thread() can refresh */
85         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
86         if (d->opd_pre_task)
87                 wake_up(&d->opd_pre_waitq);
88 }
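/*
 * A note on the wake-up above: pushing opd_statfs_fresh_till one second into
 * the past makes osp_statfs_need_update() return true, so the precreate
 * thread, once woken, refreshes the statfs cache on its next loop iteration.
 */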
89
90 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs);
91
92 /*
93  * The function updates the current precreation status if it is broken, and
94  * updates the cached statfs state if it is functional, then wakes up waiters.
95  * We don't clear opd_pre_status directly here, but rather leave this
96  * to osp_pre_update_msfs() to do if everything is OK so that we don't
97  * have a race to clear opd_pre_status and then set it to -ENOSPC again.
98  *
99  * \param[in] d         OSP device
100  * \param[in] msfs      statfs data
101  * \param[in] rc        new precreate status for device \a d
102  */
103 static void osp_pre_update_status_msfs(struct osp_device *d,
104                                        struct obd_statfs *msfs, int rc)
105 {
106         CDEBUG(D_INFO, "%s: Updating status = %d\n", d->opd_obd->obd_name, rc);
107         if (rc)
108                 d->opd_pre_status = rc;
109         else
110                 osp_pre_update_msfs(d, msfs);
111
112         wake_up_all(&d->opd_pre_user_waitq);
113 }
114
115 /* Pass in the old statfs data in case the limits have changed */
116 void osp_pre_update_status(struct osp_device *d, int rc)
117 {
118         osp_pre_update_status_msfs(d, &d->opd_statfs, rc);
119 }
120
121
122 /**
123  * RPC interpret callback for OST_STATFS RPC
124  *
125  * An interpretation callback called by ptlrpc for the OST_STATFS RPC when
126  * the target replies. It's used to maintain the statfs cache for the target.
127  * The function fills data from the reply if successful and schedules another
128  * update.
129  *
130  * \param[in] env       LU environment provided by the caller
131  * \param[in] req       RPC replied
132  * \param[in] aa        callback data
133  * \param[in] rc        RPC result
134  *
135  * \retval 0            on success
136  * \retval negative     negated errno on error
137  */
138 static int osp_statfs_interpret(const struct lu_env *env,
139                                 struct ptlrpc_request *req, void *args, int rc)
140 {
141         union ptlrpc_async_args *aa = args;
142         struct obd_import *imp = req->rq_import;
143         struct obd_statfs *msfs;
144         struct obd_statfs *sfs;
145         struct osp_device *d;
146         u64 maxage_ns;
147
148         ENTRY;
149
150         aa = ptlrpc_req_async_args(aa, req);
151         d = aa->pointer_arg[0];
152         LASSERT(d);
153
154         if (rc != 0)
155                 GOTO(out, rc);
156
157         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
158         if (msfs == NULL)
159                 GOTO(out, rc = -EPROTO);
160
161         if (d->opd_pre)
162                 osp_pre_update_status_msfs(d, msfs, 0);
163         else
164                 d->opd_statfs = *msfs;
165
166         /* schedule next update */
167         maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
168         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
169         mod_timer(&d->opd_statfs_timer,
170                   jiffies + cfs_time_seconds(d->opd_statfs_maxage));
171         d->opd_statfs_update_in_progress = 0;
172
173         sfs = &d->opd_statfs;
174         CDEBUG(D_CACHE, "%s (%p): %llu blocks, %llu free, %llu avail, "
175                "%u bsize, %u reserved mb low, %u reserved mb high, "
176                "%llu files, %llu free files\n", d->opd_obd->obd_name, d,
177                sfs->os_blocks, sfs->os_bfree, sfs->os_bavail, sfs->os_bsize,
178                d->opd_reserved_mb_low, d->opd_reserved_mb_high,
179                sfs->os_files, sfs->os_ffree);
180
181         RETURN(0);
182 out:
183         /* couldn't update statfs, try again with a small delay */
184         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
185         d->opd_statfs_update_in_progress = 0;
186         if (d->opd_pre && d->opd_pre_task)
187                 wake_up(&d->opd_pre_waitq);
188
189         if (req->rq_import_generation == imp->imp_generation)
190                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
191                        d->opd_obd->obd_name, rc);
192         RETURN(rc);
193 }
194
195 /**
196  * Send OST_STATFS RPC
197  *
198  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
199  * Also disables scheduled updates, as at times OSP may need to refresh
200  * statfs data before expiration. The function doesn't block; instead,
201  * the interpretation callback osp_statfs_interpret() is used.
202  *
203  * \param[in] d         OSP device
204  */
205 static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
206 {
207         u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
208         struct ptlrpc_request   *req;
209         struct obd_import       *imp;
210         union ptlrpc_async_args *aa;
211         int rc;
212
213         ENTRY;
214
215         CDEBUG(D_CACHE, "going to update statfs\n");
216
217         imp = d->opd_obd->u.cli.cl_import;
218         LASSERT(imp);
219
220         req = ptlrpc_request_alloc(imp,
221                            d->opd_pre ? &RQF_OST_STATFS : &RQF_MDS_STATFS);
222         if (req == NULL)
223                 RETURN(-ENOMEM);
224
225         rc = ptlrpc_request_pack(req,
226                          d->opd_pre ? LUSTRE_OST_VERSION : LUSTRE_MDS_VERSION,
227                          d->opd_pre ? OST_STATFS : MDS_STATFS);
228         if (rc) {
229                 ptlrpc_request_free(req);
230                 RETURN(rc);
231         }
232         ptlrpc_request_set_replen(req);
233         if (d->opd_pre)
234                 req->rq_request_portal = OST_CREATE_PORTAL;
235         ptlrpc_at_set_req_timeout(req);
236
237         req->rq_interpret_reply = osp_statfs_interpret;
238         aa = ptlrpc_req_async_args(aa, req);
239         aa->pointer_arg[0] = d;
240
241         /*
242          * no updates till reply
243          */
244         del_timer(&d->opd_statfs_timer);
245         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
246         d->opd_statfs_update_in_progress = 1;
247
248         ptlrpcd_add_req(req);
249
250         /* we still want to sync changes if no new changes are coming */
251         if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
252                 GOTO(out, rc);
253
254         if (atomic_read(&d->opd_sync_changes)) {
255                 struct thandle *th;
256
257                 th = dt_trans_create(env, d->opd_storage);
258                 if (IS_ERR(th)) {
259                         CERROR("%s: can't sync\n", d->opd_obd->obd_name);
260                         GOTO(out, rc);
261                 }
262                 rc = dt_trans_start_local(env, d->opd_storage, th);
263                 if (rc == 0) {
264                         CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
265                                d->opd_obd->obd_name,
266                                atomic_read(&d->opd_sync_changes));
267                         osp_sync_add_commit_cb_1s(env, d, th);
268                 }
269                 dt_trans_stop(env, d->opd_storage, th);
270         }
271
272 out:
273         RETURN(0);
274 }
275
276 /**
277  * Schedule an immediate update for statfs data
278  *
279  * If cached statfs data claims no free space, but OSP has received a request
280  * to destroy an object (which will probably release some space), then we may
281  * need to refresh the cached statfs data sooner than planned. The function
282  * checks that no statfs update is in progress and schedules an immediate
283  * update if so.
284  * XXX: there might be a case where removed object(s) free no space (empty
285  * objects). If such deletions are frequent, statfs may be updated too often, causing an RPC storm; some throttling is needed...
286  *
287  * \param[in] d         OSP device where statfs data needs to be refreshed
288  */
289 void osp_statfs_need_now(struct osp_device *d)
290 {
291         if (!d->opd_statfs_update_in_progress) {
292                 /*
293                  * if current status is -ENOSPC (lack of free space on OST)
294                  * then we should poll OST immediately once object destroy
295                  * is replied
296                  */
297                 d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
298                 del_timer(&d->opd_statfs_timer);
299                 wake_up(&d->opd_pre_waitq);
300         }
301 }
302
303 /**
304  * Return number of precreated objects
305  *
306  * A simple helper to calculate the number of precreated objects on the device.
307  *
308  * \param[in] env       LU environment provided by the caller
309  * \param[in] osp       OSP device
310  *
311  * \retval              the number of the precreated objects
312  */
313 static inline int osp_objs_precreated(const struct lu_env *env,
314                                       struct osp_device *osp)
315 {
316         return osp_fid_diff(&osp->opd_pre_last_created_fid,
317                             &osp->opd_pre_used_fid);
318 }
319
320 /**
321  * Check whether the pool of precreated objects is nearly empty
322  *
323  * We should not wait till the pool of precreated objects is exhausted,
324  * because then there would be a long period of the OSP being unavailable for
325  * new creations due to the lengthy precreate RPC. Instead we ask for another
326  * precreation ahead of time and hopefully have it ready before the current
327  * pool is empty. Note that this function relies on external locking.
328  *
329  * \param[in] env       LU environment provided by the caller
330  * \param[in] d         OSP device
331  *
332  * \retval              0 - current pool is good enough, 1 - time to precreate
333  */
334 static inline int osp_precreate_near_empty_nolock(const struct lu_env *env,
335                                                   struct osp_device *d)
336 {
337         int window = osp_objs_precreated(env, d);
338
339         /* don't consider new precreation till the OST is healthy and
340          * has free space */
341         return ((window - d->opd_pre_reserved < d->opd_pre_create_count / 2 ||
342                  d->opd_force_creation) && (d->opd_pre_status == 0));
343 }
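/*
 * A worked example of the check above (numbers are hypothetical): with
 * opd_pre_create_count = 128, opd_pre_reserved = 10 and a window of 70
 * precreated objects, 70 - 10 = 60 is below 128 / 2 = 64, so as long as
 * opd_pre_status == 0 it is time to ask for another precreate batch.
 */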
344
345 /**
346  * Check pool of precreated objects
347  *
348  * This is protected version of osp_precreate_near_empty_nolock(), check that
349  * for the details.
350  *
351  * \param[in] env       LU environment provided by the caller
352  * \param[in] d         OSP device
353  *
354  * \retval              0 - current pool is good enough, 1 - time to precreate
355  */
356 static inline int osp_precreate_near_empty(const struct lu_env *env,
357                                            struct osp_device *d)
358 {
359         int rc;
360
361         if (d->opd_pre == NULL)
362                 return 0;
363
364         /* XXX: do we really need locking here? */
365         spin_lock(&d->opd_pre_lock);
366         rc = osp_precreate_near_empty_nolock(env, d);
367         spin_unlock(&d->opd_pre_lock);
368         return rc;
369 }
370
371 /**
372  * Check whether the given sequence is empty
373  *
374  * Returns a binary result indicating whether the given sequence has some IDs
375  * left or not. See osp_fid_end_seq() for the details. This is a lock-protected
376  * version of that function.
377  *
378  * \param[in] env       LU environment provided by the caller
379  * \param[in] osp       OSP device
380  *
381  * \retval              0 - current sequence has no IDs, 1 - otherwise
382  */
383 static inline int osp_create_end_seq(const struct lu_env *env,
384                                      struct osp_device *osp)
385 {
386         struct lu_fid *fid = &osp->opd_pre_used_fid;
387         int rc;
388
389         spin_lock(&osp->opd_pre_lock);
390         rc = osp_fid_end_seq(env, fid);
391         spin_unlock(&osp->opd_pre_lock);
392         return rc;
393 }
394
395 /**
396  * Write FID into last_oid/last_seq files
397  *
398  * The function stores the sequence and the in-sequence id into two dedicated
399  * files. The sync argument can be used to request synchronous commit, so the
400  * function won't return until the updates are committed.
401  *
402  * \param[in] env       LU environment provided by the caller
403  * \param[in] osp       OSP device
404  * \param[in] fid       fid where sequence/id is taken
405  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
406  *
407  * \retval 0            on success
408  * \retval negative     negated errno on error
409  **/
410 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
411                                  struct lu_fid *fid, int sync)
412 {
413         struct osp_thread_info  *oti = osp_env_info(env);
414         struct lu_buf      *lb_oid = &oti->osi_lb;
415         struct lu_buf      *lb_oseq = &oti->osi_lb2;
416         loff_t             oid_off;
417         u64                oid;
418         loff_t             oseq_off;
419         struct thandle    *th;
420         int                   rc;
421         ENTRY;
422
423         if (osp->opd_storage->dd_rdonly)
424                 RETURN(0);
425
426         /* Note: though f_oid is only 32 bits, it will also write 64 bits
427          * for oid to keep compatibility with the previous version. */
428         oid = fid->f_oid;
429         osp_objid_buf_prep(lb_oid, &oid_off,
430                            &oid, osp->opd_index);
431
432         osp_objseq_buf_prep(lb_oseq, &oseq_off,
433                             &fid->f_seq, osp->opd_index);
434
435         th = dt_trans_create(env, osp->opd_storage);
436         if (IS_ERR(th))
437                 RETURN(PTR_ERR(th));
438
439         th->th_sync |= sync;
440         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
441                                      lb_oid, oid_off, th);
442         if (rc != 0)
443                 GOTO(out, rc);
444
445         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
446                                      lb_oseq, oseq_off, th);
447         if (rc != 0)
448                 GOTO(out, rc);
449
450         rc = dt_trans_start_local(env, osp->opd_storage, th);
451         if (rc != 0)
452                 GOTO(out, rc);
453
454         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
455                              &oid_off, th);
456         if (rc != 0) {
457                 CERROR("%s: can not write to last oid file: rc = %d\n",
458                         osp->opd_obd->obd_name, rc);
459                 GOTO(out, rc);
460         }
461         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
462                              &oseq_off, th);
463         if (rc) {
464                 CERROR("%s: can not write to last seq file: rc = %d\n",
465                         osp->opd_obd->obd_name, rc);
466                 GOTO(out, rc);
467         }
468 out:
469         dt_trans_stop(env, osp->opd_storage, th);
470         RETURN(rc);
471 }
472
473 /**
474  * Switch to another sequence
475  *
476  * When the current sequence has no available IDs left, OSP has to switch to
477  * a new sequence. OSP requests it using the regular FLDB protocol and stores
478  * it synchronously before it is used for precreation. This is needed to keep
479  * the sequence referenced (not orphaned), otherwise it's possible that the
480  * OST has some objects precreated and the clients have data written to them,
481  * but after an MDT failover nothing references those objects and OSP has no
482  * idea that the sequence needs to be cleaned up.
483  * While this is a very expensive operation, it's supposed to happen very
484  * infrequently because a sequence has 2^32 or 2^48 objects (depending on type).
485  *
486  * \param[in] env       LU environment provided by the caller
487  * \param[in] osp       OSP device
488  *
489  * \retval 0            on success
490  * \retval negative     negated errno on error
491  */
492 static int osp_precreate_rollover_new_seq(struct lu_env *env,
493                                           struct osp_device *osp)
494 {
495         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
496         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
497         int             rc;
498         ENTRY;
499
500         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
501         if (rc != 0) {
502                 CERROR("%s: alloc fid error: rc = %d\n",
503                        osp->opd_obd->obd_name, rc);
504                 RETURN(rc);
505         }
506
507         fid->f_oid = 1;
508         fid->f_ver = 0;
509         LASSERTF(fid_seq(fid) != fid_seq(last_fid),
510                  "fid "DFID", last_fid "DFID"\n", PFID(fid),
511                  PFID(last_fid));
512
513         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
514         if (rc != 0) {
515                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
516                        osp->opd_obd->obd_name, rc);
517                 RETURN(rc);
518         }
519
520         LCONSOLE_INFO("%s: update sequence from %#llx to %#llx\n",
521                       osp->opd_obd->obd_name, fid_seq(last_fid),
522                       fid_seq(fid));
523         /* Update last_xxx to the new seq */
524         spin_lock(&osp->opd_pre_lock);
525         osp->opd_last_used_fid = *fid;
526         osp_fid_to_obdid(fid, &osp->opd_last_id);
527         osp->opd_gap_start_fid = *fid;
528         osp->opd_pre_used_fid = *fid;
529         osp->opd_pre_last_created_fid = *fid;
530         spin_unlock(&osp->opd_pre_lock);
531
532         RETURN(rc);
533 }
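/*
 * A note on the rollover above: the freshly allocated sequence starts at
 * f_oid = 1, and all cached FIDs (last used, gap start, precreate cursor and
 * last created) are reset to it under opd_pre_lock, so both the precreate
 * and orphan-cleanup paths continue from the same point in the new sequence.
 */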
534
535 /**
536  * Find IDs available in current sequence
537  *
538  * The function calculates the highest possible ID and the number of IDs
539  * available in the current sequence OSP is using. The number is limited
540  * artificially by the caller (grow param) and naturally by the number of IDs
541  * available in the sequence. The function doesn't require external
542  * locking.
543  *
544  * \param[in] env       LU environment provided by the caller
545  * \param[in] osp       OSP device
546  * \param[in] fid       FID the caller wants to start with
547  * \param[in] grow      how many the caller wants
548  * \param[out] fid      the highest calculated FID
549  * \param[out] grow     the number of available IDs calculated
550  *
551  * \retval              0 on success, 1 - the sequence is empty
552  */
553 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
554                               struct lu_fid *fid, int *grow)
555 {
556         struct osp_thread_info  *osi = osp_env_info(env);
557         __u64                   end;
558         int                     i = 0;
559
560         if (fid_is_idif(fid)) {
561                 struct lu_fid   *last_fid;
562                 struct ost_id   *oi = &osi->osi_oi;
563                 int rc;
564
565                 spin_lock(&osp->opd_pre_lock);
566                 last_fid = &osp->opd_pre_last_created_fid;
567                 fid_to_ostid(last_fid, oi);
568                 end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
569                 *grow = end - ostid_id(oi);
570                 rc = ostid_set_id(oi, ostid_id(oi) + *grow);
571                 spin_unlock(&osp->opd_pre_lock);
572
573                 if (*grow == 0 || rc)
574                         return 1;
575
576                 ostid_to_fid(fid, oi, osp->opd_index);
577                 return 0;
578         }
579
580         spin_lock(&osp->opd_pre_lock);
581         *fid = osp->opd_pre_last_created_fid;
582         end = fid->f_oid;
583         end = min((end + *grow), (__u64)LUSTRE_DATA_SEQ_MAX_WIDTH);
584         *grow = end - fid->f_oid;
585         fid->f_oid += end - fid->f_oid;
586         spin_unlock(&osp->opd_pre_lock);
587
588         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
589                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
590
591         return *grow > 0 ? 0 : 1;
592 }
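/*
 * A minimal sketch of the non-IDIF clamping above; this helper is
 * hypothetical (not part of Lustre) and only illustrates how *grow is
 * limited so that the last object ID never passes the sequence width.
 */
static inline int osp_precreate_clamp_grow_example(__u64 last_oid, int *grow,
						   __u64 seq_width)
{
	__u64 end = min(last_oid + *grow, seq_width);

	*grow = end - last_oid;
	/* 0 - IDs are still available, 1 - the sequence is exhausted */
	return *grow > 0 ? 0 : 1;
}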
593
594 /**
595  * Prepare and send precreate RPC
596  *
597  * The function finds how many objects should be precreated. Then it
598  * allocates, prepares and sends the precreate RPC synchronously. Upon reply
599  * the function wakes up the threads waiting for new objects on this target.
600  * If the target wasn't able to create all the objects requested, then the
601  * next precreate will ask for fewer objects (i.e. precreation slows down).
602  *
603  * \param[in] env       LU environment provided by the caller
604  * \param[in] d         OSP device
605  *
606  * \retval 0            on success
607  * \retval negative     negated errno on error
608  **/
609 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
610 {
611         struct osp_thread_info  *oti = osp_env_info(env);
612         struct ptlrpc_request   *req;
613         struct obd_import       *imp;
614         struct ost_body         *body;
615         int                      rc, grow, diff;
616         struct lu_fid           *fid = &oti->osi_fid;
617         ENTRY;
618
619         /* don't precreate new objects till the OST is healthy and has free space */
620         if (unlikely(d->opd_pre_status)) {
621                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
622                        d->opd_obd->obd_name, d->opd_pre_status);
623                 RETURN(0);
624         }
625
626         /*
627          * if connection/initialization is not completed, ignore
628          */
629         imp = d->opd_obd->u.cli.cl_import;
630         LASSERT(imp);
631
632         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
633         if (req == NULL)
634                 RETURN(-ENOMEM);
635         req->rq_request_portal = OST_CREATE_PORTAL;
636
637         /* Delorphan happens only on the first MDT-OST connect. Resend/replay
638          * handles object creation on reconnects, so there is no need to do
639          * delorphan in this case.
640          */
641
642         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
643         if (rc) {
644                 ptlrpc_request_free(req);
645                 RETURN(rc);
646         }
647
648         spin_lock(&d->opd_pre_lock);
649         if (d->opd_force_creation)
650                 d->opd_pre_create_count = OST_MIN_PRECREATE;
651         else if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
652                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
653         grow = d->opd_pre_create_count;
654         spin_unlock(&d->opd_pre_lock);
655
656         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
657         LASSERT(body);
658
659         *fid = d->opd_pre_last_created_fid;
660         rc = osp_precreate_fids(env, d, fid, &grow);
661         if (rc == 1)
662                 /* Current seq has been used up */
663                 GOTO(out_req, rc = -ENOSPC);
664
665         if (!osp_is_fid_client(d)) {
666                 /* A non-FID client will always send seq 0 because of
667                  * compatibility */
668                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
669                 fid->f_seq = 0;
670         }
671
672         fid_to_ostid(fid, &body->oa.o_oi);
673         body->oa.o_valid = OBD_MD_FLGROUP;
674
675         ptlrpc_request_set_replen(req);
676
677         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
678                 GOTO(ready, rc = 0);
679
680         rc = ptlrpc_queue_wait(req);
681         if (rc) {
682                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
683                        rc);
684                 if (req->rq_net_err)
685                         /* let osp_precreate_reserve() wait and retry */
686                         rc = -ENOTCONN;
687                 GOTO(out_req, rc);
688         }
689
690         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
691         if (body == NULL)
692                 GOTO(out_req, rc = -EPROTO);
693
694         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
695
696 ready:
697         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
698                 CERROR("%s: precreate fid "DFID" <= local used fid "DFID
699                        ": rc = %d\n", d->opd_obd->obd_name,
700                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
701                 GOTO(out_req, rc = -ESTALE);
702         }
703
704         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
705
706         spin_lock(&d->opd_pre_lock);
707         if (diff < grow) {
708                 /* the OST has not managed to create all the
709                  * objects we asked for */
710                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
711                 d->opd_pre_create_slow = 1;
712         } else {
713                 /* the OST is able to keep up with the work,
714                  * we could consider increasing create_count
715                  * next time if needed */
716                 d->opd_pre_create_slow = 0;
717         }
718
719         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
720         fid_to_ostid(fid, &body->oa.o_oi);
721
722         d->opd_pre_last_created_fid = *fid;
723         d->opd_force_creation = false;
724         spin_unlock(&d->opd_pre_lock);
725
726         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
727                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
728                PFID(&d->opd_pre_last_created_fid));
729 out_req:
730         /* now we can wake up all users waiting for objects */
731         osp_pre_update_status(d, rc);
732
733         /* pause to let osp_precreate_reserve() go first */
734         CFS_FAIL_TIMEOUT(OBD_FAIL_OSP_PRECREATE_PAUSE, 2);
735
736         ptlrpc_req_finished(req);
737         RETURN(rc);
738 }
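/*
 * An illustration of the adaptation above (numbers are hypothetical): if
 * grow was 256 but the reply shows only diff = 100 new objects, the next
 * request shrinks to max(100, OST_MIN_PRECREATE) and opd_pre_create_slow is
 * set; once the OST keeps up again (diff >= grow) the flag is cleared.
 */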
739
740 /**
741  * Get last precreated object from target (OST)
742  *
743  * Sends a synchronous RPC to the target (OST) to learn the last precreated
744  * object. This is later used to remove all unused objects (the orphan
745  * cleanup procedure). Also, the next object after the one we got will be
746  * used as a starting point for new precreates.
747  *
748  * \param[in] env       LU environment provided by the caller
749  * \param[in] d         OSP device
750  * \param[in] update    update or not update last used fid
751  *
752  * \retval 0            on success
753  * \retval negative     negated errno on error
754  **/
755 static int osp_get_lastfid_from_ost(const struct lu_env *env,
756                                     struct osp_device *d, bool update)
757 {
758         struct ptlrpc_request   *req = NULL;
759         struct obd_import       *imp;
760         struct lu_fid           *last_fid;
761         char                    *tmp;
762         int                     rc;
763         ENTRY;
764
765         imp = d->opd_obd->u.cli.cl_import;
766         LASSERT(imp);
767
768         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
769         if (req == NULL)
770                 RETURN(-ENOMEM);
771
772         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
773                              sizeof(KEY_LAST_FID));
774
775         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
776         if (rc) {
777                 ptlrpc_request_free(req);
778                 RETURN(rc);
779         }
780
781         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
782         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
783
784         req->rq_no_delay = req->rq_no_resend = 1;
785         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
786         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
787
788         ptlrpc_request_set_replen(req);
789
790         rc = ptlrpc_queue_wait(req);
791         if (rc) {
792                 /* -EFAULT means reading LAST_FID failed (see ofd_get_info_hdl),
793                  * let the sysadmin sort this out.
794                  */
795                 if (rc == -EFAULT)
796                         ptlrpc_set_import_active(imp, 0);
797                 GOTO(out, rc);
798         }
799
800         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
801         if (last_fid == NULL) {
802                 CERROR("%s: Getting last_fid failed.\n", d->opd_obd->obd_name);
803                 GOTO(out, rc = -EPROTO);
804         }
805
806         if (!fid_is_sane(last_fid)) {
807                 CERROR("%s: Got insane last_fid "DFID"\n",
808                        d->opd_obd->obd_name, PFID(last_fid));
809                 GOTO(out, rc = -EPROTO);
810         }
811
812         /* Only update the last used fid if the OST has objects for
813          * this sequence, i.e. fid_oid > 0 */
814         if (fid_oid(last_fid) > 0 && update)
815                 d->opd_last_used_fid = *last_fid;
816
817         if (fid_oid(last_fid) == 0 &&
818             fid_seq(last_fid) == fid_seq(&d->opd_last_used_fid)) {
819                 /* reformatted OST, it requires creation request
820                  * to recreate objects
821                  */
822                 d->opd_force_creation = true;
823         }
824         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
825                PFID(last_fid));
826
827 out:
828         ptlrpc_req_finished(req);
829         RETURN(rc);
830 }
831
832 /**
833  * Cleanup orphans on OST
834  *
835  * This function is called in the context of a dedicated thread handling
836  * all the precreation stuff. The function waits till local recovery
837  * is complete, then identifies all the unreferenced objects (orphans)
838  * using the highest ID referenced locally and the highest object
839  * precreated by the target. The found range is subject to removal
840  * using a specially flagged RPC. During this process the OSP is marked
841  * unavailable for new objects.
842  *
843  * \param[in] env       LU environment provided by the caller
844  * \param[in] d         OSP device
845  *
846  * \retval 0            on success
847  * \retval negative     negated errno on error
848  */
849 static int osp_precreate_cleanup_orphans(struct lu_env *env,
850                                          struct osp_device *d)
851 {
852         struct osp_thread_info  *osi = osp_env_info(env);
853         struct lu_fid           *last_fid = &osi->osi_fid;
854         struct ptlrpc_request   *req = NULL;
855         struct obd_import       *imp;
856         struct ost_body         *body;
857         int                      update_status = 0;
858         int                      rc;
859         int                      diff;
860
861         ENTRY;
862
863         /*
864          * Clean up orphans only on the first connection; after that,
865          * all precreate requests use resend/replay flags to support OST
866          * failover/reconnect.
867          */
868         if (d->opd_cleanup_orphans_done) {
869                 rc = osp_get_lastfid_from_ost(env, d, false);
870                 RETURN(0);
871         }
872         /*
873          * wait for local recovery to finish, so we can cleanup orphans.
874          * Orphans are all objects since "last used" (assigned), but
875          * there might be objects reserved and in some cases they won't
876          * be used. We can't clean them up till we're sure they won't be
877          * used. Also, we can't allow new reservations because they may
878          * end up becoming orphans and being cleaned up below. So we block
879          * new reservations and wait till all reserved objects are either
880          * used or released.
881          */
882         spin_lock(&d->opd_pre_lock);
883         d->opd_pre_recovering = 1;
884         spin_unlock(&d->opd_pre_lock);
885         /*
886          * The locking above makes sure the opd_pre_reserved check below will
887          * catch all osp_precreate_reserve() calls who find
888          * "!opd_pre_recovering".
889          */
890         wait_event_idle(d->opd_pre_waitq,
891                         (!d->opd_pre_reserved && d->opd_recovery_completed) ||
892                         !d->opd_pre_task || d->opd_got_disconnected);
893         if (!d->opd_pre_task || d->opd_got_disconnected)
894                 GOTO(out, rc = -EAGAIN);
895
896         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
897                d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
898
899         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_DELAY_DELORPHAN, cfs_fail_val);
900
901         *last_fid = d->opd_last_used_fid;
902         /* The OSP should already get the valid seq now */
903         LASSERT(!fid_is_zero(last_fid));
904         if (fid_oid(&d->opd_last_used_fid) < 2 ||
905             OBD_FAIL_CHECK(OBD_FAIL_OSP_GET_LAST_FID)) {
906                 /* lastfid looks strange... ask OST */
907                 rc = osp_get_lastfid_from_ost(env, d, true);
908                 if (rc)
909                         GOTO(out, rc);
910         }
911
912         imp = d->opd_obd->u.cli.cl_import;
913         LASSERT(imp);
914
915         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
916         if (req == NULL)
917                 GOTO(out, rc = -ENOMEM);
918
919         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
920         if (rc) {
921                 ptlrpc_request_free(req);
922                 req = NULL;
923                 GOTO(out, rc);
924         }
925
926         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
927         if (body == NULL)
928                 GOTO(out, rc = -EPROTO);
929
930         body->oa.o_flags = OBD_FL_DELORPHAN;
931         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
932
933         fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
934
935         ptlrpc_request_set_replen(req);
936
937         /* Don't resend the delorphan req */
938         req->rq_no_resend = req->rq_no_delay = 1;
939
940         rc = ptlrpc_queue_wait(req);
941         if (rc) {
942                 update_status = 1;
943                 GOTO(out, rc);
944         }
945
946         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
947         if (body == NULL)
948                 GOTO(out, rc = -EPROTO);
949
950         /*
951          * The OST provides us with the ID the new pool starts from in body->oa.o_oi
952          */
953         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
954
955         spin_lock(&d->opd_pre_lock);
956         diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
957         if (diff > 0) {
958                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
959                 d->opd_pre_last_created_fid = d->opd_last_used_fid;
960         } else {
961                 d->opd_pre_create_count = OST_MIN_PRECREATE;
962                 d->opd_pre_last_created_fid = *last_fid;
963         }
964         /*
965          * This empties the pre-creation pool and effectively blocks any new
966          * reservations.
967          */
968         LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
969                 LUSTRE_DATA_SEQ_MAX_WIDTH);
970         d->opd_pre_used_fid = d->opd_pre_last_created_fid;
971         d->opd_pre_create_slow = 0;
972         spin_unlock(&d->opd_pre_lock);
973
974         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
975                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
976                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
977 out:
978         if (req)
979                 ptlrpc_req_finished(req);
980
981
982         /*
983          * If rc is zero, the pre-creation window should have been emptied.
984          * Since waking up the herd would be useless without pre-created
985          * objects, we defer the signal to osp_precreate_send() in that case.
986          */
987         if (rc != 0) {
988                 if (update_status) {
989                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
990                                d->opd_obd->obd_name, rc);
991                         /* we can't proceed from here, the OST seems to
992                          * be in bad shape, better to wait for
993                          * a new instance of the server and repeat
994                          * from the beginning. Notify possible waiters
995                          * that this OSP isn't quite functional yet */
996                         osp_pre_update_status(d, rc);
997                 } else {
998                         wake_up_all(&d->opd_pre_user_waitq);
999                 }
1000         } else {
1001                 spin_lock(&d->opd_pre_lock);
1002                 d->opd_pre_recovering = 0;
1003                 spin_unlock(&d->opd_pre_lock);
1004                 d->opd_cleanup_orphans_done = true;
1005         }
1006
1007         RETURN(rc);
1008 }
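/*
 * A summary of the exchange above: the MDT sends its last used FID with
 * OBD_FL_DELORPHAN set, the OST destroys precreated objects beyond that FID
 * and replies with the ID the next precreate pool starts from, which is then
 * used to reset opd_pre_last_created_fid and opd_pre_used_fid.
 */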
1009
1010 /**
1011  * Update precreate status using statfs data
1012  *
1013  * The function decides whether this OSP should be used for new objects.
1014  * IOW, whether this OST is used up or has some free space. Cached statfs
1015  * data is used to make this decision. If the latest result of statfs
1016  * request (rc argument) is not success, then just mark OSP unavailable
1017  * right away.
1018  *
1019  * The new statfs data is passed in \a msfs and needs to be stored into
1020  * opd_statfs, but only after the various flags in os_state are set, so
1021  * that the new statfs data is not visible without appropriate flags set.
1022  * As such, there is no need to clear the flags here, since this is called
1023  * with new statfs data, and they should not be cleared if sent from OST.
1024  *
1025  * Add a bit of hysteresis so this flag isn't continually flapping, and
1026  * ensure that new files don't get extremely fragmented due to only a
1027  * small amount of available space in the filesystem.  We want to set
1028  * the ENOSPC/ENOINO flags unconditionally when there is less than the
1029  * reserved size free, and still copy them from the old state when there
1030  * is less than 2*reserved size free space or inodes.
1031  *
1032  * \param[in] d         OSP device
1033  * \param[in] msfs      statfs data
1034  */
1035 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs)
1036 {
1037         u32 old_state = d->opd_statfs.os_state;
1038         u32 reserved_ino_low = 32;      /* could be tunable in the future */
1039         u32 reserved_ino_high = reserved_ino_low * 2;
1040         u64 available_mb;
1041
1042         /* statfs structure not initialized yet */
1043         if (unlikely(!msfs->os_type))
1044                 return;
1045
1046         /* if the low and high watermarks have not been initialized yet */
1047         if (unlikely(d->opd_reserved_mb_high == 0 &&
1048                      d->opd_reserved_mb_low == 0)) {
1049                 /* Use ~0.1% by default to disable object allocation,
1050                  * and ~0.2% to enable; sizes in MB; set both watermarks
1051                  */
1052                 spin_lock(&d->opd_pre_lock);
1053                 if (d->opd_reserved_mb_high == 0 &&
1054                     d->opd_reserved_mb_low == 0) {
1055                         d->opd_reserved_mb_low = ((msfs->os_bsize >> 10) *
1056                                                   msfs->os_blocks) >> 20;
1057                         if (d->opd_reserved_mb_low == 0)
1058                                 d->opd_reserved_mb_low = 1;
1059                         d->opd_reserved_mb_high =
1060                                 (d->opd_reserved_mb_low << 1) + 1;
1061                 }
1062                 spin_unlock(&d->opd_pre_lock);
1063         }
1064
1065         available_mb = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
1066         if (msfs->os_ffree < reserved_ino_low)
1067                 msfs->os_state |= OS_STATFS_ENOINO;
1068         else if (msfs->os_ffree <= reserved_ino_high)
1069                 msfs->os_state |= old_state & OS_STATFS_ENOINO;
1070         /* else don't clear flags in new msfs->os_state sent from OST */
1071
1072         CDEBUG(D_INFO,
1073                "%s: blocks=%llu free=%llu avail=%llu avail_mb=%llu hwm_mb=%u files=%llu ffree=%llu state=%x: rc = %d\n",
1074                d->opd_obd->obd_name, msfs->os_blocks, msfs->os_bfree,
1075                msfs->os_bavail, available_mb, d->opd_reserved_mb_high,
1076                msfs->os_files, msfs->os_ffree, msfs->os_state,
1077                d->opd_pre_status);
1078         if (available_mb < d->opd_reserved_mb_low)
1079                 msfs->os_state |= OS_STATFS_ENOSPC;
1080         else if (available_mb <= d->opd_reserved_mb_high)
1081                 msfs->os_state |= old_state & OS_STATFS_ENOSPC;
1082         /* else don't clear flags in new msfs->os_state sent from OST */
1083
1084         if (msfs->os_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1085                 d->opd_pre_status = -ENOSPC;
1086                 if (!(old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)))
1087                         CDEBUG(D_INFO, "%s: full: state=%x: rc = %x\n",
1088                                d->opd_obd->obd_name, msfs->os_state,
1089                                d->opd_pre_status);
1090                 CDEBUG(D_INFO, "uncommitted changes=%u in_progress=%u\n",
1091                        atomic_read(&d->opd_sync_changes),
1092                        atomic_read(&d->opd_sync_rpcs_in_progress));
1093         } else if (old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1094                 d->opd_pre_status = 0;
1095                 spin_lock(&d->opd_pre_lock);
1096                 d->opd_pre_create_slow = 0;
1097                 d->opd_pre_create_count = OST_MIN_PRECREATE;
1098                 spin_unlock(&d->opd_pre_lock);
1099                 wake_up(&d->opd_pre_waitq);
1100
1101                 CDEBUG(D_INFO,
1102                        "%s: available: state=%x: rc = %d\n",
1103                        d->opd_obd->obd_name, msfs->os_state,
1104                        d->opd_pre_status);
1105         } else {
1106                 /* we only get here if rc == 0 in the caller */
1107                 d->opd_pre_status = 0;
1108         }
1109
1110         /* Object precreation is skipped on the OST if manually disabled */
1111         if (d->opd_pre_max_create_count == 0)
1112                 msfs->os_state |= OS_STATFS_NOPRECREATE;
1113         /* else don't clear flags in new msfs->os_state sent from OST */
1114
1115         /* copy only new statfs state to make it visible to MDS threads */
1116         if (&d->opd_statfs != msfs)
1117                 d->opd_statfs = *msfs;
1118 }
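/*
 * A minimal sketch of the default watermark initialization above; the helper
 * and its plain scalar arguments are hypothetical, for illustration only.
 * For a 1 TiB target with a 4096-byte block size it yields low = 1024 MB
 * (~0.1% of the device) and high = 2049 MB (~0.2%): OS_STATFS_ENOSPC is set
 * unconditionally below the low mark and only carried over from the old
 * state between the two marks, which provides the hysteresis.
 */
static inline void osp_default_watermarks_example(__u64 os_blocks,
						  __u32 os_bsize,
						  __u64 *low_mb, __u64 *high_mb)
{
	/* ~0.1% of the device size in MB, at least 1 MB */
	*low_mb = ((os_bsize >> 10) * os_blocks) >> 20;
	if (*low_mb == 0)
		*low_mb = 1;
	/* ~0.2% of the device size, used to re-enable object allocation */
	*high_mb = (*low_mb << 1) + 1;
}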
1119
1120 /**
1121  * Initialize FID for precreation
1122  *
1123  * For a just-created new target, a new sequence should be taken.
1124  * The function checks that there is no IDIF in use (if the target was
1125  * added with an older version of Lustre), then requests a new
1126  * sequence from FLDB using the regular protocol. Then this new
1127  * sequence is stored on persistent storage synchronously to prevent
1128  * possible object leakage (for the details see the description of
1129  * osp_precreate_rollover_new_seq()).
1130  *
1131  * \param[in] osp       OSP device
1132  *
1133  * \retval 0            on success
1134  * \retval negative     negated errno on error
1135  */
1136 int osp_init_pre_fid(struct osp_device *osp)
1137 {
1138         struct lu_env           env;
1139         struct osp_thread_info  *osi;
1140         struct lu_client_seq    *cli_seq;
1141         struct lu_fid           *last_fid;
1142         int                     rc;
1143         ENTRY;
1144
1145         LASSERT(osp->opd_pre != NULL);
1146
1147         /* Let's check if the current last_seq/fid is valid,
1148          * otherwise request new sequence from the controller */
1149         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1150                 /* Non-MDT0 can only use normal sequence for
1151                  * OST objects */
1152                 if (fid_is_norm(&osp->opd_last_used_fid))
1153                         RETURN(0);
1154         } else {
1155                 /* Initially MDT0 will start with IDIF, after
1156                  * that it will request new sequence from the
1157                  * controller */
1158                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1159                     fid_is_norm(&osp->opd_last_used_fid))
1160                         RETURN(0);
1161         }
1162
1163         if (!fid_is_zero(&osp->opd_last_used_fid))
1164                 CWARN("%s: invalid last used fid "DFID
1165                       ", try to get new sequence.\n",
1166                       osp->opd_obd->obd_name,
1167                       PFID(&osp->opd_last_used_fid));
1168
1169         rc = lu_env_init(&env, osp->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1170         if (rc) {
1171                 CERROR("%s: init env error: rc = %d\n",
1172                        osp->opd_obd->obd_name, rc);
1173                 RETURN(rc);
1174         }
1175
1176         osi = osp_env_info(&env);
1177         last_fid = &osi->osi_fid;
1178         fid_zero(last_fid);
1179         /* For a freshly formatted fs, it will allocate a new sequence first */
1180         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1181                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1182                 rc = seq_client_get_seq(&env, cli_seq, &last_fid->f_seq);
1183                 if (rc != 0) {
1184                         CERROR("%s: alloc fid error: rc = %d\n",
1185                                osp->opd_obd->obd_name, rc);
1186                         GOTO(out, rc);
1187                 }
1188         } else {
1189                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1190         }
1191         last_fid->f_oid = 1;
1192         last_fid->f_ver = 0;
1193
1194         spin_lock(&osp->opd_pre_lock);
1195         osp->opd_last_used_fid = *last_fid;
1196         osp->opd_pre_used_fid = *last_fid;
1197         osp->opd_pre_last_created_fid = *last_fid;
1198         spin_unlock(&osp->opd_pre_lock);
1199         rc = osp_write_last_oid_seq_files(&env, osp, last_fid, 1);
1200         if (rc != 0) {
1201                 CERROR("%s: write fid error: rc = %d\n",
1202                        osp->opd_obd->obd_name, rc);
1203                 GOTO(out, rc);
1204         }
1205 out:
1206         lu_env_fini(&env);
1207         RETURN(rc);
1208 }
1209
1210 struct opt_args {
1211         struct osp_device       *opta_dev;
1212         struct lu_env           opta_env;
1213         struct completion       *opta_started;
1214 };
1215 /**
1216  * The core of precreate functionality
1217  *
1218  * The function implements the main precreation loop. Basically it
1219  * involves connecting to the target, precreate FID initialization,
1220  * identifying and removing orphans, then serving precreation. As
1221  * part of the latter, the thread is responsible for statfs data
1222  * updates. The precreation is mostly driven by other threads
1223  * asking for new OST objects - those askers wake the thread when
1224  * the number of precreated objects reaches the low watermark.
1225  * After a disconnect, the sequence above repeats. This keeps going
1226  * until the thread is requested to stop.
1227  *
1228  * \param[in] _args     private data of the thread (OSP device to handle)
1229  *
1230  * \retval 0            on success
1231  * \retval negative     negated errno on error
1232  */
1233 static int osp_precreate_thread(void *_args)
1234 {
1235         struct opt_args         *args = _args;
1236         struct osp_device       *d = args->opta_dev;
1237         struct lu_env           *env = &args->opta_env;
1238         int                      rc;
1239
1240         ENTRY;
1241
1242         complete(args->opta_started);
1243         while (!kthread_should_stop()) {
1244                 /*
1245                  * need to be connected to OST
1246                  */
1247                 while (!kthread_should_stop()) {
1248                         if ((d->opd_pre == NULL || d->opd_pre_recovering) &&
1249                             d->opd_imp_connected &&
1250                             !d->opd_got_disconnected)
1251                                 break;
1252                         wait_event_idle(d->opd_pre_waitq,
1253                                         kthread_should_stop() ||
1254                                         d->opd_new_connection);
1255
1256                         if (!d->opd_new_connection)
1257                                 continue;
1258
1259                         OBD_FAIL_TIMEOUT(OBD_FAIL_OSP_CON_EVENT_DELAY,
1260                                          cfs_fail_val);
1261                         d->opd_new_connection = 0;
1262                         d->opd_got_disconnected = 0;
1263                         break;
1264                 }
1265
1266                 if (kthread_should_stop())
1267                         break;
1268
1269                 if (d->opd_pre) {
1270                         LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1271                         /* Sigh, fid client is not ready yet */
1272                         if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
1273                                 continue;
1274
1275                         /* Init fid for osp_precreate if necessary */
1276                         rc = osp_init_pre_fid(d);
1277                         if (rc != 0) {
1278                                 class_export_put(d->opd_exp);
1279                                 d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
1280                                 CERROR("%s: init pre fid error: rc = %d\n",
1281                                                 d->opd_obd->obd_name, rc);
1282                                 continue;
1283                         }
1284                 }
1285
1286                 if (osp_statfs_update(env, d)) {
1287                         if (wait_event_idle_timeout(d->opd_pre_waitq,
1288                                                     kthread_should_stop(),
1289                                                     cfs_time_seconds(5)) == 0)
1290                                 l_wait_event_abortable(
1291                                         d->opd_pre_waitq,
1292                                         kthread_should_stop());
1293                         continue;
1294                 }
1295
1296                 if (d->opd_pre) {
1297                         /*
1298                          * Clean up orphans or recreate missing objects.
1299                          */
1300                         rc = osp_precreate_cleanup_orphans(env, d);
1301                         if (rc != 0) {
1302                                 schedule_timeout_interruptible(cfs_time_seconds(1));
1303                                 continue;
1304                         }
1305                 }
1306
1307                 /*
1308                  * connected, can handle precreates now
1309                  */
1310                 while (!kthread_should_stop()) {
1311                         wait_event_idle(d->opd_pre_waitq,
1312                                         kthread_should_stop() ||
1313                                         osp_precreate_near_empty(env, d) ||
1314                                         osp_statfs_need_update(d) ||
1315                                         d->opd_got_disconnected);
1316
1317                         if (kthread_should_stop())
1318                                 break;
1319
1320                         /* something happened to the connection
1321                          * have to start from the beginning */
1322                         if (d->opd_got_disconnected)
1323                                 break;
1324
1325                         if (osp_statfs_need_update(d))
1326                                 if (osp_statfs_update(env, d))
1327                                         break;
1328
1329                         if (d->opd_pre == NULL)
1330                                 continue;
1331
1332                         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_GET_LAST_FID)) {
1333                                 d->opd_pre_recovering = 1;
1334                                 break;
1335                         }
1336
1337                         /* To avoid handling different seqs in precreate/orphan
1338                          * cleanup, precreation is held until the current seq is
1339                          * used up. */
1340                         if (unlikely(osp_precreate_end_seq(env, d) &&
1341                             !osp_create_end_seq(env, d)))
1342                                 continue;
1343
1344                         if (unlikely(osp_precreate_end_seq(env, d) &&
1345                                      osp_create_end_seq(env, d))) {
1346                                 LCONSOLE_INFO("%s:%#llx is used up."
1347                                               " Update to new seq\n",
1348                                               d->opd_obd->obd_name,
1349                                          fid_seq(&d->opd_pre_last_created_fid));
1350                                 rc = osp_precreate_rollover_new_seq(env, d);
1351                                 if (rc)
1352                                         continue;
1353                         }
1354
1355                         if (osp_precreate_near_empty(env, d)) {
1356                                 rc = osp_precreate_send(env, d);
1357                                 /* osp_precreate_send() sets opd_pre_status
1358                                  * in case of error, which prevents the use of
1359                                  * the failed device. */
1360                                 if (rc < 0 && rc != -ENOSPC &&
1361                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1362                                         CERROR("%s: cannot precreate objects:"
1363                                                " rc = %d\n",
1364                                                d->opd_obd->obd_name, rc);
1365                         }
1366                 }
1367         }
1368
1369         lu_env_fini(env);
1370         OBD_FREE_PTR(args);
1371
1372         RETURN(0);
1373 }
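
/*
 * Note: the thread above is created from osp_init_statfs() below as
 * "osp-pre-<index>-<group>" and stopped from osp_statfs_fini() via
 * kthread_stop(). It sleeps on opd_pre_waitq and is woken whenever the
 * precreated pool runs low, the cached statfs data needs a refresh, or
 * the import gets disconnected (see the wait_event_idle() above).
 */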
1374
1375 /**
1376  * Check when to stop waiting for precreate objects.
1377  *
1378  * The caller wanting a new OST object can't wait indefinitely. The
1379  * function checks a few conditions, including available new OST
1380  * objects, a disconnected OST, lack of space with no pending destroys,
1381  * etc. IOW, it checks whether the current OSP state makes it worth
1382  * waiting longer or whether it's better to give up.
1383  *
1384  * \param[in] env       LU environment provided by the caller
1385  * \param[in] d         OSP device
1386  *
1387  * \retval              0 - keep waiting, 1 - stop waiting (ready or hopeless)
1388  */
1389 static int osp_precreate_ready_condition(const struct lu_env *env,
1390                                          struct osp_device *d)
1391 {
1392         /* Bail out if I/O to the OST fails */
1393         if (d->opd_pre_status != 0 &&
1394             d->opd_pre_status != -EAGAIN &&
1395             d->opd_pre_status != -ENODEV &&
1396             d->opd_pre_status != -ENOTCONN &&
1397             d->opd_pre_status != -ENOSPC) {
1398                 /* DEBUG LU-3230 */
1399                 if (d->opd_pre_status != -EIO)
1400                         CERROR("%s: precreate failed opd_pre_status %d\n",
1401                                d->opd_obd->obd_name, d->opd_pre_status);
1402                 return 1;
1403         }
1404
1405         if (d->opd_pre_recovering)
1406                 return 0;
1407
1408         /* ready if got enough precreated objects */
1409         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
1410         if (d->opd_pre_reserved + 1 < osp_objs_precreated(env, d))
1411                 return 1;
1412
1413         /* ready if OST reported no space and no destroys in progress */
1414         if (atomic_read(&d->opd_sync_changes) +
1415             atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
1416             d->opd_pre_status == -ENOSPC)
1417                 return 1;
1418
1419         return 0;
1420 }
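
/*
 * This predicate is what osp_precreate_reserve() below passes to
 * wait_event_idle_timeout(): waiters keep sleeping while it returns 0
 * and are released as soon as it returns 1 or the timeout expires.
 */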
1421
1422 /**
1423  * Reserve object in precreate pool
1424  *
1425  * When the caller wants to create a new object on this target (the
1426  * target is represented by the given OSP), it should declare this intention
1427  * using the regular ->dt_declare_create() OSD API method. Then OSP tries
1428  * to reserve an object in the existing precreated pool or waits up to
1429  * obd_timeout for an available object to appear in the pool (a dedicated
1430  * thread does the real precreation in the background). The object can be
1431  * consumed later with osp_precreate_get_fid() or released with a call to
1432  * lu_object_put(). Notice the function doesn't reserve a specific ID, just
1433  * some ID. The actual ID assignment happens in osp_precreate_get_fid().
1434  * If space on the target is short and there is a pending object destroy,
1435  * then the function forces a local commit to speed up space release (see
1436  * osp_sync.c for the details).
1437  *
1438  * \param[in] env       LU environment provided by the caller
1439  * \param[in] d         OSP device
1440  *
1441  * \retval              0 on success
1442  * \retval              -ENOSPC when no space on OST
1443  * \retval              -EAGAIN try later, slow precreation in progress
1444  * \retval              -EIO when no access to OST
1445  */
1446 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d,
1447                           bool can_block)
1448 {
1449         time64_t expire = ktime_get_seconds() + obd_timeout;
1450         int precreated, rc, synced = 0;
1451
1452         ENTRY;
1453
1454         LASSERTF(osp_objs_precreated(env, d) >= 0, "Last created FID "DFID
1455                  " Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1456                  PFID(&d->opd_pre_used_fid));
1457
1458         /* opd_pre_max_create_count == 0 means this OST must not be used. */
1459         if (d->opd_pre_max_create_count == 0)
1460                 RETURN(-ENOBUFS);
1461
1462         /*
1463          * wait till:
1464          *  - preallocation is done
1465          *  - no free space expected soon
1466          *  - can't connect to OST for too long (obd_timeout)
1467          *  - OST can allocate fid sequence.
1468          */
1469         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1470                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1471
1472                 /*
1473                  * grow the precreation window (up to the max) if running low
1474                  */
1475                 precreated = osp_objs_precreated(env, d);
1476                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1477                     d->opd_pre_create_slow == 0 &&
1478                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1479                         spin_lock(&d->opd_pre_lock);
1480                         d->opd_pre_create_slow = 1;
1481                         d->opd_pre_create_count *= 2;
1482                         spin_unlock(&d->opd_pre_lock);
1483                 }
1484
1485                 spin_lock(&d->opd_pre_lock);
1486                 precreated = osp_objs_precreated(env, d);
1487                 if (precreated > d->opd_pre_reserved &&
1488                     !d->opd_pre_recovering) {
1489                         d->opd_pre_reserved++;
1490                         spin_unlock(&d->opd_pre_lock);
1491                         rc = 0;
1492
1493                         /* XXX: don't wake up if precreation is in progress */
1494                         if (osp_precreate_near_empty_nolock(env, d) &&
1495                            !osp_precreate_end_seq_nolock(env, d))
1496                                 wake_up(&d->opd_pre_waitq);
1497
1498                         break;
1499                 }
1500                 spin_unlock(&d->opd_pre_lock);
1501
1502                 /*
1503                  * all precreated objects have been used and the no-space
1504                  * status leaves us no chance to succeed very soon, but if
1505                  * there is a destroy in progress, then we should wait till
1506                  * that is done - some space might be released
1507                  */
1508                 if (unlikely(rc == -ENOSPC)) {
1509                         if (atomic_read(&d->opd_sync_changes) && synced == 0) {
1510                                 /* force local commit to release space */
1511                                 dt_commit_async(env, d->opd_storage);
1512                                 osp_sync_check_for_work(d);
1513                                 synced = 1;
1514                         }
1515                         if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
1516                                 /* just wait till destroys are done
1517                                  * see wait_event_idle_timeout() below
1518                                  */
1519                         }
1520                         if (atomic_read(&d->opd_sync_changes) +
1521                             atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
1522                                 /* no hope for free space */
1523                                 break;
1524                         }
1525                 }
1526
1527                 /* XXX: don't wake up if precreation is in progress */
1528                 wake_up(&d->opd_pre_waitq);
1529
1530                 if (ktime_get_seconds() >= expire) {
1531                         rc = -ETIMEDOUT;
1532                         break;
1533                 }
1534
1535                 if (!can_block) {
1536                         LASSERT(d->opd_pre);
1537                         rc = -ENOBUFS;
1538                         break;
1539                 }
1540
1541                 CDEBUG(D_INFO, "%s: Sleeping on objects\n",
1542                        d->opd_obd->obd_name);
1543                 if (wait_event_idle_timeout(
1544                             d->opd_pre_user_waitq,
1545                             osp_precreate_ready_condition(env, d),
1546                             cfs_time_seconds(obd_timeout)) == 0) {
1547                         CDEBUG(D_HA,
1548                                "%s: slow creates, last="DFID", next="DFID", "
1549                                "reserved=%llu, sync_changes=%u, "
1550                                "sync_rpcs_in_progress=%d, status=%d\n",
1551                                d->opd_obd->obd_name,
1552                                PFID(&d->opd_pre_last_created_fid),
1553                                PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1554                                atomic_read(&d->opd_sync_changes),
1555                                atomic_read(&d->opd_sync_rpcs_in_progress),
1556                                d->opd_pre_status);
1557                 } else {
1558                         CDEBUG(D_INFO, "%s: Woken up, status=%d\n",
1559                                d->opd_obd->obd_name, d->opd_pre_status);
1560                 }
1561         }
1562
1563         RETURN(rc);
1564 }
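
/*
 * A minimal caller-side sketch (illustrative only, not part of the
 * build), assuming the caller already has a lu_env and the OSP device:
 *
 *	rc = osp_precreate_reserve(env, d, can_block);
 *	if (rc == 0)
 *		reservation granted, consume it with osp_precreate_get_fid()
 *	else if (rc == -ENOBUFS)
 *		!can_block and no objects ready, or the target is disabled
 *		(opd_pre_max_create_count == 0); try another target
 *	else
 *		-ENOSPC/-ETIMEDOUT/-EIO/..., give up on this target for now
 */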
1565
1566 /**
1567  * Get a FID from precreation pool
1568  *
1569  * The function is a companion for osp_precreate_reserve() - it assigns
1570  * a specific FID from the precreate pool. The function should be called
1571  * only if the call to osp_precreate_reserve() was successful. The function
1572  * updates local storage to remember the highest object ID referenced
1573  * by the node in the given sequence.
1574  *
1575  * A very important detail: this is supposed to be called once the
1576  * transaction has been started, so the on-disk update will be atomic with
1577  * the data (like LOVEA) referring to this object. Then the object won't be
1578  * leaked: either it's referenced by the committed transaction or it's
1579  * subject to the orphan cleanup procedure.
1580  *
1581  * \param[in] env       LU environment provided by the caller
1582  * \param[in] d         OSP device
1583  * \param[out] fid      generated FID
1584  *
1585  * \retval 0            on success
1586  * \retval negative     negated errno on error
1587  */
1588 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1589                           struct lu_fid *fid)
1590 {
1591         struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
1592         /* grab next id from the pool */
1593         spin_lock(&d->opd_pre_lock);
1594
1595         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1596                              &d->opd_pre_last_created_fid) < 0,
1597                  "next fid "DFID" last created fid "DFID"\n",
1598                  PFID(&d->opd_pre_used_fid),
1599                  PFID(&d->opd_pre_last_created_fid));
1600
1601         /*
1602          * When the sequence is used up, a new one should be allocated in
1603          * osp_precreate_rollover_new_seq(). So ASSERT here to avoid
1604          * objid overflow.
1605          */
1606         LASSERTF(osp_fid_end_seq(env, pre_used_fid) == 0,
1607                  "next fid "DFID" last created fid "DFID"\n",
1608                  PFID(&d->opd_pre_used_fid),
1609                  PFID(&d->opd_pre_last_created_fid));
1610         /* Non-IDIF fids shouldn't get here with oid == 0xFFFFFFFF. */
1611         if (fid_is_idif(pre_used_fid) &&
1612             unlikely(fid_oid(pre_used_fid) == LUSTRE_DATA_SEQ_MAX_WIDTH))
1613                 pre_used_fid->f_seq++;
1614
1615         d->opd_pre_used_fid.f_oid++;
1616         memcpy(fid, &d->opd_pre_used_fid, sizeof(*fid));
1617         d->opd_pre_reserved--;
1618         /*
1619          * last_used_id must be changed along with getting a new id,
1620          * otherwise we might miscalculate the gap and lose or leak objects
1621          */
1622         osp_update_last_fid(d, fid);
1623         spin_unlock(&d->opd_pre_lock);
1624
1625         /*
1626          * probably the main thread suspended orphan cleanup till
1627          * all reservations are released; see the comment in
1628          * osp_precreate_thread() just before orphan cleanup
1629          */
1630         if (unlikely(d->opd_pre_reserved == 0 &&
1631                      (d->opd_pre_recovering || d->opd_pre_status)))
1632                 wake_up(&d->opd_pre_waitq);
1633
1634         return 0;
1635 }
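
/*
 * Illustrative pairing of the two functions above (a sketch, not part
 * of the build): the FID must be fetched only after the transaction
 * has been started, so the on-disk update stays atomic with the data
 * referring to the new object:
 *
 *	rc = osp_precreate_reserve(env, d, true);
 *	if (rc == 0) {
 *		...declare and start the transaction...
 *		rc = osp_precreate_get_fid(env, d, fid);
 *	}
 */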
1636
1637 /*
1638  * Set size regular attribute on an object
1639  *
1640  * When a striping is created late, it's possible that the size is already
1641  * initialized on the file. Then the new striping should inherit the size
1642  * from the file. The function sets the size on the object using the regular
1643  * protocol (OST_PUNCH).
1644  * XXX: should be re-implemented using OUT ?
1645  *
1646  * \param[in] env       LU environment provided by the caller
1647  * \param[in] dt        object
1648  * \param[in] size      size to set.
1649  *
1650  * \retval 0            on success
1651  * \retval negative     negated errno on error
1652  */
1653 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1654                         __u64 size)
1655 {
1656         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1657         struct ptlrpc_request   *req = NULL;
1658         struct obd_import       *imp;
1659         struct ost_body         *body;
1660         struct obdo             *oa = NULL;
1661         int                      rc;
1662
1663         ENTRY;
1664
1665         imp = d->opd_obd->u.cli.cl_import;
1666         LASSERT(imp);
1667
1668         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1669         if (req == NULL)
1670                 RETURN(-ENOMEM);
1671
1672         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1673         if (rc) {
1674                 ptlrpc_request_free(req);
1675                 RETURN(rc);
1676         }
1677
1678         /*
1679          * XXX: decide how to handle resend here
1680          * if we don't resend, the client may see a wrong file size
1681          * if we do resend, the MDS thread can get stuck for quite a long
1682          * time, and if we don't resend, the client will also get -EAGAIN !!
1683          * (see LU-7975 and sanity/test_27F use cases)
1684          * but let's decide not to resend/delay this truncate request to OST
1685          * and allow the Client to decide to resend, in a less aggressive way,
1686          * from after_reply(), by returning -EINPROGRESS instead of
1687          * -EAGAIN/-EWOULDBLOCK upon return from ptlrpc_queue_wait() at the
1688          * end of this routine
1689          */
1690         req->rq_no_resend = req->rq_no_delay = 1;
1691
1692         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1693         ptlrpc_at_set_req_timeout(req);
1694
1695         OBD_ALLOC_PTR(oa);
1696         if (oa == NULL)
1697                 GOTO(out, rc = -ENOMEM);
1698
1699         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1700         LASSERT(rc == 0);
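        /* punch from "size" to EOF: o_size is the start and o_blocks the
         * end of the punched range, so this effectively truncates the
         * object down to "size" (assuming standard OST_PUNCH semantics) */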
1701         oa->o_size = size;
1702         oa->o_blocks = OBD_OBJECT_EOF;
1703         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1704                       OBD_MD_FLID | OBD_MD_FLGROUP;
1705
1706         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1707         LASSERT(body);
1708         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1709
1710         /* XXX: capa support? */
1711         /* osc_pack_capa(req, body, capa); */
1712
1713         ptlrpc_request_set_replen(req);
1714
1715         rc = ptlrpc_queue_wait(req);
1716         if (rc) {
1717                 /* -EAGAIN/-EWOULDBLOCK means the OST is unreachable at the
1718                  * moment since we have decided not to resend/delay, but this
1719                  * could lead to a wrong size being seen at the Client side,
1720                  * and even to a process failing an open if it doesn't handle
1721                  * -EAGAIN itself. So it is better to return -EINPROGRESS
1722                  * instead and leave the decision to resend to the Client
1723                  * side in after_reply() */
1724                 if (rc == -EAGAIN) {
1725                         rc = -EINPROGRESS;
1726                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1727                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1728                                "resend\n");
1729                 } else {
1730                         CERROR("can't punch object: %d\n", rc);
1731                 }
1732         }
1733 out:
1734         ptlrpc_req_finished(req);
1735         if (oa)
1736                 OBD_FREE_PTR(oa);
1737         RETURN(rc);
1738 }
1739
1740 /**
1741  * Initialize precreation functionality of OSP
1742  *
1743  * Prepares all the internal structures used by object precreation
1744  *
1745  * \param[in] d         OSP device
1746  *
1747  * \retval 0            on success
1748  * \retval negative     negated errno on error
1749  */
1750 int osp_init_precreate(struct osp_device *d)
1751 {
1752         ENTRY;
1753
1754         OBD_ALLOC_PTR(d->opd_pre);
1755         if (d->opd_pre == NULL)
1756                 RETURN(-ENOMEM);
1757
1758         /* initially precreation isn't ready */
1759         init_waitqueue_head(&d->opd_pre_user_waitq);
1760         d->opd_pre_status = -EAGAIN;
1761         fid_zero(&d->opd_pre_used_fid);
1762         d->opd_pre_used_fid.f_oid = 1;
1763         fid_zero(&d->opd_pre_last_created_fid);
1764         d->opd_pre_last_created_fid.f_oid = 1;
1765         d->opd_last_id = 0;
1766         d->opd_pre_reserved = 0;
1767         d->opd_got_disconnected = 1;
1768         d->opd_pre_create_slow = 0;
1769         d->opd_pre_create_count = OST_MIN_PRECREATE;
1770         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1771         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1772         d->opd_reserved_mb_high = 0;
1773         d->opd_reserved_mb_low = 0;
1774         d->opd_cleanup_orphans_done = false;
1775         d->opd_force_creation = false;
1776
1777         RETURN(0);
1778 }
1779
1780 /**
1781  * Finish precreate functionality of OSP
1782  *
1783  * Frees the precreation state of the OSP device. The precreate thread
1784  * and the statfs update timer are stopped separately, in
1785  * osp_statfs_fini().
1786  *
1787  * \param[in] d         OSP device
1788  */
1789 void osp_precreate_fini(struct osp_device *d)
1790 {
1791         ENTRY;
1792
1793         if (d->opd_pre == NULL)
1794                 RETURN_EXIT;
1795
1796         OBD_FREE_PTR(d->opd_pre);
1797         d->opd_pre = NULL;
1798
1799         EXIT;
1800 }
1801
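/**
 * Initialize statfs handling and start the precreate thread
 *
 * Sets up the cached statfs state and its update timer and, unless the
 * backing storage is read-only, starts the "osp-pre" thread which
 * handles both precreation and statfs refreshes.
 *
 * \param[in] d         OSP device
 *
 * \retval 0            on success
 * \retval negative     negated errno on error
 */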
1802 int osp_init_statfs(struct osp_device *d)
1803 {
1804         struct task_struct      *task;
1805         struct opt_args         *args;
1806         DECLARE_COMPLETION_ONSTACK(started);
1807         int                     rc;
1808
1809         ENTRY;
1810
1811         spin_lock_init(&d->opd_pre_lock);
1812         init_waitqueue_head(&d->opd_pre_waitq);
1813
1814         /*
1815          * Initialize statfs-related things
1816          */
1817         d->opd_statfs_maxage = 5; /* default update interval */
1818         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
1819                                                 1000 * NSEC_PER_SEC);
1820         CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
1821                ktime_get_ns(),
1822                ktime_to_ns(d->opd_statfs_fresh_till));
1823         cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
1824                         (unsigned long)d, 0);
1825
1826         if (d->opd_storage->dd_rdonly)
1827                 RETURN(0);
1828
1829         OBD_ALLOC_PTR(args);
1830         if (!args)
1831                 RETURN(0);
1832         args->opta_dev = d;
1833         args->opta_started = &started;
1834         rc = lu_env_init(&args->opta_env,
1835                          d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1836         if (rc) {
1837                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1838                        rc);
1839                 OBD_FREE_PTR(args);
1840                 RETURN(0);
1841         }
1842
1843         /*
1844          * start thread handling precreation and statfs updates
1845          */
1846         task = kthread_create(osp_precreate_thread, args,
1847                               "osp-pre-%u-%u", d->opd_index, d->opd_group);
1848         if (IS_ERR(task)) {
1849                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1850                 lu_env_fini(&args->opta_env);
1851                 OBD_FREE_PTR(args);
1852                 RETURN(PTR_ERR(task));
1853         }
1854         d->opd_pre_task = task;
1855         wake_up_process(task);
1856         wait_for_completion(&started);
1857
1858         RETURN(0);
1859 }
1860
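/**
 * Stop the statfs machinery of OSP
 *
 * Deletes the statfs update timer and stops the precreate thread.
 *
 * \param[in] d         OSP device
 */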
1861 void osp_statfs_fini(struct osp_device *d)
1862 {
1863         struct task_struct *task = d->opd_pre_task;
1864         ENTRY;
1865
1866         del_timer(&d->opd_statfs_timer);
1867
1868         d->opd_pre_task = NULL;
1869         if (task)
1870                 kthread_stop(task);
1871
1872         EXIT;
1873 }