1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osp/osp_precreate.c
32  *
33  * Lustre OST Proxy Device
34  *
35  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
36  * Author: Mikhail Pershin <mike.pershin@intel.com>
37  * Author: Di Wang <di.wang@intel.com>
38  */
39
40 #define DEBUG_SUBSYSTEM S_MDS
41
42 #include <linux/kthread.h>
43
44 #include <lustre_obdo.h>
45
46 #include "osp_internal.h"
47
48 /*
49  * there are two specific states to take care of:
50  *
51  * = import is disconnected =
52  *
53  * = import is inactive =
54  *   in this case osp_declare_create() returns an error
55  *
56  */
57
58 /**
59  * Check whether statfs data is expired
60  *
61  * The OSP device caches statfs data for the target; the function checks
62  * whether the data has expired.
63  *
64  * \param[in] d         OSP device
65  *
66  * \retval              0 - not expired, 1 - expired
67  */
68 static inline int osp_statfs_need_update(struct osp_device *d)
69 {
70         return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
71 }
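/*
 * Illustrative example (the maxage value here is an assumption, not taken
 * from this file): if opd_statfs_maxage were 5 seconds and the last statfs
 * reply arrived at time T, osp_statfs_interpret() sets
 * opd_statfs_fresh_till = T + 5s. Until that moment
 * osp_statfs_need_update() returns 0; afterwards it returns 1 and the
 * precreate thread issues a new OST_STATFS request.
 */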
72
73 /*
74  * OSP tries to maintain a pool of available objects so that calls to create
75  * objects don't block most of the time
76  *
77  * each time OSP gets connected to the OST, we should start from precreation cleanup
78  */
79 static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
80 {
81         struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
82
83         LASSERT(d);
84         /* invalidate statfs data so osp_precreate_thread() can refresh */
85         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
86         if (d->opd_pre_task)
87                 wake_up(&d->opd_pre_waitq);
88 }
89
90 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs);
91
92 /*
93  * The function updates the current precreation status if it is broken, and
94  * updates the cached statfs state if it is functional, then wakes up waiters.
95  * We don't clear opd_pre_status directly here, but rather leave this
96  * to osp_pre_update_msfs() to do if everything is OK so that we don't
97  * have a race to clear opd_pre_status and then set it to -ENOSPC again.
98  *
99  * \param[in] d         OSP device
100  * \param[in] msfs      statfs data
101  * \param[in] rc        new precreate status for device \a d
102  */
103 static void osp_pre_update_status_msfs(struct osp_device *d,
104                                        struct obd_statfs *msfs, int rc)
105 {
106         CDEBUG(D_INFO, "%s: Updating status = %d\n", d->opd_obd->obd_name, rc);
107         if (rc)
108                 d->opd_pre_status = rc;
109         else
110                 osp_pre_update_msfs(d, msfs);
111
112         wake_up_all(&d->opd_pre_user_waitq);
113 }
114
115 /* Pass in the old statfs data in case the limits have changed */
116 void osp_pre_update_status(struct osp_device *d, int rc)
117 {
118         osp_pre_update_status_msfs(d, &d->opd_statfs, rc);
119 }
120
121
122 /**
123  * RPC interpret callback for OST_STATFS RPC
124  *
125  * An interpretation callback called by ptlrpc for OST_STATFS RPC when it is
126  * replied by the target. It's used to maintain statfs cache for the target.
127  * The function fills data from the reply if successful and schedules another
128  * update.
129  *
130  * \param[in] env       LU environment provided by the caller
131  * \param[in] req       RPC replied
132  * \param[in] aa        callback data
133  * \param[in] rc        RPC result
134  *
135  * \retval 0            on success
136  * \retval negative     negated errno on error
137  */
138 static int osp_statfs_interpret(const struct lu_env *env,
139                                 struct ptlrpc_request *req, void *args, int rc)
140 {
141         union ptlrpc_async_args *aa = args;
142         struct obd_import *imp = req->rq_import;
143         struct obd_statfs *msfs;
144         struct obd_statfs *sfs;
145         struct osp_device *d;
146         u64 maxage_ns;
147
148         ENTRY;
149
150         aa = ptlrpc_req_async_args(aa, req);
151         d = aa->pointer_arg[0];
152         LASSERT(d);
153
154         if (rc != 0)
155                 GOTO(out, rc);
156
157         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
158         if (msfs == NULL)
159                 GOTO(out, rc = -EPROTO);
160
161         if (d->opd_pre)
162                 osp_pre_update_status_msfs(d, msfs, 0);
163         else
164                 osp_pre_update_msfs(d, msfs);
165
166         /* schedule next update */
167         maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
168         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
169         mod_timer(&d->opd_statfs_timer,
170                   jiffies + cfs_time_seconds(d->opd_statfs_maxage));
171         d->opd_statfs_update_in_progress = 0;
172
173         sfs = &d->opd_statfs;
174         CDEBUG(D_CACHE,
175                "%s (%p): %llu blocks, %llu free, %llu avail, %u bsize, %u reserved mb low, %u reserved mb high, %u reserved ino low, %u reserved ino high, %llu files, %llu free files %#x\n",
176                d->opd_obd->obd_name, d, sfs->os_blocks, sfs->os_bfree,
177                sfs->os_bavail, sfs->os_bsize, d->opd_reserved_mb_low,
178                d->opd_reserved_mb_high, d->opd_reserved_ino_low,
179                d->opd_reserved_ino_high, sfs->os_files, sfs->os_ffree,
180                sfs->os_state);
181
182         RETURN(0);
183 out:
184         /* couldn't update statfs, try again with a small delay */
185         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
186         d->opd_statfs_update_in_progress = 0;
187         if (d->opd_pre && d->opd_pre_task)
188                 wake_up(&d->opd_pre_waitq);
189
190         if (req->rq_import_generation == imp->imp_generation)
191                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
192                        d->opd_obd->obd_name, rc);
193         RETURN(rc);
194 }
195
196 /**
197  * Send OST_STATFS RPC
198  *
199  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
200  * Also disables scheduled updates, as at times OSP may need to refresh
201  * statfs data before expiration. The function doesn't block; instead
202  * the interpret callback osp_statfs_interpret() is used.
203  *
204  * \param[in] env       LU environment provided by the caller
205  * \param[in] d         OSP device
206  */
207 static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
208 {
209         u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
210         struct ptlrpc_request   *req;
211         struct obd_import       *imp;
212         union ptlrpc_async_args *aa;
213         int rc;
214
215         ENTRY;
216
217         CDEBUG(D_CACHE, "going to update statfs\n");
218
219         imp = d->opd_obd->u.cli.cl_import;
220         LASSERT(imp);
221
222         req = ptlrpc_request_alloc(imp,
223                            d->opd_pre ? &RQF_OST_STATFS : &RQF_MDS_STATFS);
224         if (req == NULL)
225                 RETURN(-ENOMEM);
226
227         rc = ptlrpc_request_pack(req,
228                          d->opd_pre ? LUSTRE_OST_VERSION : LUSTRE_MDS_VERSION,
229                          d->opd_pre ? OST_STATFS : MDS_STATFS);
230         if (rc) {
231                 ptlrpc_request_free(req);
232                 RETURN(rc);
233         }
234         ptlrpc_request_set_replen(req);
235         if (d->opd_pre)
236                 req->rq_request_portal = OST_CREATE_PORTAL;
237         ptlrpc_at_set_req_timeout(req);
238
239         req->rq_interpret_reply = osp_statfs_interpret;
240         aa = ptlrpc_req_async_args(aa, req);
241         aa->pointer_arg[0] = d;
242
243         /*
244          * no updates till reply
245          */
246         timer_delete(&d->opd_statfs_timer);
247         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
248         d->opd_statfs_update_in_progress = 1;
249
250         ptlrpcd_add_req(req);
251
252         /* we still want to sync changes if no new changes are coming */
253         if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
254                 GOTO(out, rc);
255
256         if (atomic_read(&d->opd_sync_changes)) {
257                 struct thandle *th;
258
259                 th = dt_trans_create(env, d->opd_storage);
260                 if (IS_ERR(th)) {
261                         CERROR("%s: can't sync\n", d->opd_obd->obd_name);
262                         GOTO(out, rc);
263                 }
264                 rc = dt_trans_start_local(env, d->opd_storage, th);
265                 if (rc == 0) {
266                         CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
267                                d->opd_obd->obd_name,
268                                atomic_read(&d->opd_sync_changes));
269                         osp_sync_add_commit_cb_1s(env, d, th);
270                 }
271                 dt_trans_stop(env, d->opd_storage, th);
272         }
273
274 out:
275         RETURN(0);
276 }
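/*
 * Sketch of the refresh cycle implemented in this file (a summary of the
 * existing logic, not additional behaviour): osp_statfs_timer_cb() or
 * osp_statfs_need_now() marks the cache stale and wakes opd_pre_waitq;
 * osp_precreate_thread() sees osp_statfs_need_update() and calls
 * osp_statfs_update(), which queues the RPC via ptlrpcd_add_req() and
 * returns without waiting for the reply; osp_statfs_interpret() then
 * stores the reply in opd_statfs and re-arms opd_statfs_timer for the
 * next update.
 */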
277
278 /**
279  * Schedule an immediate update for statfs data
280  *
281  * If the cached statfs data claims no free space, but OSP has got a request to
282  * destroy an object (which will probably release some space), then we may need
283  * to refresh the cached statfs data sooner than planned. The function checks
284  * that no statfs update is in progress and schedules an immediate update if so.
285  * XXX: there might be a case where removed object(s) do not free any space
286  * (empty objects). If the number of such deletions is high, then we can start to
287  * update statfs too often, causing an RPC storm. Some throttling is needed...
288  *
289  * \param[in] d         OSP device where statfs data needs to be refreshed
290  */
291 void osp_statfs_need_now(struct osp_device *d)
292 {
293         if (!d->opd_statfs_update_in_progress) {
294                 /*
295                  * if current status is -ENOSPC (lack of free space on OST)
296                  * then we should poll OST immediately once object destroy
297                  * is replied
298                  */
299                 d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
300                 timer_delete(&d->opd_statfs_timer);
301                 wake_up(&d->opd_pre_waitq);
302         }
303 }
304
305 /**
306  * Check whether the pool of precreated objects is getting low.
307  *
308  * We should not wait till the pool of precreated objects is too low, because
309  * then there will be a long period of OSP being unavailable for new creations
310  * due to the lengthy precreate RPC. Instead we ask for another precreation
311  * batch ahead of time and hopefully have it ready before the current pool is
312  * empty. Note this function relies on external locking by opd_pre_lock.
313  *
314  * \param[in] d         OSP device
315  *
316  * \retval              0 - current pool is good enough, 1 - time to precreate
317  */
318 static inline int osp_precreate_is_low_nolock(struct osp_device *d)
319 {
320         int available = osp_objs_precreated_nolock(d) - d->opd_pre_reserved;
321         int precreate_needed = d->opd_pre_create_count > 1024 ?
322                 d->opd_pre_create_count / 4 : d->opd_pre_create_count / 2;
323
324         if (precreate_needed > 1024)
325                 precreate_needed = 1024;
326
327         /* no new precreation until OST is healthy and has free space */
328         return ((d->opd_pre_create_count - available > precreate_needed ||
329                  d->opd_force_creation) && (d->opd_pre_status == 0));
330 }
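/*
 * Worked example (illustrative numbers): with opd_pre_create_count = 128
 * the threshold is 128 / 2 = 64, so the pool is considered low once fewer
 * than 64 unreserved objects remain.  With opd_pre_create_count = 4096 the
 * threshold is 4096 / 4 = 1024 (also the cap), i.e. precreation is asked
 * for again while about 3072 objects are still available, keeping the next
 * RPC well ahead of consumers.
 */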
331
332 /**
333  * Check pool of precreated objects
334  *
335  * This is protected version of osp_precreate_is_low_nolock(), check that
336  * for the details.
337  *
338  * \param[in] d         OSP device
339  *
340  * \retval              0 - current pool is good enough, 1 - time to precreate
341  */
342 static inline int osp_precreate_is_low(struct osp_device *d)
343 {
344         int rc;
345
346         if (d->opd_pre == NULL)
347                 return 0;
348
349         /* XXX: do we really need locking here? */
350         spin_lock(&d->opd_pre_lock);
351         rc = osp_precreate_is_low_nolock(d);
352         spin_unlock(&d->opd_pre_lock);
353         return rc;
354 }
355
356 /**
357  * Write FID into last_oid/last_seq files
358  *
359  * The function stores the sequence and the in-sequence id into two dedicated
360  * files. The sync argument can be used to request synchronous commit, so the
361  * function won't return until the updates are committed.
362  *
363  * \param[in] env       LU environment provided by the caller
364  * \param[in] osp       OSP device
365  * \param[in] fid       fid where sequence/id is taken
366  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
367  *
368  * \retval 0            on success
369  * \retval negative     negated errno on error
370  **/
371 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
372                                  struct lu_fid *fid, int sync)
373 {
374         struct osp_thread_info  *oti = osp_env_info(env);
375         struct lu_buf      *lb_oid = &oti->osi_lb;
376         struct lu_buf      *lb_oseq = &oti->osi_lb2;
377         loff_t             oid_off;
378         u64                oid;
379         loff_t             oseq_off;
380         struct thandle    *th;
381         int                   rc;
382         ENTRY;
383
384         if (osp->opd_storage->dd_rdonly)
385                 RETURN(0);
386
387         /* Note: though f_oid is only 32 bits, 64 bits are still written
388          * for the oid to keep compatibility with the previous version. */
389         oid = fid->f_oid;
390         osp_objid_buf_prep(lb_oid, &oid_off,
391                            &oid, osp->opd_index);
392
393         osp_objseq_buf_prep(lb_oseq, &oseq_off,
394                             &fid->f_seq, osp->opd_index);
395
396         th = dt_trans_create(env, osp->opd_storage);
397         if (IS_ERR(th))
398                 RETURN(PTR_ERR(th));
399
400         th->th_sync |= sync;
401         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
402                                      lb_oid, oid_off, th);
403         if (rc != 0)
404                 GOTO(out, rc);
405
406         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
407                                      lb_oseq, oseq_off, th);
408         if (rc != 0)
409                 GOTO(out, rc);
410
411         rc = dt_trans_start_local(env, osp->opd_storage, th);
412         if (rc != 0)
413                 GOTO(out, rc);
414
415         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
416                              &oid_off, th);
417         if (rc != 0) {
418                 CERROR("%s: can not write to last oid file: rc = %d\n",
419                         osp->opd_obd->obd_name, rc);
420                 GOTO(out, rc);
421         }
422         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
423                              &oseq_off, th);
424         if (rc) {
425                 CERROR("%s: can not write to last seq file: rc = %d\n",
426                         osp->opd_obd->obd_name, rc);
427                 GOTO(out, rc);
428         }
429 out:
430         dt_trans_stop(env, osp->opd_storage, th);
431         RETURN(rc);
432 }
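/*
 * Minimal usage sketch (mirrors the call made in
 * osp_precreate_rollover_new_seq() below): persist a newly allocated
 * sequence synchronously before handing it out for precreation, so that a
 * crash cannot leave precreated objects referencing an unrecorded
 * sequence:
 *
 *	rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
 *	if (rc != 0)
 *		RETURN(rc);
 */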
433
434 static void osp_update_fldb_cache(const struct lu_env *env,
435                                   struct osp_device *osp,
436                                   struct lu_fid *fid)
437 {
438         struct lu_seq_range range = { 0 };
439         struct lu_server_fld *server_fld;
440         struct lu_site *site;
441
442         site = osp->opd_storage->dd_lu_dev.ld_site;
443         server_fld = lu_site2seq(site)->ss_server_fld;
444         if (!server_fld)
445                 return;
446
447         fld_range_set_type(&range, LU_SEQ_RANGE_ANY);
448         fld_server_lookup(env, server_fld, fid_seq(fid), &range);
449 }
450
451 /**
452  * Switch to another sequence
453  *
454  * When the current sequence has no available IDs left, OSP has to switch to
455  * a new sequence. OSP requests one using the regular FLDB protocol
456  * and stores it synchronously before it is used in precreate. This is needed
457  * to keep the sequence referenced (not orphaned), otherwise it's
458  * possible that the OST has some objects precreated and the clients have data
459  * written to them, but after an MDT failover nobody refers to those objects
460  * and OSP has no idea that the sequence needs cleanup.
461  * While this is a very expensive operation, it's supposed to happen infrequently
462  * because a sequence has LUSTRE_DATA_SEQ_MAX_WIDTH=32M objects by default.
463  *
464  * \param[in] env       LU environment provided by the caller
465  * \param[in] osp       OSP device
466  *
467  * \retval 0            on success
468  * \retval negative     negated errno on error
469  */
470 static int osp_precreate_rollover_new_seq(struct lu_env *env,
471                                           struct osp_device *osp)
472 {
473         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
474         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
475         int             rc;
476         ENTRY;
477
478         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
479         if (rc != 0) {
480                 CERROR("%s: alloc fid error: rc = %d\n",
481                        osp->opd_obd->obd_name, rc);
482                 RETURN(rc);
483         }
484
485         if (fid_seq(fid) <= fid_seq(last_fid)) {
486                 rc = -ESTALE;
487                 CERROR("%s: not a new sequence: fid "DFID", last_used_fid "DFID": rc = %d\n",
488                        osp->opd_obd->obd_name, PFID(fid), PFID(last_fid), rc);
489                 RETURN(rc);
490         }
491
492         fid->f_oid = 1;
493         fid->f_ver = 0;
494
495         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
496         if (rc != 0) {
497                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
498                        osp->opd_obd->obd_name, rc);
499                 RETURN(rc);
500         }
501
502         LCONSOLE(D_INFO, "%s: update sequence from %#llx to %#llx\n",
503                  osp->opd_obd->obd_name, fid_seq(last_fid),
504                  fid_seq(fid));
505         /* Update last_xxx to the new seq */
506         spin_lock(&osp->opd_pre_lock);
507         osp->opd_last_used_fid = *fid;
508         osp_fid_to_obdid(fid, &osp->opd_last_id);
509         osp->opd_gap_start_fid = *fid;
510         osp->opd_pre_used_fid = *fid;
511         osp->opd_pre_last_created_fid = *fid;
512         spin_unlock(&osp->opd_pre_lock);
513
514         if (!rc)
515                 osp_update_fldb_cache(env, osp, fid);
516
517         RETURN(rc);
518 }
519
520 /**
521  * Find IDs available in current sequence
522  *
523  * The function calculates the highest possible ID and the number of IDs
524  * available in the current sequence OSP is using. The number is limited
525  * artificially by the caller (grow param) and by the number of IDs available
526  * in the sequence by nature. The function doesn't require external
527  * locking.
528  *
529  * \param[in] env       LU environment provided by the caller
530  * \param[in] osp       OSP device
531  * \param[in] fid       FID the caller wants to start with
532  * \param[in] grow      how many the caller wants
533  * \param[out] fid      the highest calculated FID
534  * \param[out] grow     the number of available IDs calculated
535  *
536  * \retval              0 on success, 1 - the sequence is empty
537  */
538 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
539                               struct lu_fid *fid, int *grow)
540 {
541         struct osp_thread_info *osi = osp_env_info(env);
542         __u64 seq_width = osp->opd_pre_seq_width;
543         __u64 end;
544         int i = 0;
545
546         if (fid_is_idif(fid)) {
547                 struct lu_fid   *last_fid;
548                 struct ost_id   *oi = &osi->osi_oi;
549                 int rc;
550
551                 spin_lock(&osp->opd_pre_lock);
552                 last_fid = &osp->opd_pre_last_created_fid;
553                 fid_to_ostid(last_fid, oi);
554                 end = min(ostid_id(oi) + *grow, min(IDIF_MAX_OID, seq_width));
555                 *grow = end - ostid_id(oi);
556                 rc = ostid_set_id(oi, ostid_id(oi) + *grow);
557                 spin_unlock(&osp->opd_pre_lock);
558
559                 if (*grow == 0 || rc)
560                         return 1;
561
562                 ostid_to_fid(fid, oi, osp->opd_index);
563                 return 0;
564         }
565
566         spin_lock(&osp->opd_pre_lock);
567         *fid = osp->opd_pre_last_created_fid;
568         end = fid->f_oid;
569         end = min((end + *grow), min(OBIF_MAX_OID, seq_width));
570         *grow = end - fid->f_oid;
571         fid->f_oid += end - fid->f_oid;
572         spin_unlock(&osp->opd_pre_lock);
573
574         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
575                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
576
577         return *grow > 0 ? 0 : 1;
578 }
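/*
 * Worked example for the normal-FID path above (illustrative numbers):
 * with opd_pre_last_created_fid.f_oid = 100, *grow = 64 and the sequence
 * far from its limits, end = 164, so *grow stays 64 and the returned fid
 * has f_oid = 164.  If only 10 IDs remain before min(OBIF_MAX_OID,
 * opd_pre_seq_width), end is clamped, *grow becomes 10, and once *grow
 * reaches 0 the function returns 1 to trigger a sequence rollover.
 */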
579
580 /**
581  * Prepare and send precreate RPC
582  *
583  * The function finds how many objects should be precreated.  Then allocates,
584  * prepares and schedules precreate RPC synchronously. Upon reply the function
585  * wakes up the threads waiting for the new objects on this target. If the
586  * target wasn't able to create all the objects requested, then the next
587  * precreate will be asking for fewer objects (i.e. slow precreate down).
588  *
589  * \param[in] env       LU environment provided by the caller
590  * \param[in] d         OSP device
591  *
592  * \retval 0            on success
593  * \retval negative     negated errno on error
594  **/
595 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
596 {
597         struct osp_thread_info  *oti = osp_env_info(env);
598         struct ptlrpc_request   *req;
599         struct obd_import       *imp;
600         struct ost_body         *body;
601         int                      rc, grow, diff;
602         struct lu_fid           *fid = &oti->osi_fid;
603         ENTRY;
604
605         /* don't precreate new objects till the OST is healthy and has free space */
606         if (unlikely(d->opd_pre_status)) {
607                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
608                        d->opd_obd->obd_name, d->opd_pre_status);
609                 RETURN(0);
610         }
611
612         /*
613          * if connection/initialization is not completed, ignore
614          */
615         imp = d->opd_obd->u.cli.cl_import;
616         LASSERT(imp);
617
618         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
619         if (req == NULL)
620                 RETURN(-ENOMEM);
621         req->rq_request_portal = OST_CREATE_PORTAL;
622
623         /* We should not resend the create request - anyway we will have delorphan
624          * and kill these objects.
625          * Only needed for MDS+OSS rolling upgrade interop with 2.16 and older.
626          */
627         if (unlikely(!imp_connect_replay_create(imp)))
628                 req->rq_no_delay = req->rq_no_resend = 1;
629
630         /* Delorphan happens only with the first MDT-OST connect. Resend/replay
631          * handles object creation on reconnects, no need to do delorphan
632          * in this case.
633          */
634
635         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
636         if (rc) {
637                 ptlrpc_request_free(req);
638                 RETURN(rc);
639         }
640
641         spin_lock(&d->opd_pre_lock);
642         if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
643                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
644         grow = d->opd_pre_create_count;
645         spin_unlock(&d->opd_pre_lock);
646
647         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
648         LASSERT(body);
649
650         *fid = d->opd_pre_last_created_fid;
651         rc = osp_precreate_fids(env, d, fid, &grow);
652         if (rc == 1)
653                 /* Current seq has been used up */
654                 GOTO(out_req, rc = -ENOSPC);
655
656         if (!osp_is_fid_client(d)) {
657                 /* Non-FID client will always send seq 0 because of
658                  * compatibility */
659                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
660                 fid->f_seq = 0;
661         }
662
663         fid_to_ostid(fid, &body->oa.o_oi);
664         body->oa.o_valid = OBD_MD_FLGROUP;
665
666         ptlrpc_request_set_replen(req);
667
668         if (CFS_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
669                 GOTO(ready, rc = 0);
670
671         rc = ptlrpc_queue_wait(req);
672         if (rc) {
673                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
674                        rc);
675                 if (req->rq_net_err)
676                         /* make osp_precreate_reserve() wait and retry */
677                         rc = -ENOTCONN;
678                 GOTO(out_req, rc);
679         }
680
681         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
682         if (body == NULL)
683                 GOTO(out_req, rc = -EPROTO);
684
685         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
686
687 ready:
688         spin_lock(&d->opd_pre_lock);
689
690         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
691                 CERROR("%s: precreate fid "DFID" <= local used fid "DFID
692                        ": rc = %d\n", d->opd_obd->obd_name,
693                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
694                 spin_unlock(&d->opd_pre_lock);
695                 GOTO(out_req, rc = -ESTALE);
696         }
697
698         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
699
700         if (diff < grow) {
701                 /* the OST has not managed to create all the
702                  * objects we asked for */
703                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
704                 d->opd_pre_create_slow = 1;
705         } else {
706                 /* the OST is able to keep up with the work,
707                  * we could consider increasing create_count
708                  * next time if needed */
709                 d->opd_pre_create_slow = 0;
710         }
711
712         if ((body->oa.o_valid & OBD_MD_FLSIZE) && body->oa.o_size)
713                 d->opd_pre_seq_width = body->oa.o_size;
714
715         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
716         fid_to_ostid(fid, &body->oa.o_oi);
717
718         d->opd_pre_last_created_fid = *fid;
719         d->opd_force_creation = false;
720         spin_unlock(&d->opd_pre_lock);
721
722         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
723                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
724                PFID(&d->opd_pre_last_created_fid));
725 out_req:
726         /* now we can wakeup all users awaiting for objects */
727         osp_pre_update_status(d, rc);
728
729         ptlrpc_req_finished(req);
730
731         if (!rc)
732                 osp_update_fldb_cache(env, d, fid);
733
734         RETURN(rc);
735 }
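/*
 * Worked example of the adaptive window above (illustrative numbers): if
 * grow = 128 objects were requested but the reply shows the OST only
 * created diff = 50 of them, opd_pre_create_count is lowered to
 * max(50, OST_MIN_PRECREATE) and opd_pre_create_slow is set, so the next
 * request asks for less; if the OST created everything (diff >= grow),
 * opd_pre_create_slow is cleared and the window may grow again later.
 */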
736
737 /**
738  * Get last precreated object from target (OST)
739  *
740  * Sends synchronous RPC to the target (OST) to learn the last precreated
741  * object. This is later used to remove all unused objects (the orphan cleanup
742  * procedure). Also, the next object after the one we got will be used as a
743  * starting point for new precreates.
744  *
745  * \param[in] env       LU environment provided by the caller
746  * \param[in] d         OSP device
747  * \param[in] update    update or not update last used fid
748  *
749  * \retval 0            on success
750  * \retval negative     negated errno on error
751  **/
752 static int osp_get_lastfid_from_ost(const struct lu_env *env,
753                                     struct osp_device *d, bool update)
754 {
755         struct ptlrpc_request   *req = NULL;
756         struct obd_import       *imp;
757         struct lu_fid           *last_fid;
758         char                    *tmp;
759         int                     rc;
760         ENTRY;
761
762         imp = d->opd_obd->u.cli.cl_import;
763         LASSERT(imp);
764
765         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
766         if (req == NULL)
767                 RETURN(-ENOMEM);
768
769         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
770                              sizeof(KEY_LAST_FID));
771
772         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
773         if (rc) {
774                 ptlrpc_request_free(req);
775                 RETURN(rc);
776         }
777
778         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
779         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
780
781         req->rq_no_delay = req->rq_no_resend = 1;
782         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
783         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
784
785         ptlrpc_request_set_replen(req);
786
787         rc = ptlrpc_queue_wait(req);
788         if (rc) {
789                 /* -EFAULT means reading LAST_FID failed (see ofd_get_info_hdl),
790                  * let the sysadmin sort this out.
791                  */
792                 if (rc == -EFAULT)
793                         ptlrpc_set_import_active(imp, 0);
794                 GOTO(out, rc);
795         }
796
797         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
798         if (last_fid == NULL) {
799                 CERROR("%s: getting last_fid failed\n", d->opd_obd->obd_name);
800                 GOTO(out, rc = -EPROTO);
801         }
802
803         if (!fid_is_sane(last_fid)) {
804                 CERROR("%s: Got insane last_fid "DFID"\n",
805                        d->opd_obd->obd_name, PFID(last_fid));
806                 GOTO(out, rc = -EPROTO);
807         }
808
809         /* Only update the last used fid, if the OST has objects for
810          * this sequence, i.e. fid_oid > 0 */
811         if (fid_oid(last_fid) > 0 && update)
812                 d->opd_last_used_fid = *last_fid;
813
814         if (fid_seq(last_fid) == fid_seq(&d->opd_last_used_fid)) {
815                 if (fid_oid(last_fid) == 0 ||
816                     (fid_seq_is_norm(fid_seq(last_fid)) &&
817                      fid_oid(last_fid) == LUSTRE_FID_INIT_OID)) {
818                         /* reformatted OST, it requires creation request
819                          * to recreate objects
820                          */
821                         spin_lock(&d->opd_pre_lock);
822                         d->opd_force_creation = true;
823                         d->opd_pre_create_count = OST_MIN_PRECREATE;
824                         spin_unlock(&d->opd_pre_lock);
825                 }
826         }
827         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
828                PFID(last_fid));
829
830 out:
831         ptlrpc_req_finished(req);
832         RETURN(rc);
833 }
834
835 /**
836  * Cleanup orphans on OST
837  *
838  * This function is called in the context of a dedicated thread handling
839  * all the precreation stuff. The function waits till local recovery
840  * is complete, then identifies all the unreferenced objects (orphans)
841  * using the highest ID referenced locally and the highest object
842  * precreated by the target. The found range is subject to removal
843  * using a specially flagged RPC. During this process the OSP is marked
844  * unavailable for new objects.
845  *
846  * \param[in] env       LU environment provided by the caller
847  * \param[in] d         OSP device
848  *
849  * \retval 0            on success
850  * \retval negative     negated errno on error
851  */
852 static int osp_precreate_cleanup_orphans(struct lu_env *env,
853                                          struct osp_device *d)
854 {
855         struct osp_thread_info  *osi = osp_env_info(env);
856         struct lu_fid           *last_fid = &osi->osi_fid;
857         struct ptlrpc_request   *req = NULL;
858         struct obd_import       *imp = d->opd_obd->u.cli.cl_import;
859         struct ost_body         *body;
860         int                      update_status = 0;
861         int                      rc;
862         int                      diff;
863
864         ENTRY;
865
866         /*
867          * Do orphan cleanup only on the first connection; after that
868          * all precreate requests use resend/replay flags to support OST
869          * failover/reconnect.
870          */
871         if (d->opd_cleanup_orphans_done && imp_connect_replay_create(imp)) {
872                 rc = osp_get_lastfid_from_ost(env, d, false);
873                 RETURN(0);
874         }
875         /*
876          * wait for local recovery to finish, so we can cleanup orphans.
877          * Orphans are all objects since "last used" (assigned), but
878          * there might be objects reserved and in some cases they won't
879          * be used. We can't clean them up till we're sure they won't be
880          * used. Nor can we allow new reservations, because they may
881          * end up among the orphans being cleaned up below. So we block
882          * new reservations and wait till all reserved objects are either
883          * used or released.
884          */
885         spin_lock(&d->opd_pre_lock);
886         d->opd_pre_recovering = 1;
887         spin_unlock(&d->opd_pre_lock);
888         /*
889          * The locking above makes sure the opd_pre_reserved check below will
890          * catch all osp_precreate_reserve() calls who find
891          * "!opd_pre_recovering".
892          */
893         wait_event_idle(d->opd_pre_waitq,
894                         (!d->opd_pre_reserved && d->opd_recovery_completed) ||
895                         !d->opd_pre_task || d->opd_got_disconnected);
896         if (!d->opd_pre_task || d->opd_got_disconnected)
897                 GOTO(out, rc = -EAGAIN);
898
899         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
900                d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
901
902         CFS_FAIL_TIMEOUT(OBD_FAIL_MDS_DELAY_DELORPHAN, cfs_fail_val);
903
904         *last_fid = d->opd_last_used_fid;
905         /* The OSP should already get the valid seq now */
906         LASSERT(!fid_is_zero(last_fid));
907         if (fid_oid(&d->opd_last_used_fid) < 2) {
908                 /* lastfid looks strange... ask OST */
909                 rc = osp_get_lastfid_from_ost(env, d, true);
910                 if (rc)
911                         GOTO(out, rc);
912         }
913
914         imp = d->opd_obd->u.cli.cl_import;
915         LASSERT(imp);
916
917         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
918         if (req == NULL)
919                 GOTO(out, rc = -ENOMEM);
920
921         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
922         if (rc) {
923                 ptlrpc_request_free(req);
924                 req = NULL;
925                 GOTO(out, rc);
926         }
927
928         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
929         if (body == NULL)
930                 GOTO(out, rc = -EPROTO);
931
932         body->oa.o_flags = OBD_FL_DELORPHAN;
933         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
934
935         fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
936
937         ptlrpc_request_set_replen(req);
938
939         /* Don't resend the delorphan req */
940         req->rq_no_resend = req->rq_no_delay = 1;
941
942         rc = ptlrpc_queue_wait(req);
943         if (rc) {
944                 update_status = 1;
945                 GOTO(out, rc);
946         }
947
948         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
949         if (body == NULL)
950                 GOTO(out, rc = -EPROTO);
951
952         /*
953          * The OST provides us with the id the new pool starts from in body->oa.o_id
954          */
955         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
956
957         spin_lock(&d->opd_pre_lock);
958         diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
959         if (diff > 0) {
960                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
961                 d->opd_pre_last_created_fid = d->opd_last_used_fid;
962         } else {
963                 d->opd_pre_create_count = OST_MIN_PRECREATE;
964                 d->opd_pre_last_created_fid = *last_fid;
965         }
966         /*
967          * This empties the pre-creation pool and effectively blocks any new
968          * reservations.
969          */
970         d->opd_pre_used_fid = d->opd_pre_last_created_fid;
971         d->opd_pre_create_slow = 0;
972         if ((body->oa.o_valid & OBD_MD_FLSIZE) && body->oa.o_size)
973                 d->opd_pre_seq_width = body->oa.o_size;
974         spin_unlock(&d->opd_pre_lock);
975
976         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
977                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
978                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
979 out:
980         if (req)
981                 ptlrpc_req_finished(req);
982
983
984         /*
985          * If rc is zero, the pre-creation window should have been emptied.
986          * Since waking up the herd would be useless without pre-created
987          * objects, we defer the signal to osp_precreate_send() in that case.
988          */
989         if (rc != 0) {
990                 if (update_status) {
991                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
992                                d->opd_obd->obd_name, rc);
993                         /* we can't proceed from here, the OST seems to
994                          * be in bad shape, better to wait for
995                          * a new instance of the server and repeat
996                          * from the beginning. Notify possible waiters
997                          * that this OSP isn't quite functional yet */
998                         osp_pre_update_status(d, rc);
999                 } else {
1000                         wake_up_all(&d->opd_pre_user_waitq);
1001                 }
1002         } else {
1003                 spin_lock(&d->opd_pre_lock);
1004                 d->opd_pre_recovering = 0;
1005                 spin_unlock(&d->opd_pre_lock);
1006                 d->opd_cleanup_orphans_done = true;
1007         }
1008
1009         RETURN(rc);
1010 }
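/*
 * Worked example for the window reset above (illustrative numbers): if
 * the MDT's opd_last_used_fid has f_oid = 1000 while the OST reports a
 * last precreated id of only 940, diff = 60 > 0, so opd_pre_create_count
 * becomes OST_MIN_PRECREATE + 60 and opd_pre_last_created_fid is kept at
 * the locally known 1000, so the following precreate covers the gap.  If
 * the OST is ahead (diff <= 0), its value is taken instead and the window
 * is reset to OST_MIN_PRECREATE.
 */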
1011
1012 /**
1013  * Update precreate status using statfs data
1014  *
1015  * The function decides whether this OSP should be used for new objects.
1016  * IOW, whether this OST is used up or has some free space. Cached statfs
1017  * data is used to make this decision. If the latest statfs request
1018  * failed (see the rc passed to osp_pre_update_status_msfs()), the OSP is
1019  * just marked unavailable right away.
1020  *
1021  * The new statfs data is passed in \a msfs and needs to be stored into
1022  * opd_statfs, but only after the various flags in os_state are set, so
1023  * that the new statfs data is not visible without appropriate flags set.
1024  * As such, there is no need to clear the flags here, since this is called
1025  * with new statfs data, and they should not be cleared if sent from OST.
1026  *
1027  * Add a bit of hysteresis so this flag isn't continually flapping, and
1028  * ensure that new files don't get extremely fragmented due to only a
1029  * small amount of available space in the filesystem.  We want to set
1030  * the ENOSPC/ENOINO flags unconditionally when there is less than the
1031  * reserved size free, and still copy them from the old state when there
1032  * is less than 2*reserved size free space or inodes.
1033  *
1034  * \param[in] d         OSP device
1035  * \param[in] msfs      statfs data
1036  */
1037 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs)
1038 {
1039         u32 old_state = d->opd_statfs.os_state;
1040         u64 available_mb;
1041
1042         /* statfs structure not initialized yet */
1043         if (unlikely(!msfs->os_type))
1044                 return;
1045
1046         /* if the low and high watermarks have not been initialized yet */
1047         if (unlikely(d->opd_reserved_mb_high == 0 &&
1048                      d->opd_reserved_mb_low == 0)) {
1049                 /* Use ~0.1% by default to disable object allocation,
1050                  * and ~0.2% to enable, size in MB, set both watermark
1051                  */
1052                 spin_lock(&d->opd_pre_lock);
1053                 if (d->opd_reserved_mb_high == 0 &&
1054                     d->opd_reserved_mb_low == 0) {
1055                         d->opd_reserved_mb_low = ((msfs->os_bsize >> 10) *
1056                                                   msfs->os_blocks) >> 20;
1057                         if (d->opd_reserved_mb_low < 1)
1058                                 d->opd_reserved_mb_low = 1;
1059                         d->opd_reserved_mb_high =
1060                                 (d->opd_reserved_mb_low << 1) + 1;
1061                 }
1062                 spin_unlock(&d->opd_pre_lock);
1063         }
1064
1065         if (unlikely(d->opd_reserved_ino_high == 0 &&
1066                      d->opd_reserved_ino_low == 0)) {
1067                 /* Use ~0.0001% by default to disallow distributed transactions,
1068                  * and ~0.0002% to allow, set both watermark
1069                  */
1070                 spin_lock(&d->opd_pre_lock);
1071                 if (d->opd_reserved_ino_high == 0 &&
1072                     d->opd_reserved_ino_low == 0) {
1073                         d->opd_reserved_ino_low = msfs->os_ffree >> 20;
1074                         if (d->opd_reserved_ino_low < 32)
1075                                 d->opd_reserved_ino_low = 32;
1076                         d->opd_reserved_ino_high =
1077                                 (d->opd_reserved_ino_low << 1) + 1;
1078                 }
1079                 spin_unlock(&d->opd_pre_lock);
1080         }
1081
1082         available_mb = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
1083         if (msfs->os_ffree < d->opd_reserved_ino_low)
1084                 msfs->os_state |= OS_STATFS_ENOINO;
1085         else if (msfs->os_ffree <= d->opd_reserved_ino_high)
1086                 msfs->os_state |= old_state & OS_STATFS_ENOINO;
1087         /* else don't clear flags in new msfs->os_state sent from OST */
1088
1089         if (available_mb < d->opd_reserved_mb_low)
1090                 msfs->os_state |= OS_STATFS_ENOSPC;
1091         else if (available_mb <= d->opd_reserved_mb_high)
1092                 msfs->os_state |= old_state & OS_STATFS_ENOSPC;
1093         /* else don't clear flags in new msfs->os_state sent from OST */
1094
1095         CDEBUG(D_INFO,
1096                "%s: blocks=%llu free=%llu avail=%llu avail_mb=%llu hwm_mb=%u files=%llu ffree=%llu state=%x: rc = %d\n",
1097                d->opd_obd->obd_name, msfs->os_blocks, msfs->os_bfree,
1098                msfs->os_bavail, available_mb, d->opd_reserved_mb_high,
1099                msfs->os_files, msfs->os_ffree, msfs->os_state,
1100                d->opd_pre ? d->opd_pre_status : 0);
1101
1102         if (!d->opd_pre)
1103                 goto update;
1104
1105         if (msfs->os_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1106                 d->opd_pre_status = -ENOSPC;
1107                 if (!(old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)))
1108                         CDEBUG(D_INFO, "%s: full: state=%x: rc = %x\n",
1109                                d->opd_obd->obd_name, msfs->os_state,
1110                                d->opd_pre_status);
1111                 CDEBUG(D_INFO, "uncommitted changes=%u in_progress=%u\n",
1112                        atomic_read(&d->opd_sync_changes),
1113                        atomic_read(&d->opd_sync_rpcs_in_progress));
1114         } else if (old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1115                 d->opd_pre_status = 0;
1116                 spin_lock(&d->opd_pre_lock);
1117                 d->opd_pre_create_slow = 0;
1118                 d->opd_pre_create_count = OST_MIN_PRECREATE;
1119                 spin_unlock(&d->opd_pre_lock);
1120                 wake_up(&d->opd_pre_waitq);
1121
1122                 CDEBUG(D_INFO,
1123                        "%s: available: state=%x: rc = %d\n",
1124                        d->opd_obd->obd_name, msfs->os_state,
1125                        d->opd_pre_status);
1126         } else {
1127                 /* we only get here if rc == 0 in the caller */
1128                 d->opd_pre_status = 0;
1129         }
1130
1131         /* Object precreation skipped on OST if manually disabled */
1132         if (d->opd_pre_max_create_count == 0)
1133                 msfs->os_state |= OS_STATFS_NOCREATE;
1134         /* else don't clear flags in new msfs->os_state sent from OST */
1135
1136 update:
1137         /* copy only new statfs state to make it visible to MDS threads */
1138         if (&d->opd_statfs != msfs)
1139                 d->opd_statfs = *msfs;
1140 }
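/*
 * Worked example of the default watermarks above (illustrative numbers):
 * for a target with os_bsize = 4096 and os_blocks = 268435456 (1 TiB),
 * opd_reserved_mb_low = ((4096 >> 10) * 268435456) >> 20 = 1024 MB and
 * opd_reserved_mb_high = 2049 MB, i.e. roughly 0.1% / 0.2% of the device.
 * OS_STATFS_ENOSPC is set unconditionally below 1024 MB available and is
 * only carried over from the previous state between 1024 MB and 2049 MB,
 * which provides the hysteresis described above.
 */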
1141
1142 /**
1143  * Initialize FID for precreation
1144  *
1145  * For a just-created new target, a new sequence should be taken.
1146  * The function checks there is no IDIF in use (if the target was
1147  * added with an older version of Lustre), then requests a new
1148  * sequence from FLDB using the regular protocol. This new
1149  * sequence is then stored on persistent storage synchronously to prevent
1150  * possible object leakage (for details see the description of
1151  * osp_precreate_rollover_new_seq()).
1152  *
1153  * \param[in] osp       OSP device
1154  *
1155  * \retval 0            on success
1156  * \retval negative     negated errno on error
1157  */
1158 static int osp_init_pre_fid(struct lu_env *env, struct osp_device *osp)
1159 {
1160         struct osp_thread_info *osi;
1161         struct lu_client_seq *cli_seq;
1162         struct lu_fid *last_fid;
1163         int rc;
1164
1165         ENTRY;
1166         LASSERT(osp->opd_pre != NULL);
1167
1168         if (CFS_FAIL_CHECK(OBD_FAIL_OSP_FAIL_SEQ_ALLOC)) {
1169                 unsigned int timeout = cfs_fail_val ?: 1;
1170
1171                 schedule_timeout_uninterruptible(cfs_time_seconds(timeout));
1172                 RETURN(-EIO);
1173         }
1174
1175         /* Let's check if the current last_seq/fid is valid,
1176          * otherwise request new sequence from the controller */
1177         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1178                 /* Non-MDT0 can only use normal sequence for
1179                  * OST objects */
1180                 if (fid_is_norm(&osp->opd_last_used_fid))
1181                         RETURN(0);
1182         } else {
1183                 /* Initially MDT0 will start with IDIF, after
1184                  * that it will request new sequence from the
1185                  * controller */
1186                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1187                     fid_is_norm(&osp->opd_last_used_fid))
1188                         RETURN(0);
1189         }
1190
1191         if (!fid_is_zero(&osp->opd_last_used_fid))
1192                 CWARN("%s: invalid last used fid "DFID
1193                       ", try to get new sequence.\n",
1194                       osp->opd_obd->obd_name,
1195                       PFID(&osp->opd_last_used_fid));
1196
1197         osi = osp_env_info(env);
1198         last_fid = &osi->osi_fid;
1199         fid_zero(last_fid);
1200         /* For a freshly formatted fs, it will allocate a new sequence first */
1201         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1202                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1203                 rc = seq_client_get_seq(env, cli_seq, &last_fid->f_seq);
1204                 if (rc != 0) {
1205                         CERROR("%s: alloc fid error: rc = %d\n",
1206                                osp->opd_obd->obd_name, rc);
1207                         GOTO(out, rc);
1208                 }
1209         } else {
1210                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1211         }
1212         last_fid->f_oid = 1;
1213         last_fid->f_ver = 0;
1214
1215         spin_lock(&osp->opd_pre_lock);
1216         osp->opd_last_used_fid = *last_fid;
1217         osp->opd_pre_used_fid = *last_fid;
1218         osp->opd_pre_last_created_fid = *last_fid;
1219         spin_unlock(&osp->opd_pre_lock);
1220         rc = osp_write_last_oid_seq_files(env, osp, last_fid, 1);
1221         if (rc != 0) {
1222                 CERROR("%s: write fid error: rc = %d\n",
1223                        osp->opd_obd->obd_name, rc);
1224                 GOTO(out, rc);
1225         }
1226 out:
1227         RETURN(rc);
1228 }
1229
1230 struct opt_args {
1231         struct osp_device       *opta_dev;
1232         struct lu_env           opta_env;
1233         struct completion       *opta_started;
1234 };
1235 /**
1236  * The core of precreate functionality
1237  *
1238  * The function implements the main precreation loop. Basically it
1239  * involves connecting to the target, precreate FID initialization,
1240  * identifying and removing orphans, then serving precreation. As
1241  * part of the latter, the thread is responsible for statfs data
1242  * updates. The precreation is mostly driven by other threads
1243  * asking for new OST objects - those askers wake the thread when
1244  * the number of precreated objects reaches the low watermark.
1245  * After a disconnect, the sequence above repeats. This keeps going
1246  * until the thread is requested to stop.
1247  *
1248  * \param[in] _args     private data of the thread (the OSP device to handle)
1249  *
1250  * \retval 0            on success
1251  * \retval negative     negated errno on error
1252  */
1253 static int osp_precreate_thread(void *_args)
1254 {
1255         struct opt_args         *args = _args;
1256         struct osp_device       *d = args->opta_dev;
1257         struct lu_env           *env = &args->opta_env;
1258         int                      rc;
1259
1260         ENTRY;
1261
1262         complete(args->opta_started);
1263
1264         /* wait for connection from the layers above */
1265         wait_event_idle(d->opd_pre_waitq,
1266                         kthread_should_stop() ||
1267                         d->opd_obd->u.cli.cl_seq->lcs_exp != NULL);
1268
1269         while (!kthread_should_stop()) {
1270                 /*
1271                  * need to be connected to OST
1272                  */
1273                 while (!kthread_should_stop()) {
1274                         if ((d->opd_pre == NULL || d->opd_pre_recovering) &&
1275                             d->opd_imp_connected &&
1276                             !d->opd_got_disconnected)
1277                                 break;
1278                         wait_event_idle(d->opd_pre_waitq,
1279                                         kthread_should_stop() ||
1280                                         d->opd_new_connection);
1281
1282                         if (!d->opd_new_connection)
1283                                 continue;
1284
1285                         CFS_FAIL_TIMEOUT(OBD_FAIL_OSP_CON_EVENT_DELAY,
1286                                          cfs_fail_val);
1287                         d->opd_new_connection = 0;
1288                         d->opd_got_disconnected = 0;
1289                         break;
1290                 }
1291
1292                 if (kthread_should_stop())
1293                         break;
1294
1295                 if (d->opd_pre) {
1296                         LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1297                         LASSERT(d->opd_obd->u.cli.cl_seq->lcs_exp != NULL);
1298
1299                         /* Init fid for osp_precreate if necessary */
1300                         rc = osp_init_pre_fid(env, d);
1301                         if (rc != 0) {
1302                                 CERROR("%s: init pre fid error: rc = %d\n",
1303                                                 d->opd_obd->obd_name, rc);
1304                                 continue;
1305                         }
1306                 }
1307
1308                 if (osp_statfs_update(env, d)) {
1309                         if (wait_event_idle_timeout(d->opd_pre_waitq,
1310                                                     kthread_should_stop(),
1311                                                     cfs_time_seconds(5)) == 0)
1312                                 l_wait_event_abortable(
1313                                         d->opd_pre_waitq,
1314                                         kthread_should_stop());
1315                         continue;
1316                 }
1317
1318                 if (d->opd_pre) {
1319                         /*
1320                          * Clean up orphans or recreate missing objects.
1321                          */
1322                         rc = osp_precreate_cleanup_orphans(env, d);
1323                         if (rc != 0) {
1324                                 schedule_timeout_interruptible(cfs_time_seconds(1));
1325                                 continue;
1326                         }
1327                 }
1328
1329                 /*
1330                  * connected, can handle precreates now
1331                  */
1332                 while (!kthread_should_stop()) {
1333                         wait_event_idle(d->opd_pre_waitq,
1334                                         kthread_should_stop() ||
1335                                         (osp_precreate_is_low(d) &&
1336                                          !(osp_precreate_end_seq(d) &&
1337                                            osp_objs_precreated(d) != 0)) ||
1338                                         osp_statfs_need_update(d) ||
1339                                         d->opd_got_disconnected);
1340
1341                         if (kthread_should_stop())
1342                                 break;
1343
1344                         /* something happened to the connection,
1345                          * have to start from the beginning */
1346                         if (d->opd_got_disconnected)
1347                                 break;
1348
1349                         if (osp_statfs_need_update(d))
1350                                 if (osp_statfs_update(env, d))
1351                                         break;
1352
1353                         if (d->opd_pre == NULL)
1354                                 continue;
1355
1356                         /* To avoid handling different sequences in precreate/orphan
1357                          * cleanup, hold precreation until the current seq is
1358                          * used up. */
1359                         if (unlikely(osp_precreate_end_seq(d))) {
1360                                 if (osp_objs_precreated(d) == 0) {
1361                                         rc = osp_precreate_rollover_new_seq(env, d);
1362                                         if (rc)
1363                                                 continue;
1364                                 } else {
1365                                         continue;
1366                                 }
1367                         }
1368
1369                         if (osp_precreate_is_low(d)) {
1370                                 rc = osp_precreate_send(env, d);
1371                         /* osp_precreate_send() sets opd_pre_status
1372                          * in case of error, which prevents further use
1373                          * of the failed device. */
1374                                 if (rc < 0 && rc != -ENOSPC &&
1375                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1376                                         CERROR("%s: cannot precreate objects: rc = %d\n",
1377                                                d->opd_obd->obd_name, rc);
1379                         }
1380                 }
1381         }
1382
1383         lu_env_fini(env);
1384         OBD_FREE_PTR(args);
1385
1386         RETURN(0);
1387 }
1388
1389 /**
1390  * Check whether to stop waiting for precreated objects.
1391  *
1392  * The caller wanting a new OST object can't wait indefinitely. The
1393  * function checks a few conditions, including newly available OST
1394  * objects, a disconnected OST, lack of space with no pending destroys,
1395  * etc. In other words, it checks whether the current OSP state makes
1396  * further waiting worthwhile or it's better to give up.
1397  *
1398  * \param[in] env       LU environment provided by the caller
1399  * \param[in] d         OSP device
1400  *
1401  * \retval              0 - keep waiting, 1 - stop waiting (ready or no hope)
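 *
 * A minimal usage sketch - this mirrors how osp_precreate_reserve() below
 * uses the helper as a wait predicate (names taken from that caller):
 *
 *	rc = wait_event_idle_timeout(d->opd_pre_user_waitq,
 *				     osp_precreate_ready_condition(env, d),
 *				     cfs_time_seconds(obd_timeout));
 *	if (rc == 0)
 *		CDEBUG(D_HA, "%s: timed out waiting for precreated objects\n",
 *		       d->opd_obd->obd_name);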
1402  */
1403 static int osp_precreate_ready_condition(const struct lu_env *env,
1404                                          struct osp_device *d)
1405 {
1406         /* Bail out if I/O to the OST fails */
1407         if (d->opd_pre_status != 0 &&
1408             d->opd_pre_status != -EAGAIN &&
1409             d->opd_pre_status != -ENODEV &&
1410             d->opd_pre_status != -ENOTCONN &&
1411             d->opd_pre_status != -ENOSPC) {
1412                 /* DEBUG LU-3230 */
1413                 if (d->opd_pre_status != -EIO)
1414                         CERROR("%s: precreate failed opd_pre_status %d\n",
1415                                d->opd_obd->obd_name, d->opd_pre_status);
1416                 return 1;
1417         }
1418
1419         if (d->opd_pre_recovering || d->opd_force_creation)
1420                 return 0;
1421
1422         /* ready if got enough precreated objects */
1423         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
1424         if (d->opd_pre_reserved + 1 < osp_objs_precreated(d))
1425                 return 1;
1426
1427         /* ready if OST reported no space and no destroys in progress */
1428         if (atomic_read(&d->opd_sync_changes) +
1429             atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
1430             d->opd_pre_status == -ENOSPC)
1431                 return 1;
1432
1433         return 0;
1434 }
1435
1436 /**
1437  * Reserve object in precreate pool
1438  *
1439  * When the caller wants to create a new object on this target (target
1440  * represented by the given OSP), it should declare this intention using
1441  * a regular ->dt_declare_create() OSD API method. OSP then tries to
1442  * reserve an object in the existing precreated pool or, if \a can_block
1443  * is true, waits up to obd_timeout for an available object to appear
1444  * (a dedicated thread does the real precreation in the background).
1445  * The object can be consumed later with osp_precreate_get_fid() or
1446  * released with a call to lu_object_put(). Notice the function doesn't
1447  * reserve a specific ID, just some ID; the actual assignment happens in
1448  * osp_precreate_get_fid(). If space on the target is short and there is
1449  * a pending object destroy, the function forces a local commit to speed
1450  * up space release (see osp_sync.c for the details).
1451  *
1452  * \param[in] env       LU environment provided by the caller
1453  * \param[in] d         OSP device
1454  *
1455  * \retval              0 on success
1456  * \retval              -ENOSPC when no space on OST
1457  * \retval              -EAGAIN try later, slow precreation in progress
1458  * \retval              -EIO when no access to OST
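 *
 * A minimal usage sketch, pairing the reservation with the FID assignment
 * as described above (hypothetical caller; error handling and the actual
 * object creation are omitted):
 *
 *	struct lu_fid fid;
 *	int rc;
 *
 *	rc = osp_precreate_reserve(env, d, true);
 *	if (rc == 0) {
 *		rc = osp_precreate_get_fid(env, d, &fid);
 *		if (rc == 0)
 *			CDEBUG(D_INFO, "reserved "DFID"\n", PFID(&fid));
 *	}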
1459  */
1460 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d,
1461                           bool can_block)
1462 {
1463         time64_t expire = ktime_get_seconds() + obd_timeout;
1464         int precreated, rc, synced = 0;
1465
1466         ENTRY;
1467
1468         LASSERTF(osp_objs_precreated(d) >= 0, "Last created FID "DFID
1469                  "Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1470                  PFID(&d->opd_pre_used_fid));
1471
1472         /* opd_pre_max_create_count == 0 means creation on this OST is disabled */
1473         if (d->opd_pre_max_create_count == 0)
1474                 RETURN(-ENOBUFS);
1475
1476         /*
1477          * wait till:
1478          *  - preallocation is done
1479          *  - no free space expected soon
1480          *  - can't connect to OST for too long (obd_timeout)
1481          *  - OST can allocate fid sequence.
1482          */
1483         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1484                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1485
1486                 spin_lock(&d->opd_pre_lock);
1487                 precreated = osp_objs_precreated_nolock(d);
1488                 /*
1489                  * increase number of precreations
1490                  */
1491                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1492                     d->opd_pre_create_slow == 0 &&
1493                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1494                         d->opd_pre_create_slow = 1;
1495                         d->opd_pre_create_count *= 2;
1496                 }
1497
1498                 if (!d->opd_pre_recovering && !d->opd_force_creation) {
1499                         if (precreated > d->opd_pre_reserved) {
1500                                 d->opd_pre_reserved++;
1501                                 spin_unlock(&d->opd_pre_lock);
1502                                 rc = 0;
1503
1504                                 /*
1505                                  * XXX: don't wake up if precreation
1506                                  * is in progress
1507                                  */
1508                                 if (osp_precreate_is_low_nolock(d) &&
1509                                    !osp_precreate_end_seq_nolock(d))
1510                                         wake_up(&d->opd_pre_waitq);
1511
1512                                 break;
1513                         } else if (unlikely(precreated &&
1514                                             osp_precreate_end_seq_nolock(d))) {
1515                                 /*
1516                                  * precreate pool is reaching the end of the
1517                                  * current seq, and doesn't have enough objects
1518                                  */
1519                                 rc = -ENOSPC;
1520                                 spin_unlock(&d->opd_pre_lock);
1521                                 break;
1522                         }
1523                 }
1524                 spin_unlock(&d->opd_pre_lock);
1525
1526                 /*
1527                  * all precreated objects have been used and the no-space
1528                  * status leaves us no chance to succeed very soon;
1529                  * but if there is a destroy in progress, then we should
1530                  * wait till that is done - some space might be released
1531                  */
1532                 if (unlikely(rc == -ENOSPC)) {
1533                         if (atomic_read(&d->opd_sync_changes) && synced == 0) {
1534                                 /* force local commit to release space */
1535                                 dt_commit_async(env, d->opd_storage);
1536                                 osp_sync_check_for_work(d);
1537                                 synced = 1;
1538                         }
1539                         if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
1540                                 /* just wait till destroys are done
1541                                  * see wait_event_idle_timeout() below
1542                                  */
1543                         }
1544                         if (atomic_read(&d->opd_sync_changes) +
1545                             atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
1546                                 /* no hope for free space */
1547                                 break;
1548                         }
1549                 }
1550
1551                 /* XXX: don't wake up if precreation is in progress */
1552                 wake_up(&d->opd_pre_waitq);
1553
1554                 if (ktime_get_seconds() >= expire) {
1555                         rc = -ETIMEDOUT;
1556                         break;
1557                 }
1558
1559                 if (!can_block) {
1560                         LASSERT(d->opd_pre);
1561                         rc = -ENOBUFS;
1562                         break;
1563                 }
1564
1565                 CDEBUG(D_INFO, "%s: Sleeping on objects\n",
1566                        d->opd_obd->obd_name);
1567                 if (wait_event_idle_timeout(
1568                             d->opd_pre_user_waitq,
1569                             osp_precreate_ready_condition(env, d),
1570                             cfs_time_seconds(obd_timeout)) == 0) {
1571                         CDEBUG(D_HA,
1572                                "%s: slow creates, last="DFID", next="DFID", "
1573                                "reserved=%llu, sync_changes=%u, "
1574                                "sync_rpcs_in_progress=%d, status=%d\n",
1575                                d->opd_obd->obd_name,
1576                                PFID(&d->opd_pre_last_created_fid),
1577                                PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1578                                atomic_read(&d->opd_sync_changes),
1579                                atomic_read(&d->opd_sync_rpcs_in_progress),
1580                                d->opd_pre_status);
1581                 } else {
1582                         CDEBUG(D_INFO, "%s: Woken up, status=%d\n",
1583                                d->opd_obd->obd_name, d->opd_pre_status);
1584                 }
1585         }
1586
1587         RETURN(rc);
1588 }
1589
1590 /**
1591  * Get a FID from precreation pool
1592  *
1593  * The function is a companion to osp_precreate_reserve() - it assigns
1594  * a specific FID from the precreate pool. It should be called only
1595  * if the call to osp_precreate_reserve() was successful. The function
1596  * updates local storage to remember the highest object ID referenced
1597  * by the node in the given sequence.
1598  *
1599  * A very important detail: this is supposed to be called once the
1600  * transaction is started, so the on-disk update will be atomic with the
1601  * data (like LOVEA) referring to this object. Then the object won't be
1602  * leaked: either it's referenced by the committed transaction or it's
1603  * subject to the orphan cleanup procedure.
1604  *
1605  * \param[in] env       LU environment provided by the caller
1606  * \param[in] d         OSP device
1607  * \param[out] fid      generated FID
1608  *
1609  * \retval 0            on success
1610  * \retval negative     negated errno on error
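 *
 * A minimal sketch of the intended calling order (hypothetical caller;
 * \a fid points at the caller's struct lu_fid, and the key point is that
 * the FID is assigned only after the transaction has been started):
 *
 *	rc = osp_precreate_reserve(env, d, true);
 *	if (rc != 0)
 *		return rc;
 *	... declare updates and start the transaction here ...
 *	rc = osp_precreate_get_fid(env, d, fid);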
1611  */
1612 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1613                           struct lu_fid *fid)
1614 {
1615         struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
1616
1617         /* grab next id from the pool */
1618         spin_lock(&d->opd_pre_lock);
1619
1620         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1621                              &d->opd_pre_last_created_fid) < 0,
1622                  "next fid "DFID" > last created fid "DFID"\n",
1623                  PFID(&d->opd_pre_used_fid),
1624                  PFID(&d->opd_pre_last_created_fid));
1625
1626         /* Non-IDIF FIDs shouldn't get here with OID == OBIF_MAX_OID. For IDIF,
1627          * f_oid wraps and "f_seq" (holding high 16 bits of ID) needs increment
1628          */
1629         if (fid_is_idif(pre_used_fid) &&
1630             unlikely(fid_oid(pre_used_fid) == OBIF_MAX_OID)) {
1631                 struct ost_id oi;
1632                 __u32 idx = fid_idif_ost_idx(pre_used_fid);
1633
1634                 fid_to_ostid(pre_used_fid, &oi);
1635                 oi.oi.oi_id++;
1636                 ostid_to_fid(pre_used_fid, &oi, idx);
1637         } else {
1638                 pre_used_fid->f_oid++;
1639         }
1640
1641         memcpy(fid, pre_used_fid, sizeof(*fid));
1642         d->opd_pre_reserved--;
1643         /*
1644          * last_used_id must be changed along with getting a new id,
1645          * otherwise we might miscalculate the gap causing object loss or leak
1646          */
1647         osp_update_last_fid(d, fid, false);
1648         spin_unlock(&d->opd_pre_lock);
1649
1650         /*
1651          * probably main thread suspended orphan cleanup till
1652          * all reservations are released, see comment in
1653          * osp_precreate_thread() just before orphan cleanup
1654          */
1655         if (unlikely(d->opd_pre_reserved == 0 &&
1656                      (d->opd_pre_recovering || d->opd_pre_status)))
1657                 wake_up(&d->opd_pre_waitq);
1658
1659         return 0;
1660 }
1661
1662 /*
1663  * Set the regular size attribute on an object
1664  *
1665  * When striping is created late, it's possible that the size is already
1666  * initialized on the file. Then the new striping should inherit the size
1667  * from the file. The function sets the size on the object using the
1668  * regular protocol (OST_PUNCH).
1669  * XXX: should be re-implemented using OUT ?
1670  *
1671  * \param[in] env       LU environment provided by the caller
1672  * \param[in] dt        object
1673  * \param[in] size      size to set.
1674  *
1675  * \retval 0            on success
1676  * \retval negative     negated errno on error
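 *
 * A minimal usage sketch (hypothetical caller that created a striping
 * component late; \a dt is the OSP object and known_size is the file size
 * already known on the MDT side):
 *
 *	rc = osp_object_truncate(env, dt, known_size);
 *	if (rc == -EINPROGRESS)
 *		... the client is expected to resend, see the comments below ...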
1677  */
1678 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1679                         __u64 size)
1680 {
1681         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1682         struct ptlrpc_request   *req = NULL;
1683         struct obd_import       *imp;
1684         struct ost_body         *body;
1685         struct obdo             *oa = NULL;
1686         int                      rc;
1687
1688         ENTRY;
1689
1690         imp = d->opd_obd->u.cli.cl_import;
1691         LASSERT(imp);
1692
1693         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1694         if (req == NULL)
1695                 RETURN(-ENOMEM);
1696
1697         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1698         if (rc) {
1699                 ptlrpc_request_free(req);
1700                 RETURN(rc);
1701         }
1702
1703         /*
1704          * XXX: decide how to handle resend here:
1705          * if we don't resend, then the client may see a wrong file size
1706          * and will also get -EAGAIN !!
1707          * if we do resend, then the MDS thread can get stuck for quite long
1708          * (see LU-7975 and sanity/test_27F use cases)
1709          * but let's decide not to resend/delay this truncate request to OST
1710          * and allow the Client to decide to resend, in a less aggressive way
1711          * from after_reply(), by returning -EINPROGRESS instead of
1712          * -EAGAIN/-EWOULDBLOCK upon return from ptlrpc_queue_wait() at the
1713          * end of this routine
1714          */
1715         req->rq_no_resend = req->rq_no_delay = 1;
1716
1717         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1718         ptlrpc_at_set_req_timeout(req);
1719
1720         OBD_ALLOC_PTR(oa);
1721         if (oa == NULL)
1722                 GOTO(out, rc = -ENOMEM);
1723
1724         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1725         LASSERT(rc == 0);
1726         oa->o_size = size;
1727         oa->o_blocks = OBD_OBJECT_EOF;
1728         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1729                       OBD_MD_FLID | OBD_MD_FLGROUP;
1730
1731         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1732         LASSERT(body);
1733         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1734
1735         /* XXX: capa support? */
1736         /* osc_pack_capa(req, body, capa); */
1737
1738         ptlrpc_request_set_replen(req);
1739
1740         rc = ptlrpc_queue_wait(req);
1741         if (rc) {
1742                 /* -EAGAIN/-EWOULDBLOCK means the OST is unreachable right now;
1743                  * since we have decided not to resend/delay, this could lead to
1744                  * a wrong size being seen at the Client side, and even make a
1745                  * process trying to open the file fail if it does not handle
1746                  * -EAGAIN itself. So it is better to return -EINPROGRESS and
1747                  * leave the resend decision to the Client side in after_reply()
1748                  */
1749                 if (rc == -EAGAIN) {
1750                         rc = -EINPROGRESS;
1751                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1752                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1753                                "resend\n");
1754                 } else {
1755                         CERROR("can't punch object: rc = %d\n", rc);
1756                 }
1757         }
1758 out:
1759         ptlrpc_req_finished(req);
1760         if (oa)
1761                 OBD_FREE_PTR(oa);
1762         RETURN(rc);
1763 }
1764
1765 /**
1766  * Initialize precreation functionality of OSP
1767  *
1768  * Prepares all the internal structures; the precreate thread itself is
 * started separately, in osp_init_statfs()
1769  *
1770  * \param[in] d         OSP device
1771  *
1772  * \retval 0            on success
1773  * \retval negative     negated errno on error
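 *
 * A minimal sketch of the expected setup/teardown pairing (hypothetical
 * device init/fini path, error handling omitted):
 *
 *	rc = osp_init_precreate(d);
 *	rc = osp_init_statfs(d);	... starts the precreate thread ...
 *
 *	osp_statfs_fini(d);		... stops the thread and the timer ...
 *	osp_precreate_fini(d);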
1774  */
1775 int osp_init_precreate(struct osp_device *d)
1776 {
1777         ENTRY;
1778
1779         OBD_ALLOC_PTR(d->opd_pre);
1780         if (d->opd_pre == NULL)
1781                 RETURN(-ENOMEM);
1782
1783         /* initially precreation isn't ready */
1784         init_waitqueue_head(&d->opd_pre_user_waitq);
1785         d->opd_pre_status = -EAGAIN;
1786         fid_zero(&d->opd_pre_used_fid);
1787         d->opd_pre_used_fid.f_oid = 1;
1788         fid_zero(&d->opd_pre_last_created_fid);
1789         d->opd_pre_last_created_fid.f_oid = 1;
1790         d->opd_last_id = 0;
1791         d->opd_pre_reserved = 0;
1792         d->opd_pre_seq_width = LUSTRE_DATA_SEQ_MAX_WIDTH;
1793         d->opd_got_disconnected = 1;
1794         d->opd_pre_create_slow = 0;
1795         d->opd_pre_create_count = OST_MIN_PRECREATE;
1796         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1797         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1798         d->opd_reserved_mb_high = 0;
1799         d->opd_reserved_mb_low = 0;
1800         d->opd_cleanup_orphans_done = false;
1801         d->opd_force_creation = false;
1802
1803         RETURN(0);
1804 }
1805
1806 /**
1807  * Finish precreate functionality of OSP
1808  *
1809  * Frees the precreation state allocated by osp_init_precreate(). The
1810  * precreate thread and the statfs update timer are stopped separately,
1811  * in osp_statfs_fini().
1812  *
1813  * \param[in] d         OSP device
1814  */
1815 void osp_precreate_fini(struct osp_device *d)
1816 {
1817         ENTRY;
1818
1819         if (d->opd_pre == NULL)
1820                 RETURN_EXIT;
1821
1822         OBD_FREE_PTR(d->opd_pre);
1823         d->opd_pre = NULL;
1824
1825         EXIT;
1826 }
1827
1828 int osp_init_statfs(struct osp_device *d)
1829 {
1830         struct task_struct      *task;
1831         struct opt_args         *args;
1832         DECLARE_COMPLETION_ONSTACK(started);
1833         int                     rc;
1834
1835         ENTRY;
1836
1837         spin_lock_init(&d->opd_pre_lock);
1838         init_waitqueue_head(&d->opd_pre_waitq);
1839
1840         /*
1841          * Initialize statfs-related things
1842          */
1843         d->opd_statfs_maxage = 5; /* default update interval */
1844         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
1845                                                 1000 * NSEC_PER_SEC);
1846         CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
1847                ktime_get_ns(),
1848                ktime_to_ns(d->opd_statfs_fresh_till));
1849         cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
1850                         (unsigned long)d, 0);
1851
1852         if (d->opd_storage->dd_rdonly)
1853                 RETURN(0);
1854
1855         OBD_ALLOC_PTR(args);
1856         if (!args)
1857                 RETURN(0);
1858         args->opta_dev = d;
1859         args->opta_started = &started;
1860         rc = lu_env_init(&args->opta_env,
1861                          d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1862         if (rc) {
1863                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1864                        rc);
1865                 OBD_FREE_PTR(args);
1866                 RETURN(0);
1867         }
1868
1869         /*
1870          * start thread handling precreation and statfs updates
1871          */
1872         task = kthread_create(osp_precreate_thread, args,
1873                               "osp-pre-%u-%u", d->opd_index, d->opd_group);
1874         if (IS_ERR(task)) {
1875                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1876                 lu_env_fini(&args->opta_env);
1877                 OBD_FREE_PTR(args);
1878                 RETURN(PTR_ERR(task));
1879         }
1880         d->opd_pre_task = task;
1881         wake_up_process(task);
1882         wait_for_completion(&started);
1883
1884         RETURN(0);
1885 }
1886
1887 void osp_statfs_fini(struct osp_device *d)
1888 {
1889         struct task_struct *task = d->opd_pre_task;
1890         ENTRY;
1891
1892         timer_delete(&d->opd_statfs_timer);
1893
1894         d->opd_pre_task = NULL;
1895         if (task)
1896                 kthread_stop(task);
1897
1898         EXIT;
1899 }