1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  *
31  * lustre/osp/osp_precreate.c
32  *
33  * Lustre OST Proxy Device
34  *
35  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
36  * Author: Mikhail Pershin <mike.pershin@intel.com>
37  * Author: Di Wang <di.wang@intel.com>
38  */
39
40 #define DEBUG_SUBSYSTEM S_MDS
41
42 #include <linux/kthread.h>
43
44 #include <lustre_obdo.h>
45
46 #include "osp_internal.h"
47
48 /*
49  * there are two specific states to take care of:
50  *
51  * = import is disconnected =
52  *
53  * = import is inactive =
54  *   in this case osp_declare_create() returns an error
55  *
56  */
57
58 /**
59  * Check whether statfs data is expired
60  *
61  * The OSP device caches statfs data for the target; the function checks
62  * whether that data has expired or not.
63  *
64  * \param[in] d         OSP device
65  *
66  * \retval              0 - not expired, 1 - expired
67  */
68 static inline int osp_statfs_need_update(struct osp_device *d)
69 {
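        /* expired once the current time reaches opd_statfs_fresh_till */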
70         return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
71 }
72
73 /*
74  * OSP tries to maintain a pool of available objects so that calls to create
75  * objects don't block most of the time.
76  *
77  * Each time OSP gets connected to the OST, we should start from precreation cleanup.
78  */
79 static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
80 {
81         struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
82
83         LASSERT(d);
84         if (d->opd_pre_task)
85                 wake_up(&d->opd_pre_waitq);
86 }
87
88 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs);
89
90 /*
91  * The function updates the current precreation status if it is broken, and
92  * updates the cached statfs state if functional, then wakes up waiters.
93  * We don't clear opd_pre_status directly here, but rather leave this
94  * to osp_pre_update_msfs() to do if everything is OK so that we don't
95  * have a race to clear opd_pre_status and then set it to -ENOSPC again.
96  *
97  * \param[in] d         OSP device
98  * \param[in] msfs      statfs data
99  * \param[in] rc        new precreate status for device \a d
100  */
101 static void osp_pre_update_status_msfs(struct osp_device *d,
102                                        struct obd_statfs *msfs, int rc)
103 {
104         if (rc)
105                 d->opd_pre_status = rc;
106         else
107                 osp_pre_update_msfs(d, msfs);
108
109         wake_up(&d->opd_pre_user_waitq);
110 }
111
112 /* Pass in the old statfs data in case the limits have changed */
113 void osp_pre_update_status(struct osp_device *d, int rc)
114 {
115         osp_pre_update_status_msfs(d, &d->opd_statfs, rc);
116 }
117
118
119 /**
120  * RPC interpret callback for OST_STATFS RPC
121  *
122  * An interpretation callback called by ptlrpc for OST_STATFS RPC when it is
123  * replied by the target. It's used to maintain statfs cache for the target.
124  * The function fills data from the reply if successful and schedules another
125  * update.
126  *
127  * \param[in] env       LU environment provided by the caller
128  * \param[in] req       RPC replied
129  * \param[in] aa        callback data
130  * \param[in] rc        RPC result
131  *
132  * \retval 0            on success
133  * \retval negative     negated errno on error
134  */
135 static int osp_statfs_interpret(const struct lu_env *env,
136                                 struct ptlrpc_request *req, void *args, int rc)
137 {
138         union ptlrpc_async_args *aa = args;
139         struct obd_import *imp = req->rq_import;
140         struct obd_statfs *msfs;
141         struct obd_statfs *sfs;
142         struct osp_device *d;
143         u64 maxage_ns;
144
145         ENTRY;
146
147         aa = ptlrpc_req_async_args(aa, req);
148         d = aa->pointer_arg[0];
149         LASSERT(d);
150
151         if (rc != 0)
152                 GOTO(out, rc);
153
154         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
155         if (msfs == NULL)
156                 GOTO(out, rc = -EPROTO);
157
158         if (d->opd_pre)
159                 osp_pre_update_status_msfs(d, msfs, 0);
160         else
161                 d->opd_statfs = *msfs;
162
163         /* schedule next update */
164         maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
165         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
166         mod_timer(&d->opd_statfs_timer,
167                   jiffies + cfs_time_seconds(d->opd_statfs_maxage));
168         d->opd_statfs_update_in_progress = 0;
169
170         sfs = &d->opd_statfs;
171         CDEBUG(D_CACHE, "%s (%p): %llu blocks, %llu free, %llu avail, "
172                "%u bsize, %u reserved mb low, %u reserved mb high,"
173                "%llu files, %llu free files\n", d->opd_obd->obd_name, d,
174                sfs->os_blocks, sfs->os_bfree, sfs->os_bavail, sfs->os_bsize,
175                d->opd_reserved_mb_low, d->opd_reserved_mb_high,
176                sfs->os_files, sfs->os_ffree);
177
178         RETURN(0);
179 out:
180         /* couldn't update statfs, try again with a small delay */
181         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
182         d->opd_statfs_update_in_progress = 0;
183         if (d->opd_pre && d->opd_pre_task)
184                 wake_up(&d->opd_pre_waitq);
185
186         if (req->rq_import_generation == imp->imp_generation)
187                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
188                        d->opd_obd->obd_name, rc);
189         RETURN(rc);
190 }
191
192 /**
193  * Send OST_STATFS RPC
194  *
195  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
196  * Also disables the scheduled update timer, as at times OSP may need to
197  * refresh statfs data before it expires. The function doesn't block; instead
198  * the interpretation callback osp_statfs_interpret() is used.
199  *
200  * \param[in] d         OSP device
201  */
202 static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
203 {
204         u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
205         struct ptlrpc_request   *req;
206         struct obd_import       *imp;
207         union ptlrpc_async_args *aa;
208         int rc;
209
210         ENTRY;
211
212         CDEBUG(D_CACHE, "going to update statfs\n");
213
214         imp = d->opd_obd->u.cli.cl_import;
215         LASSERT(imp);
216
217         req = ptlrpc_request_alloc(imp,
218                            d->opd_pre ? &RQF_OST_STATFS : &RQF_MDS_STATFS);
219         if (req == NULL)
220                 RETURN(-ENOMEM);
221
222         rc = ptlrpc_request_pack(req,
223                          d->opd_pre ? LUSTRE_OST_VERSION : LUSTRE_MDS_VERSION,
224                          d->opd_pre ? OST_STATFS : MDS_STATFS);
225         if (rc) {
226                 ptlrpc_request_free(req);
227                 RETURN(rc);
228         }
229         ptlrpc_request_set_replen(req);
230         if (d->opd_pre)
231                 req->rq_request_portal = OST_CREATE_PORTAL;
232         ptlrpc_at_set_req_timeout(req);
233
234         req->rq_interpret_reply = osp_statfs_interpret;
235         aa = ptlrpc_req_async_args(aa, req);
236         aa->pointer_arg[0] = d;
237
238         /*
239          * no updates till reply
240          */
241         del_timer(&d->opd_statfs_timer);
242         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
243         d->opd_statfs_update_in_progress = 1;
244
245         ptlrpcd_add_req(req);
246
247         /* we still want to sync changes if no new changes are coming */
248         if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
249                 GOTO(out, rc);
250
251         if (atomic_read(&d->opd_sync_changes)) {
252                 struct thandle *th;
253
254                 th = dt_trans_create(env, d->opd_storage);
255                 if (IS_ERR(th)) {
256                         CERROR("%s: can't sync\n", d->opd_obd->obd_name);
257                         GOTO(out, rc);
258                 }
259                 rc = dt_trans_start_local(env, d->opd_storage, th);
260                 if (rc == 0) {
261                         CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
262                                d->opd_obd->obd_name,
263                                atomic_read(&d->opd_sync_changes));
264                         osp_sync_add_commit_cb_1s(env, d, th);
265                 }
266                 dt_trans_stop(env, d->opd_storage, th);
267         }
268
269 out:
270         RETURN(0);
271 }
272
273 /**
274  * Schedule an immediate update for statfs data
275  *
276  * If the cached statfs data claims no free space, but OSP has got a request
277  * to destroy an object (which probably releases some space), then we may need
278  * to refresh the cached statfs data sooner than planned. The function checks
279  * that no statfs update is in progress and schedules an immediate update if so.
280  * XXX: there might be a case where removed object(s) do not add free space
281  * (empty objects). If the number of such deletions is high, then we can start
282  * to update statfs too often, causing an RPC storm. Some throttling is needed...
283  *
284  * \param[in] d         OSP device where statfs data needs to be refreshed
285  */
286 void osp_statfs_need_now(struct osp_device *d)
287 {
288         if (!d->opd_statfs_update_in_progress) {
289                 /*
290                  * if current status is -ENOSPC (lack of free space on OST)
291                  * then we should poll OST immediately once object destroy
292                  * is replied
293                  */
294                 d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
295                 del_timer(&d->opd_statfs_timer);
296                 wake_up(&d->opd_pre_waitq);
297         }
298 }
299
300 /**
301  * Return number of precreated objects
302  *
303  * A simple helper to calculate the number of precreated objects on the device.
304  *
305  * \param[in] env       LU environment provided by the caller
306  * \param[in] osp       OSP device
307  *
308  * \retval              the number of the precreated objects
309  */
310 static inline int osp_objs_precreated(const struct lu_env *env,
311                                       struct osp_device *osp)
312 {
313         return osp_fid_diff(&osp->opd_pre_last_created_fid,
314                             &osp->opd_pre_used_fid);
315 }
316
317 /**
318  * Check whether the pool of precreated objects is nearly empty
319  *
320  * We should not wait till the pool of precreated objects is exhausted,
321  * because then there would be a long period of OSP being unavailable for
322  * new creations due to a lengthy precreate RPC. Instead we ask for another
323  * precreation ahead of time and hopefully have it ready before the current
324  * pool is empty. Notice this function relies on external locking.
325  *
326  * \param[in] env       LU environment provided by the caller
327  * \param[in] d         OSP device
328  *
329  * \retval              0 - current pool is good enough, 1 - time to precreate
330  */
331 static inline int osp_precreate_near_empty_nolock(const struct lu_env *env,
332                                                   struct osp_device *d)
333 {
334         int window = osp_objs_precreated(env, d);
335
336         /* don't consider new precreation till the OST is healthy and
337          * has free space */
338         return ((window - d->opd_pre_reserved < d->opd_pre_create_count / 2) &&
339                 (d->opd_pre_status == 0));
340 }
341
342 /**
343  * Check pool of precreated objects
344  *
345  * This is protected version of osp_precreate_near_empty_nolock(), check that
346  * for the details.
347  *
348  * \param[in] env       LU environment provided by the caller
349  * \param[in] d         OSP device
350  *
351  * \retval              0 - current pool is good enough, 1 - time to precreate
352  */
353 static inline int osp_precreate_near_empty(const struct lu_env *env,
354                                            struct osp_device *d)
355 {
356         int rc;
357
358         if (d->opd_pre == NULL)
359                 return 0;
360
361         /* XXX: do we really need locking here? */
362         spin_lock(&d->opd_pre_lock);
363         rc = osp_precreate_near_empty_nolock(env, d);
364         spin_unlock(&d->opd_pre_lock);
365         return rc;
366 }
367
368 /**
369  * Check whether the current sequence is exhausted
370  *
371  * Returns a binary result telling whether the given sequence has any IDs
372  * left or not. See osp_fid_end_seq() for the details. This is a lock
373  * protected version of that function.
374  *
375  * \param[in] env       LU environment provided by the caller
376  * \param[in] osp       OSP device
377  *
378  * \retval              0 - the current sequence still has IDs, 1 - it is exhausted
379  */
380 static inline int osp_create_end_seq(const struct lu_env *env,
381                                      struct osp_device *osp)
382 {
383         struct lu_fid *fid = &osp->opd_pre_used_fid;
384         int rc;
385
386         spin_lock(&osp->opd_pre_lock);
387         rc = osp_fid_end_seq(env, fid);
388         spin_unlock(&osp->opd_pre_lock);
389         return rc;
390 }
391
392 /**
393  * Write FID into the last_oid/last_seq files
394  *
395  * The function stores the sequence and the in-sequence id into two dedicated
396  * files. The sync argument can be used to request synchronous commit, so the
397  * function won't return until the updates are committed.
398  *
399  * \param[in] env       LU environment provided by the caller
400  * \param[in] osp       OSP device
401  * \param[in] fid       fid where sequence/id is taken
402  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
403  *
404  * \retval 0            on success
405  * \retval negative     negated errno on error
406  **/
407 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
408                                  struct lu_fid *fid, int sync)
409 {
410         struct osp_thread_info  *oti = osp_env_info(env);
411         struct lu_buf      *lb_oid = &oti->osi_lb;
412         struct lu_buf      *lb_oseq = &oti->osi_lb2;
413         loff_t             oid_off;
414         u64                oid;
415         loff_t             oseq_off;
416         struct thandle    *th;
417         int                   rc;
418         ENTRY;
419
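        /* nothing to persist if the local storage is read-only */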
420         if (osp->opd_storage->dd_rdonly)
421                 RETURN(0);
422
423         /* Note: though f_oid is only 32 bits, we still write a 64-bit oid
424          * to keep compatibility with the previous version. */
425         oid = fid->f_oid;
426         osp_objid_buf_prep(lb_oid, &oid_off,
427                            &oid, osp->opd_index);
428
429         osp_objseq_buf_prep(lb_oseq, &oseq_off,
430                             &fid->f_seq, osp->opd_index);
431
432         th = dt_trans_create(env, osp->opd_storage);
433         if (IS_ERR(th))
434                 RETURN(PTR_ERR(th));
435
436         th->th_sync |= sync;
437         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
438                                      lb_oid, oid_off, th);
439         if (rc != 0)
440                 GOTO(out, rc);
441
442         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
443                                      lb_oseq, oseq_off, th);
444         if (rc != 0)
445                 GOTO(out, rc);
446
447         rc = dt_trans_start_local(env, osp->opd_storage, th);
448         if (rc != 0)
449                 GOTO(out, rc);
450
451         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
452                              &oid_off, th);
453         if (rc != 0) {
454                 CERROR("%s: can not write to last oid file: rc = %d\n",
455                         osp->opd_obd->obd_name, rc);
456                 GOTO(out, rc);
457         }
458         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
459                              &oseq_off, th);
460         if (rc) {
461                 CERROR("%s: can not write to last seq file: rc = %d\n",
462                         osp->opd_obd->obd_name, rc);
463                 GOTO(out, rc);
464         }
465 out:
466         dt_trans_stop(env, osp->opd_storage, th);
467         RETURN(rc);
468 }
469
470 /**
471  * Switch to another sequence
472  *
473  * When the current sequence has no available IDs left, OSP has to switch to
474  * a new sequence. OSP requests it using the regular FLDB protocol and stores
475  * it synchronously before it is used for precreation. This is needed to keep
476  * the sequence referenced (not orphaned): otherwise the OST could have some
477  * objects precreated and the clients could have data written to them, but
478  * after an MDT failover nobody would refer to those objects and OSP would
479  * have no idea that the sequence needs cleanup.
480  * While this is a very expensive operation, it's supposed to happen very
481  * infrequently because a sequence holds 2^32 or 2^48 objects (depending on type).
482  *
483  * \param[in] env       LU environment provided by the caller
484  * \param[in] osp       OSP device
485  *
486  * \retval 0            on success
487  * \retval negative     negated errno on error
488  */
489 static int osp_precreate_rollover_new_seq(struct lu_env *env,
490                                           struct osp_device *osp)
491 {
492         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
493         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
494         int             rc;
495         ENTRY;
496
497         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
498         if (rc != 0) {
499                 CERROR("%s: alloc fid error: rc = %d\n",
500                        osp->opd_obd->obd_name, rc);
501                 RETURN(rc);
502         }
503
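        /* the new sequence will hand out objects starting from OID 1 */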
504         fid->f_oid = 1;
505         fid->f_ver = 0;
506         LASSERTF(fid_seq(fid) != fid_seq(last_fid),
507                  "fid "DFID", last_fid "DFID"\n", PFID(fid),
508                  PFID(last_fid));
509
510         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
511         if (rc != 0) {
512                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
513                        osp->opd_obd->obd_name, rc);
514                 RETURN(rc);
515         }
516
517         LCONSOLE_INFO("%s: update sequence from %#llx to %#llx\n",
518                       osp->opd_obd->obd_name, fid_seq(last_fid),
519                       fid_seq(fid));
520         /* Update last_xxx to the new seq */
521         spin_lock(&osp->opd_pre_lock);
522         osp->opd_last_used_fid = *fid;
523         osp_fid_to_obdid(fid, &osp->opd_last_id);
524         osp->opd_gap_start_fid = *fid;
525         osp->opd_pre_used_fid = *fid;
526         osp->opd_pre_last_created_fid = *fid;
527         spin_unlock(&osp->opd_pre_lock);
528
529         RETURN(rc);
530 }
531
532 /**
533  * Find IDs available in current sequence
534  *
535  * The function calculates the highest possible ID and the number of IDs
536  * available in the current sequence OSP is using. The number is limited
537  * artificially by the caller (grow param) and naturally by the number of IDs
538  * available in the sequence. The function doesn't require external
539  * locking.
540  *
541  * \param[in] env       LU environment provided by the caller
542  * \param[in] osp       OSP device
543  * \param[in] fid       FID the caller wants to start with
544  * \param[in] grow      how many the caller wants
545  * \param[out] fid      the highest calculated FID
546  * \param[out] grow     the number of available IDs calculated
547  *
548  * \retval              0 on success, 1 - the sequence is empty
549  */
550 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
551                               struct lu_fid *fid, int *grow)
552 {
553         struct osp_thread_info  *osi = osp_env_info(env);
554         __u64                   end;
555         int                     i = 0;
556
557         if (fid_is_idif(fid)) {
558                 struct lu_fid   *last_fid;
559                 struct ost_id   *oi = &osi->osi_oi;
560                 int rc;
561
562                 spin_lock(&osp->opd_pre_lock);
563                 last_fid = &osp->opd_pre_last_created_fid;
564                 fid_to_ostid(last_fid, oi);
565                 end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
566                 *grow = end - ostid_id(oi);
567                 rc = ostid_set_id(oi, ostid_id(oi) + *grow);
568                 spin_unlock(&osp->opd_pre_lock);
569
570                 if (*grow == 0 || rc)
571                         return 1;
572
573                 ostid_to_fid(fid, oi, osp->opd_index);
574                 return 0;
575         }
576
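        /* normal (non-IDIF) FID: advance f_oid within the current sequence,
         * capped at LUSTRE_DATA_SEQ_MAX_WIDTH so we never run past the end
         * of the sequence */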
577         spin_lock(&osp->opd_pre_lock);
578         *fid = osp->opd_pre_last_created_fid;
579         end = fid->f_oid;
580         end = min((end + *grow), (__u64)LUSTRE_DATA_SEQ_MAX_WIDTH);
581         *grow = end - fid->f_oid;
582         fid->f_oid += end - fid->f_oid;
583         spin_unlock(&osp->opd_pre_lock);
584
585         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
586                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
587
588         return *grow > 0 ? 0 : 1;
589 }
590
591 /**
592  * Prepare and send precreate RPC
593  *
594  * The function finds how many objects should be precreated. Then it allocates,
595  * prepares and sends the precreate RPC synchronously. Upon reply the function
596  * wakes up the threads waiting for new objects on this target. If the
597  * target wasn't able to create all the objects requested, then the next
598  * precreate will ask for fewer objects (i.e. slow precreation down).
599  *
600  * \param[in] env       LU environment provided by the caller
601  * \param[in] d         OSP device
602  *
603  * \retval 0            on success
604  * \retval negative     negated errno on error
605  **/
606 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
607 {
608         struct osp_thread_info  *oti = osp_env_info(env);
609         struct ptlrpc_request   *req;
610         struct obd_import       *imp;
611         struct ost_body         *body;
612         int                      rc, grow, diff;
613         struct lu_fid           *fid = &oti->osi_fid;
614         ENTRY;
615
616         /* don't precreate new objects till the OST is healthy and has free space */
617         if (unlikely(d->opd_pre_status)) {
618                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
619                        d->opd_obd->obd_name, d->opd_pre_status);
620                 RETURN(0);
621         }
622
623         /*
624          * if connection/initialization is not completed, ignore
625          */
626         imp = d->opd_obd->u.cli.cl_import;
627         LASSERT(imp);
628
629         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
630         if (req == NULL)
631                 RETURN(-ENOMEM);
632         req->rq_request_portal = OST_CREATE_PORTAL;
633         /* we should not resend create request - anyway we will have delorphan
634          * and kill these objects */
635         req->rq_no_delay = req->rq_no_resend = 1;
636
637         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
638         if (rc) {
639                 ptlrpc_request_free(req);
640                 RETURN(rc);
641         }
642
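        /* cap how many objects are requested in a single precreate RPC at
         * half of the configured maximum */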
643         spin_lock(&d->opd_pre_lock);
644         if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
645                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
646         grow = d->opd_pre_create_count;
647         spin_unlock(&d->opd_pre_lock);
648
649         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
650         LASSERT(body);
651
652         *fid = d->opd_pre_last_created_fid;
653         rc = osp_precreate_fids(env, d, fid, &grow);
654         if (rc == 1)
655                 /* Current seq has been used up */
656                 GOTO(out_req, rc = -ENOSPC);
657
658         if (!osp_is_fid_client(d)) {
659                 /* Non-FID client will always send seq 0 because of
660                  * compatibility */
661                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
662                 fid->f_seq = 0;
663         }
664
665         fid_to_ostid(fid, &body->oa.o_oi);
666         body->oa.o_valid = OBD_MD_FLGROUP;
667
668         ptlrpc_request_set_replen(req);
669
670         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
671                 GOTO(ready, rc = 0);
672
673         rc = ptlrpc_queue_wait(req);
674         if (rc) {
675                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
676                        rc);
677                 if (req->rq_net_err)
678                         /* let osp_precreate_reserve() wait and retry */
679                         rc = -ENOTCONN;
680                 GOTO(out_req, rc);
681         }
682         LASSERT(req->rq_transno == 0);
683
684         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
685         if (body == NULL)
686                 GOTO(out_req, rc = -EPROTO);
687
688         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
689
690 ready:
691         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
692                 CERROR("%s: precreate fid "DFID" <= local used fid "DFID
693                        ": rc = %d\n", d->opd_obd->obd_name,
694                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
695                 GOTO(out_req, rc = -ESTALE);
696         }
697
698         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
699
700         spin_lock(&d->opd_pre_lock);
701         if (diff < grow) {
702                 /* the OST has not managed to create all the
703                  * objects we asked for */
704                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
705                 d->opd_pre_create_slow = 1;
706         } else {
707                 /* the OST is able to keep up with the work,
708                  * we could consider increasing create_count
709                  * next time if needed */
710                 d->opd_pre_create_slow = 0;
711         }
712
713         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
714         fid_to_ostid(fid, &body->oa.o_oi);
715
716         d->opd_pre_last_created_fid = *fid;
717         spin_unlock(&d->opd_pre_lock);
718
719         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
720                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
721                PFID(&d->opd_pre_last_created_fid));
722 out_req:
723         /* now we can wakeup all users awaiting for objects */
724         osp_pre_update_status(d, rc);
725         wake_up(&d->opd_pre_user_waitq);
726
727         /* pause to let osp_precreate_reserve to go first */
728         CFS_FAIL_TIMEOUT(OBD_FAIL_OSP_PRECREATE_PAUSE, 2);
729
730         ptlrpc_req_finished(req);
731         RETURN(rc);
732 }
733
734 /**
735  * Get last precreated object from target (OST)
736  *
737  * Sends synchronous RPC to the target (OST) to learn the last precreated
738  * object. This is later used to remove all unused objects (the orphan
739  * cleanup procedure). Also, the object following the one we got will be
740  * used as the starting point for new precreates.
741  *
742  * \param[in] env       LU environment provided by the caller
743  * \param[in] d         OSP device
744  *
745  * \retval 0            on success
746  * \retval negative     negated errno on error
747  **/
748 static int osp_get_lastfid_from_ost(const struct lu_env *env,
749                                     struct osp_device *d)
750 {
751         struct ptlrpc_request   *req = NULL;
752         struct obd_import       *imp;
753         struct lu_fid           *last_fid;
754         char                    *tmp;
755         int                     rc;
756         ENTRY;
757
758         imp = d->opd_obd->u.cli.cl_import;
759         LASSERT(imp);
760
761         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
762         if (req == NULL)
763                 RETURN(-ENOMEM);
764
765         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
766                              sizeof(KEY_LAST_FID));
767
768         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
769         if (rc) {
770                 ptlrpc_request_free(req);
771                 RETURN(rc);
772         }
773
774         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
775         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
776
777         req->rq_no_delay = req->rq_no_resend = 1;
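        /* the request body carries our last used FID, telling the OST which
         * sequence we are asking about */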
778         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
779         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
780
781         ptlrpc_request_set_replen(req);
782
783         rc = ptlrpc_queue_wait(req);
784         if (rc) {
785                 /* -EFAULT means reading LAST_FID failed (see ofd_get_info_hdl);
786                  * let the sysadmin sort this out.
787                  */
788                 if (rc == -EFAULT)
789                         ptlrpc_set_import_active(imp, 0);
790                 GOTO(out, rc);
791         }
792
793         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
794         if (last_fid == NULL) {
795                 CERROR("%s: Got last_fid failed.\n", d->opd_obd->obd_name);
796                 GOTO(out, rc = -EPROTO);
797         }
798
799         if (!fid_is_sane(last_fid)) {
800                 CERROR("%s: Got insane last_fid "DFID"\n",
801                        d->opd_obd->obd_name, PFID(last_fid));
802                 GOTO(out, rc = -EPROTO);
803         }
804
805         /* Only update the last used fid, if the OST has objects for
806          * this sequence, i.e. fid_oid > 0 */
807         if (fid_oid(last_fid) > 0)
808                 d->opd_last_used_fid = *last_fid;
809
810         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
811                PFID(last_fid));
812
813 out:
814         ptlrpc_req_finished(req);
815         RETURN(rc);
816 }
817
818 /**
819  * Cleanup orphans on OST
820  *
821  * This function is called in the context of a dedicated thread handling
822  * all the precreation stuff. The function waits till local recovery
823  * is complete, then identifies all the unreferenced objects (orphans)
824  * using the highest ID referenced locally and the highest object
825  * precreated by the target. The found range is subject to removal
826  * using a specially flagged RPC. During this process OSP is marked
827  * unavailable for new objects.
828  *
829  * \param[in] env       LU environment provided by the caller
830  * \param[in] d         OSP device
831  *
832  * \retval 0            on success
833  * \retval negative     negated errno on error
834  */
835 static int osp_precreate_cleanup_orphans(struct lu_env *env,
836                                          struct osp_device *d)
837 {
838         struct osp_thread_info  *osi = osp_env_info(env);
839         struct lu_fid           *last_fid = &osi->osi_fid;
840         struct ptlrpc_request   *req = NULL;
841         struct obd_import       *imp;
842         struct ost_body         *body;
843         int                      update_status = 0;
844         int                      rc;
845         int                      diff;
846
847         ENTRY;
848
849         /*
850          * wait for local recovery to finish, so we can cleanup orphans.
851          * orphans are all objects since "last used" (assigned), but
852          * there might be objects reserved and in some cases they won't
853          * be used. we can't clean them up till we're sure they won't be
854          * used. also we can't allow new reservations because they may
855          * end up getting the orphans being cleaned up below. so we block
856          * new reservations and wait till all reserved objects are either
857          * used or released.
858          */
859         spin_lock(&d->opd_pre_lock);
860         d->opd_pre_recovering = 1;
861         spin_unlock(&d->opd_pre_lock);
862         /*
863          * The locking above makes sure the opd_pre_reserved check below will
864          * catch all osp_precreate_reserve() calls who find
865          * "!opd_pre_recovering".
866          */
867         wait_event_idle(d->opd_pre_waitq,
868                         (!d->opd_pre_reserved && d->opd_recovery_completed) ||
869                         !d->opd_pre_task || d->opd_got_disconnected);
870         if (!d->opd_pre_task || d->opd_got_disconnected)
871                 GOTO(out, rc = -EAGAIN);
872
873         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
874                d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
875
876         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_DELAY_DELORPHAN, cfs_fail_val);
877
878         *last_fid = d->opd_last_used_fid;
879         /* The OSP should already get the valid seq now */
880         LASSERT(!fid_is_zero(last_fid));
881         if (fid_oid(&d->opd_last_used_fid) < 2 ||
882             OBD_FAIL_CHECK(OBD_FAIL_OSP_GET_LAST_FID)) {
883                 /* lastfid looks strange... ask OST */
884                 rc = osp_get_lastfid_from_ost(env, d);
885                 if (rc)
886                         GOTO(out, rc);
887         }
888
889         imp = d->opd_obd->u.cli.cl_import;
890         LASSERT(imp);
891
892         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
893         if (req == NULL)
894                 GOTO(out, rc = -ENOMEM);
895
896         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
897         if (rc) {
898                 ptlrpc_request_free(req);
899                 req = NULL;
900                 GOTO(out, rc);
901         }
902
903         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
904         if (body == NULL)
905                 GOTO(out, rc = -EPROTO);
906
907         body->oa.o_flags = OBD_FL_DELORPHAN;
908         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
909
910         fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
911
912         ptlrpc_request_set_replen(req);
913
914         /* Don't resend the delorphan req */
915         req->rq_no_resend = req->rq_no_delay = 1;
916
917         rc = ptlrpc_queue_wait(req);
918         if (rc) {
919                 update_status = 1;
920                 GOTO(out, rc);
921         }
922
923         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
924         if (body == NULL)
925                 GOTO(out, rc = -EPROTO);
926
927         /*
928          * The OST provides us with the id the new pool starts from in body->oa.o_oi
929          */
930         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
931
932         spin_lock(&d->opd_pre_lock);
933         diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
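        /* diff > 0: the locally recorded last-used FID is ahead of what the
         * OST reports as last created, so widen the precreate request to
         * cover the gap; otherwise restart from the OST-reported last FID */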
934         if (diff > 0) {
935                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
936                 d->opd_pre_last_created_fid = d->opd_last_used_fid;
937         } else {
938                 d->opd_pre_create_count = OST_MIN_PRECREATE;
939                 d->opd_pre_last_created_fid = *last_fid;
940         }
941         /*
942          * This empties the pre-creation pool and effectively blocks any new
943          * reservations.
944          */
945         LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
946                 LUSTRE_DATA_SEQ_MAX_WIDTH);
947         d->opd_pre_used_fid = d->opd_pre_last_created_fid;
948         d->opd_pre_create_slow = 0;
949         spin_unlock(&d->opd_pre_lock);
950
951         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
952                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
953                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
954 out:
955         if (req)
956                 ptlrpc_req_finished(req);
957
958         /*
959          * If rc is zero, the pre-creation window should have been emptied.
960          * Since waking up the herd would be useless without pre-created
961          * objects, we defer the signal to osp_precreate_send() in that case.
962          */
963         if (rc != 0) {
964                 if (update_status) {
965                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
966                                d->opd_obd->obd_name, rc);
967                         /* we can't proceed from here, the OST seems to
968                          * be in bad shape, better to wait for
969                          * a new instance of the server and repeat
970                          * from the beginning. notify possible waiters
971                          * this OSP isn't quite functional yet */
972                         osp_pre_update_status(d, rc);
973                 } else {
974                         wake_up(&d->opd_pre_user_waitq);
975                 }
976         } else {
977                 spin_lock(&d->opd_pre_lock);
978                 d->opd_pre_recovering = 0;
979                 spin_unlock(&d->opd_pre_lock);
980         }
981
982         RETURN(rc);
983 }
984
985 /**
986  * Update precreate status using statfs data
987  *
988  * The function decides whether this OSP should be used for new objects.
989  * IOW, whether this OST is used up or has some free space. Cached statfs
990  * data is used to make this decision. If the latest statfs request
991  * was not successful (the rc argument), then just mark the OSP unavailable
992  * right away.
993  *
994  * The new statfs data is passed in \a msfs and needs to be stored into
995  * opd_statfs, but only after the various flags in os_state are set, so
996  * that the new statfs data is not visible without appropriate flags set.
997  * As such, there is no need to clear the flags here, since this is called
998  * with new statfs data, and they should not be cleared if sent from OST.
999  *
1000  * Add a bit of hysteresis so this flag isn't continually flapping, and
1001  * ensure that new files don't get extremely fragmented due to only a
1002  * small amount of available space in the filesystem.  We want to set
1003  * the ENOSPC/ENOINO flags unconditionally when there is less than the
1004  * reserved size free, and still copy them from the old state when there
1005  * is less than 2*reserved size free space or inodes.
1006  *
1007  * \param[in] d         OSP device
1008  * \param[in] msfs      statfs data
1009  */
1010 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs)
1011 {
1012         u32 old_state = d->opd_statfs.os_state;
1013         u32 reserved_ino_low = 32;      /* could be tunable in the future */
1014         u32 reserved_ino_high = reserved_ino_low * 2;
1015         u64 available_mb;
1016
1017         /* statfs structure not initialized yet */
1018         if (unlikely(!msfs->os_type))
1019                 return;
1020
1021         /* if the low and high watermarks have not been initialized yet */
1022         if (unlikely(d->opd_reserved_mb_high == 0 &&
1023                      d->opd_reserved_mb_low == 0)) {
1024                 /* Use ~0.1% by default to disable object allocation,
1025                  * and ~0.2% to enable, size in MB, set both watermark
1026                  */
1027                 spin_lock(&d->opd_pre_lock);
1028                 if (d->opd_reserved_mb_high == 0 &&
1029                     d->opd_reserved_mb_low == 0) {
1030                         d->opd_reserved_mb_low = ((msfs->os_bsize >> 10) *
1031                                                   msfs->os_blocks) >> 20;
1032                         if (d->opd_reserved_mb_low == 0)
1033                                 d->opd_reserved_mb_low = 1;
1034                         d->opd_reserved_mb_high =
1035                                 (d->opd_reserved_mb_low << 1) + 1;
1036                 }
1037                 spin_unlock(&d->opd_pre_lock);
1038         }
1039
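        /* available space in MiB: blocks * (bsize in KiB) / 1024 */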
1040         available_mb = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
1041         if (msfs->os_ffree < reserved_ino_low)
1042                 msfs->os_state |= OS_STATFS_ENOINO;
1043         else if (msfs->os_ffree <= reserved_ino_high)
1044                 msfs->os_state |= old_state & OS_STATFS_ENOINO;
1045         /* else don't clear flags in new msfs->os_state sent from OST */
1046
1047         CDEBUG(D_INFO,
1048                "%s: blocks=%llu free=%llu avail=%llu avail_mb=%llu hwm_mb=%u files=%llu ffree=%llu state=%x: rc = %d\n",
1049                d->opd_obd->obd_name, msfs->os_blocks, msfs->os_bfree,
1050                msfs->os_bavail, available_mb, d->opd_reserved_mb_high,
1051                msfs->os_files, msfs->os_ffree, msfs->os_state,
1052                d->opd_pre_status);
1053         if (available_mb < d->opd_reserved_mb_low)
1054                 msfs->os_state |= OS_STATFS_ENOSPC;
1055         else if (available_mb <= d->opd_reserved_mb_high)
1056                 msfs->os_state |= old_state & OS_STATFS_ENOSPC;
1057         /* else don't clear flags in new msfs->os_state sent from OST */
1058
1059         if (msfs->os_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1060                 d->opd_pre_status = -ENOSPC;
1061                 if (!(old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)))
1062                         CDEBUG(D_INFO, "%s: full: state=%x: rc = %x\n",
1063                                d->opd_obd->obd_name, msfs->os_state,
1064                                d->opd_pre_status);
1065                 CDEBUG(D_INFO, "uncommitted changes=%u in_progress=%u\n",
1066                        atomic_read(&d->opd_sync_changes),
1067                        atomic_read(&d->opd_sync_rpcs_in_progress));
1068         } else if (old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1069                 d->opd_pre_status = 0;
1070                 spin_lock(&d->opd_pre_lock);
1071                 d->opd_pre_create_slow = 0;
1072                 d->opd_pre_create_count = OST_MIN_PRECREATE;
1073                 spin_unlock(&d->opd_pre_lock);
1074                 wake_up(&d->opd_pre_waitq);
1075
1076                 CDEBUG(D_INFO,
1077                        "%s: available: state=%x: rc = %d\n",
1078                        d->opd_obd->obd_name, msfs->os_state,
1079                        d->opd_pre_status);
1080         } else {
1081                 /* we only get here if rc == 0 in the caller */
1082                 d->opd_pre_status = 0;
1083         }
1084
1085         /* Object precreation skipped on OST if manually disabled */
1086         if (d->opd_pre_max_create_count == 0)
1087                 msfs->os_state |= OS_STATFS_NOPRECREATE;
1088         /* else don't clear flags in new msfs->os_state sent from OST */
1089
1090         /* copy only new statfs state to make it visible to MDS threads */
1091         if (&d->opd_statfs != msfs)
1092                 d->opd_statfs = *msfs;
1093 }
1094
1095 /**
1096  * Initialize FID for precreation
1097  *
1098  * For a just created new target, a new sequence should be taken.
1099  * The function checks there is no IDIF in use (if the target was
1100  * added with an older version of Lustre), then requests a new
1101  * sequence from FLDB using the regular protocol. Then this new
1102  * sequence is stored on persistent storage synchronously to prevent
1103  * possible object leakage (for the details see the description of
1104  * osp_precreate_rollover_new_seq()).
1105  *
1106  * \param[in] osp       OSP device
1107  *
1108  * \retval 0            on success
1109  * \retval negative     negated errno on error
1110  */
1111 int osp_init_pre_fid(struct osp_device *osp)
1112 {
1113         struct lu_env           env;
1114         struct osp_thread_info  *osi;
1115         struct lu_client_seq    *cli_seq;
1116         struct lu_fid           *last_fid;
1117         int                     rc;
1118         ENTRY;
1119
1120         LASSERT(osp->opd_pre != NULL);
1121
1122         /* Let's check if the current last_seq/fid is valid,
1123          * otherwise request new sequence from the controller */
1124         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1125                 /* Non-MDT0 can only use normal sequence for
1126                  * OST objects */
1127                 if (fid_is_norm(&osp->opd_last_used_fid))
1128                         RETURN(0);
1129         } else {
1130                 /* Initially MDT0 will start with IDIF, after
1131                  * that it will request new sequence from the
1132                  * controller */
1133                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1134                     fid_is_norm(&osp->opd_last_used_fid))
1135                         RETURN(0);
1136         }
1137
1138         if (!fid_is_zero(&osp->opd_last_used_fid))
1139                 CWARN("%s: invalid last used fid "DFID
1140                       ", try to get new sequence.\n",
1141                       osp->opd_obd->obd_name,
1142                       PFID(&osp->opd_last_used_fid));
1143
1144         rc = lu_env_init(&env, osp->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1145         if (rc) {
1146                 CERROR("%s: init env error: rc = %d\n",
1147                        osp->opd_obd->obd_name, rc);
1148                 RETURN(rc);
1149         }
1150
1151         osi = osp_env_info(&env);
1152         last_fid = &osi->osi_fid;
1153         fid_zero(last_fid);
1154         /* For a fresh fs, it will allocate a new sequence first */
1155         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1156                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1157                 rc = seq_client_get_seq(&env, cli_seq, &last_fid->f_seq);
1158                 if (rc != 0) {
1159                         CERROR("%s: alloc fid error: rc = %d\n",
1160                                osp->opd_obd->obd_name, rc);
1161                         GOTO(out, rc);
1162                 }
1163         } else {
1164                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1165         }
1166         last_fid->f_oid = 1;
1167         last_fid->f_ver = 0;
1168
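        /* seed all FID trackers with the new starting FID before it is
         * persisted below */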
1169         spin_lock(&osp->opd_pre_lock);
1170         osp->opd_last_used_fid = *last_fid;
1171         osp->opd_pre_used_fid = *last_fid;
1172         osp->opd_pre_last_created_fid = *last_fid;
1173         spin_unlock(&osp->opd_pre_lock);
1174         rc = osp_write_last_oid_seq_files(&env, osp, last_fid, 1);
1175         if (rc != 0) {
1176                 CERROR("%s: write fid error: rc = %d\n",
1177                        osp->opd_obd->obd_name, rc);
1178                 GOTO(out, rc);
1179         }
1180 out:
1181         lu_env_fini(&env);
1182         RETURN(rc);
1183 }
1184
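/* arguments handed to the precreate thread at startup */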
1185 struct opt_args {
1186         struct osp_device       *opta_dev;
1187         struct lu_env           opta_env;
1188         struct completion       *opta_started;
1189 };
1190 /**
1191  * The core of precreate functionality
1192  *
1193  * The function implements the main precreation loop. Basically it
1194  * involves connecting to the target, precreate FID initialization,
1195  * identifying and removing orphans, then serving precreation. As
1196  * part of the latter, the thread is responsible for statfs data
1197  * updates. The precreation is mostly driven by other threads
1198  * asking for new OST objects - those askers wake the thread when
1199  * the number of precreated objects reaches the low watermark.
1200  * After a disconnect, the sequence above repeats. This keeps going
1201  * until the thread is requested to stop.
1202  *
1203  * \param[in] _args     private data for the thread (OSP device to handle)
1204  *
1205  * \retval 0            on success
1206  * \retval negative     negated errno on error
1207  */
1208 static int osp_precreate_thread(void *_args)
1209 {
1210         struct opt_args         *args = _args;
1211         struct osp_device       *d = args->opta_dev;
1212         struct lu_env           *env = &args->opta_env;
1213         int                      rc;
1214
1215         ENTRY;
1216
1217         complete(args->opta_started);
1218         while (!kthread_should_stop()) {
1219                 /*
1220                  * need to be connected to OST
1221                  */
1222                 while (!kthread_should_stop()) {
1223                         if ((d->opd_pre == NULL || d->opd_pre_recovering) &&
1224                             d->opd_imp_connected &&
1225                             !d->opd_got_disconnected)
1226                                 break;
1227                         wait_event_idle(d->opd_pre_waitq,
1228                                         kthread_should_stop() ||
1229                                         d->opd_new_connection);
1230
1231                         if (!d->opd_new_connection)
1232                                 continue;
1233
1234                         OBD_FAIL_TIMEOUT(OBD_FAIL_OSP_CON_EVENT_DELAY,
1235                                          cfs_fail_val);
1236                         d->opd_new_connection = 0;
1237                         d->opd_got_disconnected = 0;
1238                         break;
1239                 }
1240
1241                 if (kthread_should_stop())
1242                         break;
1243
1244                 if (d->opd_pre) {
1245                         LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1246                         /* Sigh, fid client is not ready yet */
1247                         if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
1248                                 continue;
1249
1250                         /* Init fid for osp_precreate if necessary */
1251                         rc = osp_init_pre_fid(d);
1252                         if (rc != 0) {
1253                                 class_export_put(d->opd_exp);
1254                                 d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
1255                                 CERROR("%s: init pre fid error: rc = %d\n",
1256                                                 d->opd_obd->obd_name, rc);
1257                                 continue;
1258                         }
1259                 }
1260
1261                 if (osp_statfs_update(env, d)) {
1262                         if (wait_event_idle_timeout(d->opd_pre_waitq,
1263                                                     kthread_should_stop(),
1264                                                     cfs_time_seconds(5)) == 0)
1265                                 l_wait_event_abortable(
1266                                         d->opd_pre_waitq,
1267                                         kthread_should_stop());
1268                         continue;
1269                 }
1270
1271                 if (d->opd_pre) {
1272                         /*
1273                          * Clean up orphans or recreate missing objects.
1274                          */
1275                         rc = osp_precreate_cleanup_orphans(env, d);
1276                         if (rc != 0) {
1277                                 schedule_timeout_interruptible(cfs_time_seconds(1));
1278                                 continue;
1279                         }
1280                 }
1281
1282                 /*
1283                  * connected, can handle precreates now
1284                  */
1285                 while (!kthread_should_stop()) {
1286                         wait_event_idle(d->opd_pre_waitq,
1287                                         kthread_should_stop() ||
1288                                         osp_precreate_near_empty(env, d) ||
1289                                         osp_statfs_need_update(d) ||
1290                                         d->opd_got_disconnected);
1291
1292                         if (kthread_should_stop())
1293                                 break;
1294
1295                         /* something happened to the connection
1296                          * have to start from the beginning */
1297                         if (d->opd_got_disconnected)
1298                                 break;
1299
1300                         if (osp_statfs_need_update(d))
1301                                 if (osp_statfs_update(env, d))
1302                                         break;
1303
1304                         if (d->opd_pre == NULL)
1305                                 continue;
1306
1307                         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_GET_LAST_FID)) {
1308                                 d->opd_pre_recovering = 1;
1309                                 break;
1310                         }
1311
1312                         /* To avoid handling different seqs in precreate/
1313                          * orphan cleanup, hold off precreation until the
1314                          * current seq is used up. */
1315                         if (unlikely(osp_precreate_end_seq(env, d) &&
1316                             !osp_create_end_seq(env, d)))
1317                                 continue;
1318
1319                         if (unlikely(osp_precreate_end_seq(env, d) &&
1320                                      osp_create_end_seq(env, d))) {
1321                                 LCONSOLE_INFO("%s:%#llx is used up."
1322                                               " Update to new seq\n",
1323                                               d->opd_obd->obd_name,
1324                                          fid_seq(&d->opd_pre_last_created_fid));
1325                                 rc = osp_precreate_rollover_new_seq(env, d);
1326                                 if (rc)
1327                                         continue;
1328                         }
1329
1330                         if (osp_precreate_near_empty(env, d)) {
1331                                 rc = osp_precreate_send(env, d);
1332                                 /* osp_precreate_send() sets opd_pre_status
1333                                  * in case of error, which prevents use of
1334                                  * the failed device. */
1335                                 if (rc < 0 && rc != -ENOSPC &&
1336                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1337                                         CERROR("%s: cannot precreate objects:"
1338                                                " rc = %d\n",
1339                                                d->opd_obd->obd_name, rc);
1340                         }
1341                 }
1342         }
1343
1344         lu_env_fini(env);
1345         OBD_FREE_PTR(args);
1346
1347         RETURN(0);
1348 }
1349
1350 /**
1351  * Check when to stop waiting for precreated objects.
1352  *
1353  * The caller wanting a new OST object can't wait indefinitely. The
1354  * function checks a few conditions, including available new OST
1355  * objects, a disconnected OST, lack of space with no pending destroys,
1356  * etc. IOW, it checks whether the current OSP state is good enough to keep
1357  * waiting or it's better to give up.
1358  *
1359  * \param[in] env       LU environment provided by the caller
1360  * \param[in] d         OSP device
1361  *
1362  * \retval              0 - keep waiting, 1 - no luck
1363  */
1364 static int osp_precreate_ready_condition(const struct lu_env *env,
1365                                          struct osp_device *d)
1366 {
1367         if (d->opd_pre_recovering)
1368                 return 0;
1369
1370         /* ready if got enough precreated objects */
1371         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
1372         if (d->opd_pre_reserved + 1 < osp_objs_precreated(env, d))
1373                 return 1;
1374
1375         /* ready if OST reported no space and no destroys in progress */
1376         if (atomic_read(&d->opd_sync_changes) +
1377             atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
1378             d->opd_pre_status == -ENOSPC)
1379                 return 1;
1380
1381         /* Bail out if I/O to the OST fails */
1382         if (d->opd_pre_status != 0 &&
1383             d->opd_pre_status != -EAGAIN &&
1384             d->opd_pre_status != -ENODEV &&
1385             d->opd_pre_status != -ENOTCONN &&
1386             d->opd_pre_status != -ENOSPC) {
1387                 /* DEBUG LU-3230 */
1388                 if (d->opd_pre_status != -EIO)
1389                         CERROR("%s: precreate failed opd_pre_status %d\n",
1390                                d->opd_obd->obd_name, d->opd_pre_status);
1391                 return 1;
1392         }
1393
1394         return 0;
1395 }
1396
1397 /**
1398  * Reserve object in precreate pool
1399  *
1400  * When the caller wants to create a new object on this target (the target
1401  * represented by the given OSP), it should declare this intention using
1402  * the regular ->dt_declare_create() OSD API method. Then OSP tries to
1403  * reserve an object in the existing precreated pool, or waits up to
1404  * obd_timeout for an available object to appear in the pool (a dedicated
1405  * thread does the real precreation in the background). The object can be
1406  * consumed later with osp_precreate_get_fid() or released with a call to
1407  * lu_object_put(). Note that the function doesn't reserve a specific ID,
1408  * just some ID. The actual ID assignment happens in osp_precreate_get_fid().
1409  * If space on the target is short and there is a pending object destroy,
1410  * then the function forces a local commit to speed up space release (see
1411  * osp_sync.c for the details).
1412  *
1413  * \param[in] env       LU environment provided by the caller
1414  * \param[in] d         OSP device
1415  *
1416  * \retval              0 on success
1417  * \retval              -ENOSPC when no space on OST
1418  * \retval              -EAGAIN try later, slow precreation in progress
1419  * \retval              -EIO when no access to OST
1420  */
1421 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d,
1422                           bool can_block)
1423 {
1424         time64_t expire = ktime_get_seconds() + obd_timeout;
1425         int precreated, rc, synced = 0;
1426
1427         ENTRY;
1428
1429         LASSERTF(osp_objs_precreated(env, d) >= 0, "Last created FID "DFID
1430                  " Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1431                  PFID(&d->opd_pre_used_fid));
1432
1433         /* opd_pre_max_create_count == 0 means this OST must not be used. */
1434         if (d->opd_pre_max_create_count == 0)
1435                 RETURN(-ENOBUFS);
1436
1437         /*
1438          * wait till:
1439          *  - preallocation is done
1440          *  - no free space expected soon
1441          *  - can't connect to OST for too long (obd_timeout)
1442          *  - OST can allocate fid sequence.
1443          */
1444         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1445                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1446
1447                 /*
1448                  * double the precreation window once less than ~25% of it remains
1449                  */
1450                 precreated = osp_objs_precreated(env, d);
1451                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1452                     d->opd_pre_create_slow == 0 &&
1453                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1454                         spin_lock(&d->opd_pre_lock);
1455                         d->opd_pre_create_slow = 1;
1456                         d->opd_pre_create_count *= 2;
1457                         spin_unlock(&d->opd_pre_lock);
1458                 }
1459
1460                 spin_lock(&d->opd_pre_lock);
1461                 precreated = osp_objs_precreated(env, d);
1462                 if (precreated > d->opd_pre_reserved &&
1463                     !d->opd_pre_recovering) {
1464                         d->opd_pre_reserved++;
1465                         spin_unlock(&d->opd_pre_lock);
1466                         rc = 0;
1467
1468                         /* XXX: don't wake up if precreation is in progress */
1469                         if (osp_precreate_near_empty_nolock(env, d) &&
1470                            !osp_precreate_end_seq_nolock(env, d))
1471                                 wake_up(&d->opd_pre_waitq);
1472
1473                         break;
1474                 }
1475                 spin_unlock(&d->opd_pre_lock);
1476
1477                 /*
1478                  * all precreated objects have been used and the no-space
1479                  * status leaves us no chance to succeed very soon;
1480                  * but if a destroy is in progress, then we should wait
1481                  * until it is done - some space might be released
1482                  */
1483                 if (unlikely(rc == -ENOSPC)) {
1484                         if (atomic_read(&d->opd_sync_changes) && synced == 0) {
1485                                 /* force local commit to release space */
1486                                 dt_commit_async(env, d->opd_storage);
1487                                 osp_sync_check_for_work(d);
1488                                 synced = 1;
1489                         }
1490                         if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
1491                                 /* just wait till destroys are done
1492                                  * see wait_event_idle_timeout() below
1493                                  */
1494                         }
1495                         if (atomic_read(&d->opd_sync_changes) +
1496                             atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
1497                                 /* no hope for free space */
1498                                 break;
1499                         }
1500                 }
1501
1502                 /* XXX: don't wake up if precreation is in progress */
1503                 wake_up(&d->opd_pre_waitq);
1504
1505                 if (ktime_get_seconds() >= expire) {
1506                         rc = -ETIMEDOUT;
1507                         break;
1508                 }
1509
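                /* a non-blocking caller must not sleep here waiting for the
                 * precreate thread; report -ENOBUFS right away and let the
                 * caller retry or pick another target */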
1510                 if (!can_block) {
1511                         LASSERT(d->opd_pre);
1512                         rc = -ENOBUFS;
1513                         break;
1514                 }
1515
1516                 if (wait_event_idle_timeout(
1517                             d->opd_pre_user_waitq,
1518                             osp_precreate_ready_condition(env, d),
1519                             cfs_time_seconds(obd_timeout)) == 0) {
1520                         CDEBUG(D_HA,
1521                                "%s: slow creates, last="DFID", next="DFID", "
1522                                "reserved=%llu, sync_changes=%u, "
1523                                "sync_rpcs_in_progress=%d, status=%d\n",
1524                                d->opd_obd->obd_name,
1525                                PFID(&d->opd_pre_last_created_fid),
1526                                PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1527                                atomic_read(&d->opd_sync_changes),
1528                                atomic_read(&d->opd_sync_rpcs_in_progress),
1529                                d->opd_pre_status);
1530                 }
1531         }
1532
1533         RETURN(rc);
1534 }
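
/*
 * Illustrative usage sketch: a caller wanting a new OST object first
 * reserves a slot and then, once its transaction has been started,
 * consumes a FID.  This is only a minimal outline; error handling and
 * the surrounding declare/create machinery are omitted, and env, d and
 * fid are assumed to come from the caller's context:
 *
 *	rc = osp_precreate_reserve(env, d, true);
 *	if (rc == 0)
 *		rc = osp_precreate_get_fid(env, d, fid);
 */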
1535
1536 /**
1537  * Get a FID from precreation pool
1538  *
1539  * The function is a companion for osp_precreate_reserve() - it assigns
1540  * The function is a companion to osp_precreate_reserve() - it assigns
1541  * a specific FID from the precreate pool. The function should be called
1542  * only if the call to osp_precreate_reserve() was successful. The
1543  * function updates local storage to remember the highest object ID
1544  * referenced by the node in the given sequence.
1545  * A very important detail: this is supposed to be called once the
1546  * transaction is started, so the on-disk update will be atomic with the
1547  * data (like the LOV EA) referring to this object. Then the object won't
1548  * be leaked: either it's referenced by the committed transaction or it's
1549  * subject to the orphan cleanup procedure.
1550  *
1551  * \param[in] env       LU environment provided by the caller
1552  * \param[in] d         OSP device
1553  * \param[out] fid      generated FID
1554  *
1555  * \retval 0            on success
1556  * \retval negative     negated errno on error
1557  */
1558 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1559                           struct lu_fid *fid)
1560 {
1561         struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
1562         /* grab next id from the pool */
1563         spin_lock(&d->opd_pre_lock);
1564
1565         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1566                              &d->opd_pre_last_created_fid) < 0,
1567                  "next fid "DFID" last created fid "DFID"\n",
1568                  PFID(&d->opd_pre_used_fid),
1569                  PFID(&d->opd_pre_last_created_fid));
1570
1571         /*
1572          * When the sequence is used up, a new one should be allocated in
1573          * osp_precreate_rollover_new_seq(). So assert here to avoid
1574          * objid overflow.
1575          */
1576         LASSERTF(osp_fid_end_seq(env, pre_used_fid) == 0,
1577                  "next fid "DFID" last created fid "DFID"\n",
1578                  PFID(&d->opd_pre_used_fid),
1579                  PFID(&d->opd_pre_last_created_fid));
1580         /* Non-IDIF fids shouldn't get here with oid == 0xFFFFFFFF. */
1581         if (fid_is_idif(pre_used_fid) &&
1582             unlikely(fid_oid(pre_used_fid) == LUSTRE_DATA_SEQ_MAX_WIDTH))
1583                 pre_used_fid->f_seq++;
1584
1585         d->opd_pre_used_fid.f_oid++;
1586         memcpy(fid, &d->opd_pre_used_fid, sizeof(*fid));
1587         d->opd_pre_reserved--;
1588         /*
1589          * last_used_id must be changed along with getting the new id,
1590          * otherwise we might miscalculate the gap, causing object loss or leak
1591          */
1592         osp_update_last_fid(d, fid);
1593         spin_unlock(&d->opd_pre_lock);
1594
1595         /*
1596          * the main thread has probably suspended orphan cleanup until
1597          * all reservations are released; see the comment in
1598          * osp_precreate_thread() just before orphan cleanup
1599          */
1600         if (unlikely(d->opd_pre_reserved == 0 &&
1601                      (d->opd_pre_recovering || d->opd_pre_status)))
1602                 wake_up(&d->opd_pre_waitq);
1603
1604         return 0;
1605 }
1606
1607 /*
1608  * Set the size attribute on an object
1609  *
1610  * When striping is created late, it's possible that the size is already
1611  * initialized on the file. Then the new striping should inherit the size
1612  * from the file. The function sets the size on the object using the
1613  * regular protocol (OST_PUNCH).
1614  * XXX: should be re-implemented using OUT ?
1615  *
1616  * \param[in] env       LU environment provided by the caller
1617  * \param[in] dt        object
1618  * \param[in] size      size to set.
1619  *
1620  * \retval 0            on success
1621  * \retval negative     negated errno on error
1622  */
1623 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1624                         __u64 size)
1625 {
1626         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1627         struct ptlrpc_request   *req = NULL;
1628         struct obd_import       *imp;
1629         struct ost_body         *body;
1630         struct obdo             *oa = NULL;
1631         int                      rc;
1632
1633         ENTRY;
1634
1635         imp = d->opd_obd->u.cli.cl_import;
1636         LASSERT(imp);
1637
1638         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1639         if (req == NULL)
1640                 RETURN(-ENOMEM);
1641
1642         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1643         if (rc) {
1644                 ptlrpc_request_free(req);
1645                 RETURN(rc);
1646         }
1647
1648         /*
1649          * XXX: decide how to handle resend here:
1650          * if we don't resend, then the client may see a wrong file size;
1651          * if we do resend, then the MDS thread can get stuck for quite long;
1652          * and if we don't resend, then the client will also get -EAGAIN !!
1653          * (see LU-7975 and sanity/test_27F use cases)
1654          * but let's decide not to resend/delay this truncate request to the OST
1655          * and allow the Client to decide to resend, in a less aggressive way from
1656          * after_reply(), by returning -EINPROGRESS instead of
1657          * -EWOULDBLOCK/-EAGAIN upon return from ptlrpc_queue_wait() at the
1658          * end of this routine
1659          */
1660         req->rq_no_resend = req->rq_no_delay = 1;
1661
1662         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1663         ptlrpc_at_set_req_timeout(req);
1664
1665         OBD_ALLOC_PTR(oa);
1666         if (oa == NULL)
1667                 GOTO(out, rc = -ENOMEM);
1668
1669         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1670         LASSERT(rc == 0);
1671         oa->o_size = size;
1672         oa->o_blocks = OBD_OBJECT_EOF;
1673         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1674                       OBD_MD_FLID | OBD_MD_FLGROUP;
1675
1676         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1677         LASSERT(body);
1678         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1679
1680         /* XXX: capa support? */
1681         /* osc_pack_capa(req, body, capa); */
1682
1683         ptlrpc_request_set_replen(req);
1684
1685         rc = ptlrpc_queue_wait(req);
1686         if (rc) {
1687                 /* -EAGAIN/-EWOULDBLOCK means the OST is unreachable at the
1688                  * moment since we have decided not to resend/delay, but this
1689                  * could lead to a wrong size being seen on the Client side and
1690                  * even to a process failing on open if it does not handle
1691                  * -EAGAIN itself. So it is better to return -EINPROGRESS and
1692                  * leave the decision to resend to the Client side in after_reply()
1693                  */
1694                 if (rc == -EAGAIN) {
1695                         rc = -EINPROGRESS;
1696                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1697                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1698                                "resend\n");
1699                 } else {
1700                         CERROR("can't punch object: %d\n", rc);
1701                 }
1702         }
1703 out:
1704         ptlrpc_req_finished(req);
1705         if (oa)
1706                 OBD_FREE_PTR(oa);
1707         RETURN(rc);
1708 }
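
/*
 * A minimal illustration of the intended use, assuming the striping was
 * created after the file size had already been set, and that env, dt and
 * size come from the caller:
 *
 *	rc = osp_object_truncate(env, dt, size);
 */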
1709
1710 /**
1711  * Initialize precreation functionality of OSP
1712  *
1713  * Prepares the internal structures; the precreate thread itself is started by osp_init_statfs()
1714  *
1715  * \param[in] d         OSP device
1716  *
1717  * \retval 0            on success
1718  * \retval negative     negated errno on error
1719  */
1720 int osp_init_precreate(struct osp_device *d)
1721 {
1722         ENTRY;
1723
1724         OBD_ALLOC_PTR(d->opd_pre);
1725         if (d->opd_pre == NULL)
1726                 RETURN(-ENOMEM);
1727
1728         /* initially precreation isn't ready */
1729         init_waitqueue_head(&d->opd_pre_user_waitq);
1730         d->opd_pre_status = -EAGAIN;
1731         fid_zero(&d->opd_pre_used_fid);
1732         d->opd_pre_used_fid.f_oid = 1;
1733         fid_zero(&d->opd_pre_last_created_fid);
1734         d->opd_pre_last_created_fid.f_oid = 1;
1735         d->opd_last_id = 0;
1736         d->opd_pre_reserved = 0;
1737         d->opd_got_disconnected = 1;
1738         d->opd_pre_create_slow = 0;
1739         d->opd_pre_create_count = OST_MIN_PRECREATE;
1740         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1741         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1742         d->opd_reserved_mb_high = 0;
1743         d->opd_reserved_mb_low = 0;
1744
1745         RETURN(0);
1746 }
1747
1748 /**
1749  * Finish precreate functionality of OSP
1750  *
1751  * Releases the precreation state of the OSP device. The precreate
1752  * thread and the statfs update timer are stopped separately, by
1753  * osp_statfs_fini().
1754  *
1755  * \param[in] d         OSP device
1756  */
1757 void osp_precreate_fini(struct osp_device *d)
1758 {
1759         ENTRY;
1760
1761         if (d->opd_pre == NULL)
1762                 RETURN_EXIT;
1763
1764         OBD_FREE_PTR(d->opd_pre);
1765         d->opd_pre = NULL;
1766
1767         EXIT;
1768 }
1769
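/**
 * Initialize statfs machinery and start the precreate thread
 *
 * Sets up the statfs cache and its update timer, then (unless the backing
 * device is read-only) starts the "osp-pre" thread which handles both
 * object precreation and periodic statfs updates for the target.
 *
 * \param[in] d         OSP device
 *
 * \retval 0            on success
 * \retval negative     negated errno on error
 */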
1770 int osp_init_statfs(struct osp_device *d)
1771 {
1772         struct task_struct      *task;
1773         struct opt_args         *args;
1774         DECLARE_COMPLETION_ONSTACK(started);
1775         int                     rc;
1776
1777         ENTRY;
1778
1779         spin_lock_init(&d->opd_pre_lock);
1780         init_waitqueue_head(&d->opd_pre_waitq);
1781
1782         /*
1783          * Initialize statfs-related things
1784          */
1785         d->opd_statfs_maxage = 5; /* default update interval */
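        /* the cached statfs data is marked expired from the start (fresh_till
         * lies in the past), so the first check in the precreate thread
         * triggers an immediate statfs update */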
1786         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
1787                                                 1000 * NSEC_PER_SEC);
1788         CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
1789                ktime_get_ns(),
1790                ktime_to_ns(d->opd_statfs_fresh_till));
1791         cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
1792                         (unsigned long)d, 0);
1793
1794         if (d->opd_storage->dd_rdonly)
1795                 RETURN(0);
1796
1797         OBD_ALLOC_PTR(args);
1798         if (!args)
1799                 RETURN(0);
1800         args->opta_dev = d;
1801         args->opta_started = &started;
1802         rc = lu_env_init(&args->opta_env,
1803                          d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1804         if (rc) {
1805                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1806                        rc);
1807                 OBD_FREE_PTR(args);
1808                 RETURN(0);
1809         }
1810
1811         /*
1812          * start thread handling precreation and statfs updates
1813          */
1814         task = kthread_create(osp_precreate_thread, args,
1815                               "osp-pre-%u-%u", d->opd_index, d->opd_group);
1816         if (IS_ERR(task)) {
1817                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1818                 lu_env_fini(&args->opta_env);
1819                 OBD_FREE_PTR(args);
1820                 RETURN(PTR_ERR(task));
1821         }
1822         d->opd_pre_task = task;
1823         wake_up_process(task);
1824         wait_for_completion(&started);
1825
1826         RETURN(0);
1827 }
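
/*
 * Rough pairing of the setup/teardown helpers above, as suggested by this
 * file (the actual call sites live in the OSP device setup/cleanup code):
 *
 *	osp_init_precreate(d);		allocates opd_pre
 *	osp_init_statfs(d);		starts the osp-pre thread and timer
 *	...
 *	osp_statfs_fini(d);		stops the timer and the thread
 *	osp_precreate_fini(d);		frees opd_pre
 */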
1828
1829 void osp_statfs_fini(struct osp_device *d)
1830 {
1831         struct task_struct *task = d->opd_pre_task;
1832         ENTRY;
1833
1834         del_timer(&d->opd_statfs_timer);
1835
1836         d->opd_pre_task = NULL;
1837         if (task)
1838                 kthread_stop(task);
1839
1840         EXIT;
1841 }