LU-10805 libcfs: timer_setup() API changes
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osp/osp_precreate.c
33  *
34  * Lustre OST Proxy Device
35  *
36  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
37  * Author: Mikhail Pershin <mike.pershin@intel.com>
38  * Author: Di Wang <di.wang@intel.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_MDS
42
43 #include <linux/kthread.h>
44
45 #include <lustre_obdo.h>
46
47 #include "osp_internal.h"
48
49 /*
50  * there are two specific states to take care about:
51  *
52  * = import is disconnected =
53  *
54  * = import is inactive =
55  *   in this case osp_declare_create() returns an error
56  *
57  */
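/*
 * Editorial sketch (not part of the original source): a minimal example of
 * how a caller such as osp_declare_create() might drive this precreated
 * pool, assuming the reserve/get-fid pairing documented further down in
 * this file. osp_precreate_reserve() is defined below; the signature of
 * osp_precreate_get_fid() is assumed from how it is referenced here and
 * should be checked against osp_internal.h.
 */
#if 0	/* illustrative only, excluded from the build */
static int example_alloc_ost_object(const struct lu_env *env,
				    struct osp_device *d, struct lu_fid *fid)
{
	int rc;

	/* wait (up to obd_timeout) until a precreated object is available;
	 * may return -ENOSPC, -EAGAIN or -EIO depending on the OST state */
	rc = osp_precreate_reserve(env, d);
	if (rc != 0)
		return rc;

	/* consume one ID from the precreated window */
	return osp_precreate_get_fid(env, d, fid);
}
#endif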
58
59 /**
60  *
61  * Check whether statfs data is expired
62  *
63  * The OSP device caches statfs data for the target; the function checks
64  * whether the data has expired or not.
65  *
66  * \param[in] d         OSP device
67  *
68  * \retval              0 - not expired, 1 - expired
69  */
70 static inline int osp_statfs_need_update(struct osp_device *d)
71 {
72         return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
73 }
74
75 /*
76  * OSP tries to maintain a pool of available objects so that calls to create
77  * objects don't block most of the time.
78  *
79  * Each time OSP gets connected to the OST, precreation starts from cleanup.
80  */
81 static inline bool osp_precreate_running(struct osp_device *d)
82 {
83         return !!(d->opd_pre_thread.t_flags & SVC_RUNNING);
84 }
85
86 static inline bool osp_precreate_stopped(struct osp_device *d)
87 {
88         return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
89 }
90
91 static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
92 {
93         struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
94
95         LASSERT(d);
96         if (d->opd_pre != NULL && osp_precreate_running(d))
97                 wake_up(&d->opd_pre_waitq);
98 }
99
100 /**
101  * RPC interpret callback for OST_STATFS RPC
102  *
103  * An interpretation callback called by ptlrpc for OST_STATFS RPC when it is
104  * replied by the target. It's used to maintain statfs cache for the target.
105  * The function fills data from the reply if successful and schedules another
106  * update.
107  *
108  * \param[in] env       LU environment provided by the caller
109  * \param[in] req       RPC replied
110  * \param[in] aa        callback data
111  * \param[in] rc        RPC result
112  *
113  * \retval 0            on success
114  * \retval negative     negated errno on error
115  */
116 static int osp_statfs_interpret(const struct lu_env *env,
117                                 struct ptlrpc_request *req,
118                                 union ptlrpc_async_args *aa, int rc)
119 {
120         struct obd_import *imp = req->rq_import;
121         struct obd_statfs *msfs;
122         struct osp_device *d;
123         u64 maxage_ns;
124
125         ENTRY;
126
127         aa = ptlrpc_req_async_args(req);
128         d = aa->pointer_arg[0];
129         LASSERT(d);
130
131         if (rc != 0)
132                 GOTO(out, rc);
133
134         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
135         if (msfs == NULL)
136                 GOTO(out, rc = -EPROTO);
137
138         d->opd_statfs = *msfs;
139
140         osp_pre_update_status(d, rc);
141
142         /* schedule next update */
143         maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
144         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
145         mod_timer(&d->opd_statfs_timer,
146                   jiffies + cfs_time_seconds(d->opd_statfs_maxage));
147         d->opd_statfs_update_in_progress = 0;
148
149         CDEBUG(D_CACHE, "updated statfs %p\n", d);
150
151         RETURN(0);
152 out:
153         /* couldn't update statfs, try again with a small delay */
154         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
155         d->opd_statfs_update_in_progress = 0;
156         if (d->opd_pre != NULL && osp_precreate_running(d))
157                 wake_up(&d->opd_pre_waitq);
158
159         if (req->rq_import_generation == imp->imp_generation)
160                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
161                        d->opd_obd->obd_name, rc);
162         RETURN(rc);
163 }
164
165 /**
166  * Send OST_STATFS RPC
167  *
168  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
169  * Also disables scheduled updates because at times OSP may need to refresh
170  * statfs data before expiration. The function doesn't block; instead, the
171  * interpretation callback osp_statfs_interpret() is used.
172  *
173  * \param[in] d         OSP device
174  */
175 static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
176 {
177         u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
178         struct ptlrpc_request   *req;
179         struct obd_import       *imp;
180         union ptlrpc_async_args *aa;
181         int rc;
182
183         ENTRY;
184
185         CDEBUG(D_CACHE, "going to update statfs\n");
186
187         imp = d->opd_obd->u.cli.cl_import;
188         LASSERT(imp);
189
190         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
191         if (req == NULL)
192                 RETURN(-ENOMEM);
193
194         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
195         if (rc) {
196                 ptlrpc_request_free(req);
197                 RETURN(rc);
198         }
199         ptlrpc_request_set_replen(req);
200         req->rq_request_portal = OST_CREATE_PORTAL;
201         ptlrpc_at_set_req_timeout(req);
202
203         req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
204         aa = ptlrpc_req_async_args(req);
205         aa->pointer_arg[0] = d;
206
207         /*
208          * no updates till reply
209          */
210         del_timer(&d->opd_statfs_timer);
211         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
212         d->opd_statfs_update_in_progress = 1;
213
214         ptlrpcd_add_req(req);
215
216         /* we still want to sync changes if no new changes are coming */
217         if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
218                 GOTO(out, rc);
219
220         if (atomic_read(&d->opd_sync_changes)) {
221                 struct thandle *th;
222
223                 th = dt_trans_create(env, d->opd_storage);
224                 if (IS_ERR(th)) {
225                         CERROR("%s: can't sync\n", d->opd_obd->obd_name);
226                         GOTO(out, rc);
227                 }
228                 rc = dt_trans_start_local(env, d->opd_storage, th);
229                 if (rc == 0) {
230                         CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
231                                d->opd_obd->obd_name,
232                                atomic_read(&d->opd_sync_changes));
233                         osp_sync_add_commit_cb_1s(env, d, th);
234                         dt_trans_stop(env, d->opd_storage, th);
235                 }
236         }
237
238 out:
239         RETURN(0);
240 }
241
242 /**
243  * Schedule an immediate update for statfs data
244  *
245  * If cached statfs data claims no free space, but OSP has got a request to
246  * destroy an object (probably releasing some space), then we may need to
247  * refresh cached statfs data sooner than planned. The function checks that
248  * no statfs update is in progress and schedules an immediate update if so.
249  * XXX: there might be a case where removed object(s) do not add free space
250  * (empty objects). If the number of such deletions is high, then we can start
251  * to update statfs too often, causing an RPC storm. Some throttling is needed...
252  *
253  * \param[in] d         OSP device where statfs data needs to be refreshed
254  */
255 void osp_statfs_need_now(struct osp_device *d)
256 {
257         if (!d->opd_statfs_update_in_progress) {
258                 /*
259                  * if current status is -ENOSPC (lack of free space on OST)
260                  * then we should poll OST immediately once object destroy
261                  * is replied
262                  */
263                 d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
264                 del_timer(&d->opd_statfs_timer);
265                 wake_up(&d->opd_pre_waitq);
266         }
267 }
268
269 /**
270  * Return number of precreated objects
271  *
272  * A simple helper to calculate the number of precreated objects on the device.
273  *
274  * \param[in] env       LU environment provided by the caller
275  * \param[in] osp       OSP device
276  *
277  * \retval              the number of the precreated objects
278  */
279 static inline int osp_objs_precreated(const struct lu_env *env,
280                                       struct osp_device *osp)
281 {
282         return osp_fid_diff(&osp->opd_pre_last_created_fid,
283                             &osp->opd_pre_used_fid);
284 }
285
286 /**
287  * Check pool of precreated objects is nearly empty
288  *
289  * We should not wait till the pool of precreated objects is exhausted,
290  * because then there will be a long period of OSP being unavailable for
291  * new creations due to the lengthy precreate RPC. Instead we ask for another
292  * precreation ahead of time and hopefully have it ready before the current
293  * pool is empty. Notice this function relies on external locking.
294  *
295  * \param[in] env       LU environment provided by the caller
296  * \param[in] d         OSP device
297  *
298  * \retval              0 - current pool is good enough, 1 - time to precreate
299  */
300 static inline int osp_precreate_near_empty_nolock(const struct lu_env *env,
301                                                   struct osp_device *d)
302 {
303         int window = osp_objs_precreated(env, d);
304
305         /* don't consider new precreation till the OST is healthy and
306          * has free space */
307         return ((window - d->opd_pre_reserved < d->opd_pre_create_count / 2) &&
308                 (d->opd_pre_status == 0));
309 }
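/*
 * Editorial note (worked example, not part of the original source): with a
 * hypothetical opd_pre_create_count of 128 and 10 objects currently
 * reserved, the check above asks for a new precreate once the unreserved
 * part of the window drops below 64, e.g.:
 *
 *	window - reserved < create_count / 2
 *	  70   -    10    <     128 / 2        ->  60 < 64, time to precreate
 */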
310
311 /**
312  * Check pool of precreated objects
313  *
314  * This is protected version of osp_precreate_near_empty_nolock(), check that
315  * for the details.
316  *
317  * \param[in] env       LU environment provided by the caller
318  * \param[in] d         OSP device
319  *
320  * \retval              0 - current pool is good enough, 1 - time to precreate
321  */
322 static inline int osp_precreate_near_empty(const struct lu_env *env,
323                                            struct osp_device *d)
324 {
325         int rc;
326
327         /* XXX: do we really need locking here? */
328         spin_lock(&d->opd_pre_lock);
329         rc = osp_precreate_near_empty_nolock(env, d);
330         spin_unlock(&d->opd_pre_lock);
331         return rc;
332 }
333
334 /**
335  * Check given sequence is empty
336  *
337  * Returns a binary result whether the given sequence has some IDs left
338  * or not. Find the details in osp_fid_end_seq(). This is a lock protected
339  * version of that function.
340  *
341  * \param[in] env       LU environment provided by the caller
342  * \param[in] osp       OSP device
343  *
344  * \retval              0 - current sequence has no IDs, 1 - otherwise
345  */
346 static inline int osp_create_end_seq(const struct lu_env *env,
347                                      struct osp_device *osp)
348 {
349         struct lu_fid *fid = &osp->opd_pre_used_fid;
350         int rc;
351
352         spin_lock(&osp->opd_pre_lock);
353         rc = osp_fid_end_seq(env, fid);
354         spin_unlock(&osp->opd_pre_lock);
355         return rc;
356 }
357
358 /**
359  * Write FID into last_oid/last_seq files
360  *
361  * The function stores the sequence and the in-sequence id into two dedicated
362  * files. The sync argument can be used to request synchronous commit, so the
363  * function won't return until the updates are committed.
364  *
365  * \param[in] env       LU environment provided by the caller
366  * \param[in] osp       OSP device
367  * \param[in] fid       fid where sequence/id is taken
368  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
369  *
370  * \retval 0            on success
371  * \retval negative     negated errno on error
372  **/
373 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
374                                  struct lu_fid *fid, int sync)
375 {
376         struct osp_thread_info  *oti = osp_env_info(env);
377         struct lu_buf      *lb_oid = &oti->osi_lb;
378         struct lu_buf      *lb_oseq = &oti->osi_lb2;
379         loff_t             oid_off;
380         loff_t             oseq_off;
381         struct thandle    *th;
382         int                   rc;
383         ENTRY;
384
385         if (osp->opd_storage->dd_rdonly)
386                 RETURN(0);
387
388         /* Note: though f_oid is only 32 bits, it will also write 64 bits
389          * for oid to keep compatibility with the previous version. */
390         lb_oid->lb_buf = &fid->f_oid;
391         lb_oid->lb_len = sizeof(u64);
392         oid_off = sizeof(u64) * osp->opd_index;
393
394         lb_oseq->lb_buf = &fid->f_seq;
395         lb_oseq->lb_len = sizeof(u64);
396         oseq_off = sizeof(u64) * osp->opd_index;
397
398         th = dt_trans_create(env, osp->opd_storage);
399         if (IS_ERR(th))
400                 RETURN(PTR_ERR(th));
401
402         th->th_sync |= sync;
403         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
404                                      lb_oid, oid_off, th);
405         if (rc != 0)
406                 GOTO(out, rc);
407
408         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
409                                      lb_oseq, oseq_off, th);
410         if (rc != 0)
411                 GOTO(out, rc);
412
413         rc = dt_trans_start_local(env, osp->opd_storage, th);
414         if (rc != 0)
415                 GOTO(out, rc);
416
417         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
418                              &oid_off, th);
419         if (rc != 0) {
420                 CERROR("%s: can not write to last oid file: rc = %d\n",
421                         osp->opd_obd->obd_name, rc);
422                 GOTO(out, rc);
423         }
424         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
425                              &oseq_off, th);
426         if (rc) {
427                 CERROR("%s: can not write to last seq file: rc = %d\n",
428                         osp->opd_obd->obd_name, rc);
429                 GOTO(out, rc);
430         }
431 out:
432         dt_trans_stop(env, osp->opd_storage, th);
433         RETURN(rc);
434 }
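/*
 * Editorial note (worked example, not part of the original source): both the
 * last_oid and last_seq files are laid out as arrays of 64-bit slots indexed
 * by the OST index, as the offsets above show, so the records are written at
 * byte offset sizeof(u64) * opd_index; e.g. an OSP with opd_index == 3
 * writes at offset 24 in each file.
 */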
435
436 /**
437  * Switch to another sequence
438  *
439  * When the current sequence has no available IDs left, OSP has to switch to
440  * a new sequence. OSP requests one using the regular FLDB protocol
441  * and stores it synchronously before it is used for precreation. This is
442  * needed to keep the sequence referenced (not orphaned); otherwise it's
443  * possible that the OST has some objects precreated and the clients have data
444  * written to them, but after an MDT failover nobody refers to those objects
445  * and OSP has no idea that the sequence needs cleanup.
446  * While this is a very expensive operation, it's supposed to happen very
447  * infrequently because a sequence has 2^32 or 2^48 objects (depending on type).
448  *
449  * \param[in] env       LU environment provided by the caller
450  * \param[in] osp       OSP device
451  *
452  * \retval 0            on success
453  * \retval negative     negated errno on error
454  */
455 static int osp_precreate_rollover_new_seq(struct lu_env *env,
456                                           struct osp_device *osp)
457 {
458         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
459         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
460         int             rc;
461         ENTRY;
462
463         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
464         if (rc != 0) {
465                 CERROR("%s: alloc fid error: rc = %d\n",
466                        osp->opd_obd->obd_name, rc);
467                 RETURN(rc);
468         }
469
470         fid->f_oid = 1;
471         fid->f_ver = 0;
472         LASSERTF(fid_seq(fid) != fid_seq(last_fid),
473                  "fid "DFID", last_fid "DFID"\n", PFID(fid),
474                  PFID(last_fid));
475
476         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
477         if (rc != 0) {
478                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
479                        osp->opd_obd->obd_name, rc);
480                 RETURN(rc);
481         }
482
483         LCONSOLE_INFO("%s: update sequence from %#llx to %#llx\n",
484                       osp->opd_obd->obd_name, fid_seq(last_fid),
485                       fid_seq(fid));
486         /* Update last_xxx to the new seq */
487         spin_lock(&osp->opd_pre_lock);
488         osp->opd_last_used_fid = *fid;
489         osp->opd_gap_start_fid = *fid;
490         osp->opd_pre_used_fid = *fid;
491         osp->opd_pre_last_created_fid = *fid;
492         spin_unlock(&osp->opd_pre_lock);
493
494         RETURN(rc);
495 }
496
497 /**
498  * Find IDs available in current sequence
499  *
500  * The function calculates the highest possible ID and the number of IDs
501  * available in the current sequence OSP is using. The number is limited
502  * artificially by the caller (grow param) and by the number of IDs available
503  * in the sequence by nature. The function doesn't require external
504  * locking.
505  *
506  * \param[in] env       LU environment provided by the caller
507  * \param[in] osp       OSP device
508  * \param[in] fid       FID the caller wants to start with
509  * \param[in] grow      how many the caller wants
510  * \param[out] fid      the highest calculated FID
511  * \param[out] grow     the number of available IDs calculated
512  *
513  * \retval              0 on success, 1 - the sequence is empty
514  */
515 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
516                               struct lu_fid *fid, int *grow)
517 {
518         struct osp_thread_info  *osi = osp_env_info(env);
519         __u64                   end;
520         int                     i = 0;
521
522         if (fid_is_idif(fid)) {
523                 struct lu_fid   *last_fid;
524                 struct ost_id   *oi = &osi->osi_oi;
525                 int rc;
526
527                 spin_lock(&osp->opd_pre_lock);
528                 last_fid = &osp->opd_pre_last_created_fid;
529                 fid_to_ostid(last_fid, oi);
530                 end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
531                 *grow = end - ostid_id(oi);
532                 rc = ostid_set_id(oi, ostid_id(oi) + *grow);
533                 spin_unlock(&osp->opd_pre_lock);
534
535                 if (*grow == 0 || rc)
536                         return 1;
537
538                 ostid_to_fid(fid, oi, osp->opd_index);
539                 return 0;
540         }
541
542         spin_lock(&osp->opd_pre_lock);
543         *fid = osp->opd_pre_last_created_fid;
544         end = fid->f_oid;
545         end = min((end + *grow), (__u64)LUSTRE_DATA_SEQ_MAX_WIDTH);
546         *grow = end - fid->f_oid;
547         fid->f_oid += end - fid->f_oid;
548         spin_unlock(&osp->opd_pre_lock);
549
550         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
551                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
552
553         return *grow > 0 ? 0 : 1;
554 }
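/*
 * Editorial note (worked example, not part of the original source): in the
 * non-IDIF branch above, if opd_pre_last_created_fid has f_oid == 1000 and
 * the caller passes *grow == 128, then
 *
 *	end   = min(1000 + 128, LUSTRE_DATA_SEQ_MAX_WIDTH) = 1128
 *	*grow = 1128 - 1000 = 128,  fid->f_oid = 1128
 *
 * and 0 is returned. Near the end of the sequence *grow collapses to 0 and
 * the function returns 1, which the caller treats as "sequence exhausted".
 */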
555
556 /**
557  * Prepare and send precreate RPC
558  *
559  * The function finds how many objects should be precreated.  Then allocates,
560  * prepares and schedules the precreate RPC synchronously. Upon reply the
561  * function wakes up the threads waiting for new objects on this target. If the
562  * target wasn't able to create all the objects requested, then the next
563  * precreate will ask for fewer objects (i.e. slow precreation down).
564  *
565  * \param[in] env       LU environment provided by the caller
566  * \param[in] d         OSP device
567  *
568  * \retval 0            on success
569  * \retval negative     negated errno on error
570  **/
571 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
572 {
573         struct osp_thread_info  *oti = osp_env_info(env);
574         struct ptlrpc_request   *req;
575         struct obd_import       *imp;
576         struct ost_body         *body;
577         int                      rc, grow, diff;
578         struct lu_fid           *fid = &oti->osi_fid;
579         ENTRY;
580
581         /* don't precreate new objects till OST healthy and has free space */
582         if (unlikely(d->opd_pre_status)) {
583                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
584                        d->opd_obd->obd_name, d->opd_pre_status);
585                 RETURN(0);
586         }
587
588         /*
589          * if connection/initialization is not completed, ignore
590          */
591         imp = d->opd_obd->u.cli.cl_import;
592         LASSERT(imp);
593
594         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
595         if (req == NULL)
596                 RETURN(-ENOMEM);
597         req->rq_request_portal = OST_CREATE_PORTAL;
598         /* we should not resend the create request - we will run delorphan
599          * anyway and kill these objects */
600         req->rq_no_delay = req->rq_no_resend = 1;
601
602         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
603         if (rc) {
604                 ptlrpc_request_free(req);
605                 RETURN(rc);
606         }
607
608         spin_lock(&d->opd_pre_lock);
609         if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
610                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
611         grow = d->opd_pre_create_count;
612         spin_unlock(&d->opd_pre_lock);
613
614         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
615         LASSERT(body);
616
617         *fid = d->opd_pre_last_created_fid;
618         rc = osp_precreate_fids(env, d, fid, &grow);
619         if (rc == 1) {
620                 /* Current seq has been used up */
621                 if (!osp_is_fid_client(d)) {
622                         osp_pre_update_status(d, -ENOSPC);
623                         rc = -ENOSPC;
624                 }
625                 wake_up(&d->opd_pre_waitq);
626                 GOTO(out_req, rc);
627         }
628
629         if (!osp_is_fid_client(d)) {
630                 /* Non-FID client will always send seq 0 because of
631                  * compatibility */
632                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
633                 fid->f_seq = 0;
634         }
635
636         fid_to_ostid(fid, &body->oa.o_oi);
637         body->oa.o_valid = OBD_MD_FLGROUP;
638
639         ptlrpc_request_set_replen(req);
640
641         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
642                 GOTO(ready, rc = 0);
643
644         rc = ptlrpc_queue_wait(req);
645         if (rc) {
646                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
647                        rc);
648                 GOTO(out_req, rc);
649         }
650         LASSERT(req->rq_transno == 0);
651
652         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
653         if (body == NULL)
654                 GOTO(out_req, rc = -EPROTO);
655
656         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
657
658 ready:
659         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
660                 CERROR("%s: precreate fid "DFID" < local used fid "DFID
661                        ": rc = %d\n", d->opd_obd->obd_name,
662                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
663                 GOTO(out_req, rc = -ESTALE);
664         }
665
666         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
667
668         spin_lock(&d->opd_pre_lock);
669         if (diff < grow) {
670                 /* the OST has not managed to create all the
671                  * objects we asked for */
672                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
673                 d->opd_pre_create_slow = 1;
674         } else {
675                 /* the OST is able to keep up with the work,
676                  * we could consider increasing create_count
677                  * next time if needed */
678                 d->opd_pre_create_slow = 0;
679         }
680
681         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
682         fid_to_ostid(fid, &body->oa.o_oi);
683
684         d->opd_pre_last_created_fid = *fid;
685         spin_unlock(&d->opd_pre_lock);
686
687         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
688                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
689                PFID(&d->opd_pre_last_created_fid));
690 out_req:
691         /* now we can wake up all users awaiting objects */
692         osp_pre_update_status(d, rc);
693         wake_up(&d->opd_pre_user_waitq);
694
695         ptlrpc_req_finished(req);
696         RETURN(rc);
697 }
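/*
 * Editorial note (worked example, not part of the original source): the
 * throttling above adapts to the reply. If 128 objects were requested
 * (grow == 128) but the OST only advanced its last created ID by 48
 * (diff == 48), opd_pre_create_count drops to max(48, OST_MIN_PRECREATE) and
 * opd_pre_create_slow is set, so the next RPC asks for fewer objects; once
 * the OST keeps up again (diff >= grow) the slow flag is cleared.
 */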
698
699 /**
700  * Get last precreated object from target (OST)
701  *
702  * Sends synchronous RPC to the target (OST) to learn the last precreated
703  * object. This is later used to remove all unused objects (the orphan
704  * cleanup procedure). Also, the next object after the one we got will be
705  * used as a starting point for new precreates.
706  *
707  * \param[in] env       LU environment provided by the caller
708  * \param[in] d         OSP device
709  *
710  * \retval 0            on success
711  * \retval negative     negated errno on error
712  **/
713 static int osp_get_lastfid_from_ost(const struct lu_env *env,
714                                     struct osp_device *d)
715 {
716         struct ptlrpc_request   *req = NULL;
717         struct obd_import       *imp;
718         struct lu_fid           *last_fid;
719         char                    *tmp;
720         int                     rc;
721         ENTRY;
722
723         imp = d->opd_obd->u.cli.cl_import;
724         LASSERT(imp);
725
726         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
727         if (req == NULL)
728                 RETURN(-ENOMEM);
729
730         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
731                              sizeof(KEY_LAST_FID));
732
733         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
734         if (rc) {
735                 ptlrpc_request_free(req);
736                 RETURN(rc);
737         }
738
739         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
740         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
741
742         req->rq_no_delay = req->rq_no_resend = 1;
743         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
744         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
745
746         ptlrpc_request_set_replen(req);
747
748         rc = ptlrpc_queue_wait(req);
749         if (rc) {
750                 /* bad-bad OST.. let sysadm sort this out */
751                 if (rc == -ENOTSUPP) {
752                         CERROR("%s: server does not support FID: rc = %d\n",
753                                d->opd_obd->obd_name, -ENOTSUPP);
754                 }
755                 ptlrpc_set_import_active(imp, 0);
756                 GOTO(out, rc);
757         }
758
759         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
760         if (last_fid == NULL) {
761                 CERROR("%s: Got last_fid failed.\n", d->opd_obd->obd_name);
762                 GOTO(out, rc = -EPROTO);
763         }
764
765         if (!fid_is_sane(last_fid)) {
766                 CERROR("%s: Got insane last_fid "DFID"\n",
767                        d->opd_obd->obd_name, PFID(last_fid));
768                 GOTO(out, rc = -EPROTO);
769         }
770
771         /* Only update the last used fid, if the OST has objects for
772          * this sequence, i.e. fid_oid > 0 */
773         if (fid_oid(last_fid) > 0)
774                 d->opd_last_used_fid = *last_fid;
775
776         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
777                PFID(last_fid));
778
779 out:
780         ptlrpc_req_finished(req);
781         RETURN(rc);
782 }
783
784 /**
785  * Cleanup orphans on OST
786  *
787  * This function is called in the context of a dedicated thread handling
788  * all the precreation stuff. The function waits till local recovery
789  * is complete, then identifies all the unreferenced objects (orphans)
790  * using the highest ID referenced locally and the highest object
791  * precreated by the target. The found range is subject to removal
792  * using a specially flagged RPC. During this process OSP is marked
793  * unavailable for new objects.
794  *
795  * \param[in] env       LU environment provided by the caller
796  * \param[in] d         OSP device
797  *
798  * \retval 0            on success
799  * \retval negative     negated errno on error
800  */
801 static int osp_precreate_cleanup_orphans(struct lu_env *env,
802                                          struct osp_device *d)
803 {
804         struct osp_thread_info  *osi = osp_env_info(env);
805         struct lu_fid           *last_fid = &osi->osi_fid;
806         struct ptlrpc_request   *req = NULL;
807         struct obd_import       *imp;
808         struct ost_body         *body;
809         struct l_wait_info       lwi = { 0 };
810         int                      update_status = 0;
811         int                      rc;
812         int                      diff;
813
814         ENTRY;
815
816         /*
817          * wait for local recovery to finish, so we can cleanup orphans.
818          * orphans are all objects since "last used" (assigned), but
819          * there might be objects reserved and in some cases they won't
820          * be used. we can't clean them up till we're sure they won't be
821          * used. also we can't allow new reservations because they may
822          * end up getting orphans cleaned up below. so we block
823          * new reservations and wait till all reserved objects are either
824          * used or released.
825          */
826         spin_lock(&d->opd_pre_lock);
827         d->opd_pre_recovering = 1;
828         spin_unlock(&d->opd_pre_lock);
829         /*
830          * The locking above makes sure the opd_pre_reserved check below will
831          * catch all osp_precreate_reserve() calls that find
832          * "!opd_pre_recovering".
833          */
834         l_wait_event(d->opd_pre_waitq,
835                      (!d->opd_pre_reserved && d->opd_recovery_completed) ||
836                      !osp_precreate_running(d) || d->opd_got_disconnected,
837                      &lwi);
838         if (!osp_precreate_running(d) || d->opd_got_disconnected)
839                 GOTO(out, rc = -EAGAIN);
840
841         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
842                d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
843
844         *last_fid = d->opd_last_used_fid;
845         /* The OSP should already have a valid seq now */
846         LASSERT(!fid_is_zero(last_fid));
847         if (fid_oid(&d->opd_last_used_fid) < 2) {
848                 /* lastfid looks strange... ask OST */
849                 rc = osp_get_lastfid_from_ost(env, d);
850                 if (rc)
851                         GOTO(out, rc);
852         }
853
854         imp = d->opd_obd->u.cli.cl_import;
855         LASSERT(imp);
856
857         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
858         if (req == NULL)
859                 GOTO(out, rc = -ENOMEM);
860
861         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
862         if (rc) {
863                 ptlrpc_request_free(req);
864                 req = NULL;
865                 GOTO(out, rc);
866         }
867
868         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
869         if (body == NULL)
870                 GOTO(out, rc = -EPROTO);
871
872         body->oa.o_flags = OBD_FL_DELORPHAN;
873         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
874
875         fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
876
877         ptlrpc_request_set_replen(req);
878
879         /* Don't resend the delorphan req */
880         req->rq_no_resend = req->rq_no_delay = 1;
881
882         rc = ptlrpc_queue_wait(req);
883         if (rc) {
884                 update_status = 1;
885                 GOTO(out, rc);
886         }
887
888         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
889         if (body == NULL)
890                 GOTO(out, rc = -EPROTO);
891
892         /*
893          * the OST provides us with the ID the new pool starts from in body->oa.o_oi
894          */
895         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
896
897         spin_lock(&d->opd_pre_lock);
898         diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
899         if (diff > 0) {
900                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
901                 d->opd_pre_last_created_fid = d->opd_last_used_fid;
902         } else {
903                 d->opd_pre_create_count = OST_MIN_PRECREATE;
904                 d->opd_pre_last_created_fid = *last_fid;
905         }
906         /*
907          * This empties the pre-creation pool and effectively blocks any new
908          * reservations.
909          */
910         LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
911                 LUSTRE_DATA_SEQ_MAX_WIDTH);
912         d->opd_pre_used_fid = d->opd_pre_last_created_fid;
913         d->opd_pre_create_slow = 0;
914         spin_unlock(&d->opd_pre_lock);
915
916         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
917                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
918                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
919 out:
920         if (req)
921                 ptlrpc_req_finished(req);
922
923         /*
924          * If rc is zero, the pre-creation window should have been emptied.
925          * Since waking up the herd would be useless without pre-created
926          * objects, we defer the signal to osp_precreate_send() in that case.
927          */
928         if (rc != 0) {
929                 if (update_status) {
930                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
931                                d->opd_obd->obd_name, rc);
932                         /* we can't proceed from here, the OST seems to
933                          * be in bad shape, better to wait for
934                          * a new instance of the server and repeat
935                          * from the beginning. notify possible waiters
936                          * that this OSP isn't quite functional yet */
937                         osp_pre_update_status(d, rc);
938                 } else {
939                         wake_up(&d->opd_pre_user_waitq);
940                 }
941         } else {
942                 spin_lock(&d->opd_pre_lock);
943                 d->opd_pre_recovering = 0;
944                 spin_unlock(&d->opd_pre_lock);
945         }
946
947         RETURN(rc);
948 }
949
950 /**
951  * Update precreate status using statfs data
952  *
953  * The function decides whether this OSP should be used for new objects.
954  * IOW, whether this OST is used up or has some free space. Cached statfs
955  * data is used to make this decision. If the latest result of statfs
956  * request (rc argument) is not success, then just mark OSP unavailable
957  * right away.
958  *
959  * Add a bit of hysteresis so this flag isn't continually flapping,
960  * and ensure that new files don't get extremely fragmented due to
961  * only a small amount of available space in the filesystem.
962  * We want to set ENOSPC when there is less than the reserved size
963  * free, and clear it when there is at least 2x the reserved size free.
964  * The function updates the current precreation status: functional or not.
965  *
966  * \param[in] d         OSP device
967  * \param[in] rc        new precreate status for device \a d
971  */
972 void osp_pre_update_status(struct osp_device *d, int rc)
973 {
974         struct obd_statfs       *msfs = &d->opd_statfs;
975         int                      old = d->opd_pre_status;
976         __u64                    available;
977
978         d->opd_pre_status = rc;
979         if (rc)
980                 goto out;
981
982         if (likely(msfs->os_type)) {
983                 if (unlikely(d->opd_reserved_mb_high == 0 &&
984                              d->opd_reserved_mb_low == 0)) {
985                         /* Use ~0.1% by default to disable object allocation,
986                          * and ~0.2% to enable, size in MB, set both watermarks
987                          */
988                         spin_lock(&d->opd_pre_lock);
989                         if (d->opd_reserved_mb_high == 0 &&
990                             d->opd_reserved_mb_low == 0) {
991                                 d->opd_reserved_mb_low =
992                                         ((msfs->os_bsize >> 10) *
993                                         msfs->os_blocks) >> 20;
994                                 if (d->opd_reserved_mb_low == 0)
995                                         d->opd_reserved_mb_low = 1;
996                                 d->opd_reserved_mb_high =
997                                         (d->opd_reserved_mb_low << 1) + 1;
998                         }
999                         spin_unlock(&d->opd_pre_lock);
1000                 }
1001                 /* in MB */
1002                 available = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
1003                 if (msfs->os_ffree < 32)
1004                         msfs->os_state |= OS_STATE_ENOINO;
1005                 else if (msfs->os_ffree > 64)
1006                         msfs->os_state &= ~OS_STATE_ENOINO;
1007
1008                 if (available < d->opd_reserved_mb_low)
1009                         msfs->os_state |= OS_STATE_ENOSPC;
1010                 else if (available > d->opd_reserved_mb_high)
1011                         msfs->os_state &= ~OS_STATE_ENOSPC;
1012                 if (msfs->os_state & (OS_STATE_ENOINO | OS_STATE_ENOSPC)) {
1013                         d->opd_pre_status = -ENOSPC;
1014                         if (old != -ENOSPC)
1015                                 CDEBUG(D_INFO, "%s: status: %llu blocks, %llu "
1016                                        "free, %llu avail, %llu MB avail, %u "
1017                                        "hwm -> %d: rc = %d\n",
1018                                        d->opd_obd->obd_name, msfs->os_blocks,
1019                                        msfs->os_bfree, msfs->os_bavail,
1020                                        available, d->opd_reserved_mb_high,
1021                                        d->opd_pre_status, rc);
1022                         CDEBUG(D_INFO,
1023                                "non-committed changes: %u, in progress: %u\n",
1024                                atomic_read(&d->opd_sync_changes),
1025                                atomic_read(&d->opd_sync_rpcs_in_progress));
1026                 } else if (unlikely(old == -ENOSPC)) {
1027                         d->opd_pre_status = 0;
1028                         spin_lock(&d->opd_pre_lock);
1029                         d->opd_pre_create_slow = 0;
1030                         d->opd_pre_create_count = OST_MIN_PRECREATE;
1031                         spin_unlock(&d->opd_pre_lock);
1032                         wake_up(&d->opd_pre_waitq);
1033
1034                         CDEBUG(D_INFO, "%s: space available: %llu blocks, %llu"
1035                                " free, %llu avail, %lluMB avail, %u lwm"
1036                                " -> %d: rc = %d\n", d->opd_obd->obd_name,
1037                                msfs->os_blocks, msfs->os_bfree, msfs->os_bavail,
1038                                available, d->opd_reserved_mb_low,
1039                                d->opd_pre_status, rc);
1040                 }
1041         }
1042 out:
1043         wake_up(&d->opd_pre_user_waitq);
1044 }
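/*
 * Editorial note (worked example, not part of the original source): for a
 * target reporting os_bsize == 4096 and os_blocks == 2^28 (a 1 TiB OST),
 * the defaults computed above are
 *
 *	opd_reserved_mb_low  = ((4096 >> 10) * 2^28) >> 20 = 1024 MB  (~0.1%)
 *	opd_reserved_mb_high = (1024 << 1) + 1             = 2049 MB  (~0.2%)
 *
 * so the device is marked -ENOSPC once less than ~1 GiB is available and
 * re-enabled only when more than ~2 GiB becomes available again.
 */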
1045
1046 /**
1047  * Initialize FID for precreation
1048  *
1049  * For a just created new target, a new sequence should be taken.
1050  * The function checks there is no IDIF in use (if the target was
1051  * added with the older version of Lustre), then requests a new
1052  * sequence from FLDB using the regular protocol. Then this new
1053  * sequence is stored on persistent storage synchronously to prevent
1054  * possible object leakage (for the details see the description of
1055  * osp_precreate_rollover_new_seq()).
1056  *
1057  * \param[in] osp       OSP device
1058  *
1059  * \retval 0            on success
1060  * \retval negative     negated errno on error
1061  */
1062 int osp_init_pre_fid(struct osp_device *osp)
1063 {
1064         struct lu_env           env;
1065         struct osp_thread_info  *osi;
1066         struct lu_client_seq    *cli_seq;
1067         struct lu_fid           *last_fid;
1068         int                     rc;
1069         ENTRY;
1070
1071         LASSERT(osp->opd_pre != NULL);
1072
1073         /* Let's check if the current last_seq/fid is valid,
1074          * otherwise request new sequence from the controller */
1075         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1076                 /* Non-MDT0 can only use normal sequence for
1077                  * OST objects */
1078                 if (fid_is_norm(&osp->opd_last_used_fid))
1079                         RETURN(0);
1080         } else {
1081                 /* Initially MDT0 will start with IDIF, after
1082                  * that it will request new sequence from the
1083                  * controller */
1084                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1085                     fid_is_norm(&osp->opd_last_used_fid))
1086                         RETURN(0);
1087         }
1088
1089         if (!fid_is_zero(&osp->opd_last_used_fid))
1090                 CWARN("%s: invalid last used fid "DFID
1091                       ", try to get new sequence.\n",
1092                       osp->opd_obd->obd_name,
1093                       PFID(&osp->opd_last_used_fid));
1094
1095         rc = lu_env_init(&env, osp->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1096         if (rc) {
1097                 CERROR("%s: init env error: rc = %d\n",
1098                        osp->opd_obd->obd_name, rc);
1099                 RETURN(rc);
1100         }
1101
1102         osi = osp_env_info(&env);
1103         last_fid = &osi->osi_fid;
1104         fid_zero(last_fid);
1105         /* For a freshly formatted fs, it will allocate a new sequence first */
1106         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1107                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1108                 rc = seq_client_get_seq(&env, cli_seq, &last_fid->f_seq);
1109                 if (rc != 0) {
1110                         CERROR("%s: alloc fid error: rc = %d\n",
1111                                osp->opd_obd->obd_name, rc);
1112                         GOTO(out, rc);
1113                 }
1114         } else {
1115                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1116         }
1117         last_fid->f_oid = 1;
1118         last_fid->f_ver = 0;
1119
1120         spin_lock(&osp->opd_pre_lock);
1121         osp->opd_last_used_fid = *last_fid;
1122         osp->opd_pre_used_fid = *last_fid;
1123         osp->opd_pre_last_created_fid = *last_fid;
1124         spin_unlock(&osp->opd_pre_lock);
1125         rc = osp_write_last_oid_seq_files(&env, osp, last_fid, 1);
1126         if (rc != 0) {
1127                 CERROR("%s: write fid error: rc = %d\n",
1128                        osp->opd_obd->obd_name, rc);
1129                 GOTO(out, rc);
1130         }
1131 out:
1132         lu_env_fini(&env);
1133         RETURN(rc);
1134 }
1135
1136 /**
1137  * The core of precreate functionality
1138  *
1139  * The function implements the main precreation loop. Basically it
1140  * involves connecting to the target, precreate FID initialization,
1141  * identifying and removing orphans, then serving precreation. As
1142  * part of the latter, the thread is responsible for statfs data
1143  * updates. The precreation is mostly driven by other threads
1144  * asking for new OST objects - those askers wake the thread when
1145  * the number of precreated objects reaches the low watermark.
1146  * After a disconnect, the sequence above repeats. This keeps going
1147  * until the thread is requested to stop.
1148  *
1149  * \param[in] _arg      private data for the thread (OSP device to handle)
1150  *
1151  * \retval 0            on success
1152  * \retval negative     negated errno on error
1153  */
1154 static int osp_precreate_thread(void *_arg)
1155 {
1156         struct osp_device       *d = _arg;
1157         struct ptlrpc_thread    *thread = &d->opd_pre_thread;
1158         struct l_wait_info       lwi = { 0 };
1159         struct l_wait_info       lwi2 = LWI_TIMEOUT(cfs_time_seconds(5),
1160                                                     back_to_sleep, NULL);
1161         struct lu_env            env;
1162         int                      rc;
1163
1164         ENTRY;
1165
1166         rc = lu_env_init(&env, d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1167         if (rc) {
1168                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1169                        rc);
1170
1171                 spin_lock(&d->opd_pre_lock);
1172                 thread->t_flags = SVC_STOPPED;
1173                 spin_unlock(&d->opd_pre_lock);
1174                 wake_up(&thread->t_ctl_waitq);
1175
1176                 RETURN(rc);
1177         }
1178
1179         spin_lock(&d->opd_pre_lock);
1180         thread->t_flags = SVC_RUNNING;
1181         spin_unlock(&d->opd_pre_lock);
1182         wake_up(&thread->t_ctl_waitq);
1183
1184         while (osp_precreate_running(d)) {
1185                 /*
1186                  * need to be connected to OST
1187                  */
1188                 while (osp_precreate_running(d)) {
1189                         if (d->opd_pre_recovering &&
1190                             d->opd_imp_connected &&
1191                             !d->opd_got_disconnected)
1192                                 break;
1193                         l_wait_event(d->opd_pre_waitq,
1194                                      !osp_precreate_running(d) ||
1195                                      d->opd_new_connection,
1196                                      &lwi);
1197
1198                         if (!d->opd_new_connection)
1199                                 continue;
1200
1201                         d->opd_new_connection = 0;
1202                         d->opd_got_disconnected = 0;
1203                         break;
1204                 }
1205
1206                 if (!osp_precreate_running(d))
1207                         break;
1208
1209                 LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1210                 /* Sigh, fid client is not ready yet */
1211                 if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
1212                         continue;
1213
1214                 /* Init fid for osp_precreate if necessary */
1215                 rc = osp_init_pre_fid(d);
1216                 if (rc != 0) {
1217                         class_export_put(d->opd_exp);
1218                         d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
1219                         CERROR("%s: init pre fid error: rc = %d\n",
1220                                d->opd_obd->obd_name, rc);
1221                         continue;
1222                 }
1223
1224                 if (osp_statfs_update(&env, d)) {
1225                         l_wait_event(d->opd_pre_waitq,
1226                                      !osp_precreate_running(d), &lwi2);
1227                         continue;
1228                 }
1229
1230                 /*
1231                  * Clean up orphans or recreate missing objects.
1232                  */
1233                 rc = osp_precreate_cleanup_orphans(&env, d);
1234                 if (rc != 0) {
1235                         schedule_timeout_interruptible(cfs_time_seconds(1));
1236                         continue;
1237                 }
1238                 /*
1239                  * connected, can handle precreates now
1240                  */
1241                 while (osp_precreate_running(d)) {
1242                         l_wait_event(d->opd_pre_waitq,
1243                                      !osp_precreate_running(d) ||
1244                                      osp_precreate_near_empty(&env, d) ||
1245                                      osp_statfs_need_update(d) ||
1246                                      d->opd_got_disconnected, &lwi);
1247
1248                         if (!osp_precreate_running(d))
1249                                 break;
1250
1251                         /* something happened to the connection
1252                          * have to start from the beginning */
1253                         if (d->opd_got_disconnected)
1254                                 break;
1255
1256                         if (osp_statfs_need_update(d))
1257                                 if (osp_statfs_update(&env, d))
1258                                         break;
1259
1260                         /* To avoid handling different seq in precreate/orphan
1261                          * cleanup, it will hold precreate until current seq is
1262                          * used up. */
1263                         if (unlikely(osp_precreate_end_seq(&env, d) &&
1264                             !osp_create_end_seq(&env, d)))
1265                                 continue;
1266
1267                         if (unlikely(osp_precreate_end_seq(&env, d) &&
1268                                      osp_create_end_seq(&env, d))) {
1269                                 LCONSOLE_INFO("%s:%#llx is used up."
1270                                               " Update to new seq\n",
1271                                               d->opd_obd->obd_name,
1272                                          fid_seq(&d->opd_pre_last_created_fid));
1273                                 rc = osp_precreate_rollover_new_seq(&env, d);
1274                                 if (rc)
1275                                         continue;
1276                         }
1277
1278                         if (osp_precreate_near_empty(&env, d)) {
1279                                 rc = osp_precreate_send(&env, d);
1280                                 /* osp_precreate_send() sets opd_pre_status
1281                          * in case of error, which prevents the use of the
1282                          * failed device. */
1283                                 if (rc < 0 && rc != -ENOSPC &&
1284                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1285                                         CERROR("%s: cannot precreate objects:"
1286                                                " rc = %d\n",
1287                                                d->opd_obd->obd_name, rc);
1288                         }
1289                 }
1290         }
1291
1292         thread->t_flags = SVC_STOPPED;
1293         lu_env_fini(&env);
1294         wake_up(&thread->t_ctl_waitq);
1295
1296         RETURN(0);
1297 }
1298
1299 /**
1300  * Check when to stop waiting for precreated objects.
1301  *
1302  * The caller wanting a new OST object can't wait indefinitely. The
1303  * function checks a few conditions including available new OST
1304  * objects, disconnected OST, lack of space with no pending destroys,
1305  * etc. IOW, it checks whether the current OSP state is good to keep
1306  * waiting or it's better to give up.
1307  *
1308  * \param[in] env       LU environment provided by the caller
1309  * \param[in] d         OSP device
1310  *
1311  * \retval              0 - keep waiting, 1 - no luck
1312  */
1313 static int osp_precreate_ready_condition(const struct lu_env *env,
1314                                          struct osp_device *d)
1315 {
1316         if (d->opd_pre_recovering)
1317                 return 0;
1318
1319         /* ready if got enough precreated objects */
1320         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
1321         if (d->opd_pre_reserved + 1 < osp_objs_precreated(env, d))
1322                 return 1;
1323
1324         /* ready if OST reported no space and no destroys in progress */
1325         if (atomic_read(&d->opd_sync_changes) +
1326             atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
1327             d->opd_pre_status == -ENOSPC)
1328                 return 1;
1329
1330         /* Bail out if I/O fails to the OST */
1331         if (d->opd_pre_status != 0 &&
1332             d->opd_pre_status != -EAGAIN &&
1333             d->opd_pre_status != -ENODEV &&
1334             d->opd_pre_status != -ENOTCONN &&
1335             d->opd_pre_status != -ENOSPC) {
1336                 /* DEBUG LU-3230 */
1337                 if (d->opd_pre_status != -EIO)
1338                         CERROR("%s: precreate failed opd_pre_status %d\n",
1339                                d->opd_obd->obd_name, d->opd_pre_status);
1340                 return 1;
1341         }
1342
1343         return 0;
1344 }
1345
1346 static int osp_precreate_timeout_condition(void *data)
1347 {
1348         struct osp_device *d = data;
1349
1350         CDEBUG(D_HA, "%s: slow creates, last="DFID", next="DFID", "
1351               "reserved=%llu, sync_changes=%u, "
1352               "sync_rpcs_in_progress=%d, status=%d\n",
1353               d->opd_obd->obd_name, PFID(&d->opd_pre_last_created_fid),
1354               PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1355               atomic_read(&d->opd_sync_changes),
1356               atomic_read(&d->opd_sync_rpcs_in_progress),
1357               d->opd_pre_status);
1358
1359         return 1;
1360 }
1361
1362 /**
1363  * Reserve object in precreate pool
1364  *
1365  * When the caller wants to create a new object on this target (target
1366  * represented by the given OSP), it should declare this intention using
1367  * the regular ->dt_declare_create() OSD API method. Then OSP will try
1368  * to reserve an object in the existing precreated pool or wait up to
1369  * obd_timeout for an available object to appear in the pool (a dedicated
1370  * thread does the real precreation in the background). The object can be
1371  * consumed later with osp_precreate_get_fid() or be released with a call to
1372  * lu_object_put(). Notice the function doesn't reserve a specific ID, just
1373  * some ID. The actual ID assignment happens in osp_precreate_get_fid().
1374  * If space on the target is short and there is a pending object destroy,
1375  * then the function forces a local commit to speed up space release (see
1376  * osp_sync.c for the details).
1377  *
1378  * \param[in] env       LU environment provided by the caller
1379  * \param[in] d         OSP device
1380  *
1381  * \retval              0 on success
1382  * \retval              -ENOSPC when no space on OST
1383  * \retval              -EAGAIN try later, slow precreation in progress
1384  * \retval              -EIO when no access to OST
1385  */
1386 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
1387 {
1388         time64_t expire = ktime_get_seconds() + obd_timeout;
1389         struct l_wait_info lwi;
1390         int precreated, rc, synced = 0;
1391
1392         ENTRY;
1393
1394         LASSERTF(osp_objs_precreated(env, d) >= 0, "Last created FID "DFID
1395                  "Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1396                  PFID(&d->opd_pre_used_fid));
1397
1398         /* opd_pre_max_create_count == 0 means this OST must not be used. */
1399         if (d->opd_pre_max_create_count == 0)
1400                 RETURN(-ENOBUFS);
1401
1402         /*
1403          * wait till:
1404          *  - preallocation is done
1405          *  - no free space expected soon
1406          *  - can't connect to OST for too long (obd_timeout)
1407          *  - OST can allocate fid sequence.
1408          */
1409         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1410                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1411
1412                 /*
1413                  * increase number of precreations
1414                  */
1415                 precreated = osp_objs_precreated(env, d);
1416                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1417                     d->opd_pre_create_slow == 0 &&
1418                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1419                         spin_lock(&d->opd_pre_lock);
1420                         d->opd_pre_create_slow = 1;
1421                         d->opd_pre_create_count *= 2;
1422                         spin_unlock(&d->opd_pre_lock);
1423                 }
1424
1425                 spin_lock(&d->opd_pre_lock);
1426                 precreated = osp_objs_precreated(env, d);
1427                 if (precreated > d->opd_pre_reserved &&
1428                     !d->opd_pre_recovering) {
1429                         d->opd_pre_reserved++;
1430                         spin_unlock(&d->opd_pre_lock);
1431                         rc = 0;
1432
1433                         /* XXX: don't wake up if precreation is in progress */
1434                         if (osp_precreate_near_empty_nolock(env, d) &&
1435                            !osp_precreate_end_seq_nolock(env, d))
1436                                 wake_up(&d->opd_pre_waitq);
1437
1438                         break;
1439                 }
1440                 spin_unlock(&d->opd_pre_lock);
1441
1442                 /*
1443                  * all precreated objects have been used and the no-space
1444                  * status leaves us no chance to succeed very soon,
1445                  * but if there is a destroy in progress, then we should
1446                  * wait till that is done - some space might be released
1447                  */
1448                 if (unlikely(rc == -ENOSPC)) {
1449                         if (atomic_read(&d->opd_sync_changes) && synced == 0) {
1450                                 /* force local commit to release space */
1451                                 dt_commit_async(env, d->opd_storage);
1452                                 osp_sync_force(env, d);
1453                                 synced = 1;
1454                         }
1455                         if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
1456                                 /* just wait till destroys are done */
1457                 /* see l_wait_event() a few lines below */
1458                         }
1459                         if (atomic_read(&d->opd_sync_changes) +
1460                             atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
1461                                 /* no hope for free space */
1462                                 break;
1463                         }
1464                 }
1465
1466                 /* XXX: don't wake up if precreation is in progress */
1467                 wake_up(&d->opd_pre_waitq);
1468
1469                 lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
1470                                   osp_precreate_timeout_condition, d);
1471                 if (ktime_get_seconds() >= expire) {
1472                         rc = -ETIMEDOUT;
1473                         break;
1474                 }
1475
1476                 l_wait_event(d->opd_pre_user_waitq,
1477                              osp_precreate_ready_condition(env, d), &lwi);
1478         }
1479
1480         RETURN(rc);
1481 }
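
/*
 * Usage sketch (hypothetical caller, for illustration only; error
 * handling abbreviated). The reservation is taken while the create is
 * being declared; the actual FID is assigned later, once a transaction
 * has been started, via osp_precreate_get_fid():
 *
 *	rc = osp_precreate_reserve(env, d);
 *	if (rc != 0)
 *		return rc;
 *	...start the transaction...
 *	rc = osp_precreate_get_fid(env, d, fid);
 */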
1482
1483 /**
1484  * Get a FID from the precreation pool
1485  *
1486  * The function is a companion to osp_precreate_reserve() - it assigns
1487  * a specific FID from the precreated pool. The function should be called
1488  * only if the call to osp_precreate_reserve() was successful. The function
1489  * updates local storage to remember the highest object ID referenced
1490  * by the node in the given sequence.
1491  *
1492  * A very important detail: this is supposed to be called once the
1493  * transaction is started, so the on-disk update will be atomic with the
1494  * data (like LOVEA) referring to this object. Then the object won't be
1495  * leaked: either it's referenced by the committed transaction or it's
1496  * subject to the orphan cleanup procedure.
1497  *
1498  * \param[in] env       LU environment provided by the caller
1499  * \param[in] d         OSP device
1500  * \param[out] fid      generated FID
1501  *
1502  * \retval 0            on success
1503  * \retval negative     negated errno on error
1504  */
1505 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1506                           struct lu_fid *fid)
1507 {
1508         struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
1509         /* grab next id from the pool */
1510         spin_lock(&d->opd_pre_lock);
1511
1512         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1513                              &d->opd_pre_last_created_fid) < 0,
1514                  "next fid "DFID" last created fid "DFID"\n",
1515                  PFID(&d->opd_pre_used_fid),
1516                  PFID(&d->opd_pre_last_created_fid));
1517
1518         /*
1519          * When the sequence is used up, a new one should be allocated in
1520          * osp_precreate_rollover_new_seq. So ASSERT here to avoid
1521          * objid overflow.
1522          */
1523         LASSERTF(osp_fid_end_seq(env, pre_used_fid) == 0,
1524                  "next fid "DFID" last created fid "DFID"\n",
1525                  PFID(&d->opd_pre_used_fid),
1526                  PFID(&d->opd_pre_last_created_fid));
1527         /* Non-IDIF fids shouldn't get here with oid == 0xFFFFFFFF. */
1528         if (fid_is_idif(pre_used_fid) &&
1529             unlikely(fid_oid(pre_used_fid) == LUSTRE_DATA_SEQ_MAX_WIDTH))
1530                 pre_used_fid->f_seq++;
1531
1532         d->opd_pre_used_fid.f_oid++;
1533         memcpy(fid, &d->opd_pre_used_fid, sizeof(*fid));
1534         d->opd_pre_reserved--;
1535         /*
1536          * last_used_id must be changed along with getting the new id,
1537          * otherwise we might miscalculate the gap, causing object loss or leak
1538          */
1539         osp_update_last_fid(d, fid);
1540         spin_unlock(&d->opd_pre_lock);
1541
1542         /*
1543          * the main thread has probably suspended orphan cleanup till
1544          * all reservations are released; see the comment in
1545          * osp_precreate_thread() just before orphan cleanup
1546          */
1547         if (unlikely(d->opd_pre_reserved == 0 &&
1548                      (d->opd_pre_recovering || d->opd_pre_status)))
1549                 wake_up(&d->opd_pre_waitq);
1550
1551         return 0;
1552 }
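
/*
 * Bookkeeping sketch (illustrative numbers only, not from a real run):
 * if opd_pre_used_fid.f_oid == 100 and opd_pre_last_created_fid.f_oid
 * == 132, then 32 precreated objects are still unused.  A successful
 * osp_precreate_reserve() bumps opd_pre_reserved (say, to 1), and the
 * matching osp_precreate_get_fid() hands out oid 101, advances
 * opd_pre_used_fid and drops opd_pre_reserved back to 0.
 */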
1553
1554 /*
1555  * Set the size (a regular attribute) on an object
1556  *
1557  * When striping is created late, it's possible that the size is already
1558  * initialized on the file. Then the new striping should inherit the size
1559  * from the file. The function sets the size on the object using the
1560  * regular protocol (OST_PUNCH).
1561  * XXX: should be re-implemented using OUT ?
1562  *
1563  * \param[in] env       LU environment provided by the caller
1564  * \param[in] dt        object
1565  * \param[in] size      size to set.
1566  *
1567  * \retval 0            on success
1568  * \retval negative     negated errno on error
1569  */
1570 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1571                         __u64 size)
1572 {
1573         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1574         struct ptlrpc_request   *req = NULL;
1575         struct obd_import       *imp;
1576         struct ost_body         *body;
1577         struct obdo             *oa = NULL;
1578         int                      rc;
1579
1580         ENTRY;
1581
1582         imp = d->opd_obd->u.cli.cl_import;
1583         LASSERT(imp);
1584
1585         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1586         if (req == NULL)
1587                 RETURN(-ENOMEM);
1588
1589         /* XXX: capa support? */
1590         /* osc_set_capa_size(req, &RMF_CAPA1, capa); */
1591         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1592         if (rc) {
1593                 ptlrpc_request_free(req);
1594                 RETURN(rc);
1595         }
1596
1597         /*
1598          * XXX: decide how we handle resend here:
1599          * if we don't resend, then the client may see a wrong file size;
1600          * if we do resend, then the MDS thread can get stuck for quite long;
1601          * and if we don't resend, then the client will also get -EWOULDBLOCK !!
1602          * (see LU-7975 and sanity/test_27F use cases)
1603          * but let's decide not to resend/delay this truncate request to the OST
1604          * and allow the client to decide to resend, in a less aggressive way from
1605          * after_reply(), by returning -EINPROGRESS instead of
1606          * -EAGAIN/-EWOULDBLOCK upon return from ptlrpc_queue_wait() at the
1607          * end of this routine
1608          */
1609         req->rq_no_resend = req->rq_no_delay = 1;
1610
1611         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1612         ptlrpc_at_set_req_timeout(req);
1613
1614         OBD_ALLOC_PTR(oa);
1615         if (oa == NULL)
1616                 GOTO(out, rc = -ENOMEM);
1617
1618         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1619         LASSERT(rc == 0);
1620         oa->o_size = size;
1621         oa->o_blocks = OBD_OBJECT_EOF;
1622         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1623                       OBD_MD_FLID | OBD_MD_FLGROUP;
1624
1625         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1626         LASSERT(body);
1627         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1628
1629         /* XXX: capa support? */
1630         /* osc_pack_capa(req, body, capa); */
1631
1632         ptlrpc_request_set_replen(req);
1633
1634         rc = ptlrpc_queue_wait(req);
1635         if (rc) {
1636                 /* -EWOULDBLOCK/-EAGAIN means the OST is unreachable at the
1637                  * moment since we have decided not to resend/delay, but this
1638                  * could lead to a wrong size being seen on the client side and
1639                  * even a process trying to open the file exiting/failing if it
1640                  * doesn't handle -EAGAIN itself. So it is better to return
1641                  * -EINPROGRESS instead and leave the decision to resend to the
1642                  * client side in after_reply().
1643                  */
1643                 if (rc == -EWOULDBLOCK) {
1644                         rc = -EINPROGRESS;
1645                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1646                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1647                                "resend\n");
1648                 } else {
1649                         CERROR("can't punch object: %d\n", rc);
1650                 }
1651         }
1652 out:
1653         ptlrpc_req_finished(req);
1654         if (oa)
1655                 OBD_FREE_PTR(oa);
1656         RETURN(rc);
1657 }
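
/*
 * Usage sketch (hypothetical caller, names illustrative only): a
 * late-created striping that must inherit an already-set file size
 * could push that size to the OST object like this:
 *
 *	if (inherited_size != 0)
 *		rc = osp_object_truncate(env, dt, inherited_size);
 *
 * A returned -EINPROGRESS means the punch was not resent here and the
 * client is expected to retry (see the comments above).
 */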
1658
1659 /**
1660  * Initialize precreation functionality of OSP
1661  *
1662  * Prepares all the internal structures and starts the precreate thread
1663  *
1664  * \param[in] d         OSP device
1665  *
1666  * \retval 0            on success
1667  * \retval negative     negated errno on error
1668  */
1669 int osp_init_precreate(struct osp_device *d)
1670 {
1671         struct l_wait_info       lwi = { 0 };
1672         struct task_struct              *task;
1673
1674         ENTRY;
1675
1676         OBD_ALLOC_PTR(d->opd_pre);
1677         if (d->opd_pre == NULL)
1678                 RETURN(-ENOMEM);
1679
1680         /* initially precreation isn't ready */
1681         d->opd_pre_status = -EAGAIN;
1682         fid_zero(&d->opd_pre_used_fid);
1683         d->opd_pre_used_fid.f_oid = 1;
1684         fid_zero(&d->opd_pre_last_created_fid);
1685         d->opd_pre_last_created_fid.f_oid = 1;
1686         d->opd_pre_reserved = 0;
1687         d->opd_got_disconnected = 1;
1688         d->opd_pre_create_slow = 0;
1689         d->opd_pre_create_count = OST_MIN_PRECREATE;
1690         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1691         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1692         d->opd_reserved_mb_high = 0;
1693         d->opd_reserved_mb_low = 0;
1694
1695         spin_lock_init(&d->opd_pre_lock);
1696         init_waitqueue_head(&d->opd_pre_waitq);
1697         init_waitqueue_head(&d->opd_pre_user_waitq);
1698         thread_set_flags(&d->opd_pre_thread, SVC_INIT);
1699         init_waitqueue_head(&d->opd_pre_thread.t_ctl_waitq);
1700
1701         /*
1702          * Initialize statfs-related things
1703          */
1704         d->opd_statfs_maxage = 5; /* default update interval */
1705         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
1706                                                 1000 * NSEC_PER_SEC);
1707         CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
1708                ktime_get_ns(),
1709                ktime_to_ns(d->opd_statfs_fresh_till));
1710         cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
1711                         (unsigned long)d, 0);
1712
1713         if (d->opd_storage->dd_rdonly)
1714                 RETURN(0);
1715
1716         /*
1717          * start thread handling precreation and statfs updates
1718          */
1719         task = kthread_run(osp_precreate_thread, d,
1720                            "osp-pre-%u-%u", d->opd_index, d->opd_group);
1721         if (IS_ERR(task)) {
1722                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1723                 RETURN(PTR_ERR(task));
1724         }
1725
1726         l_wait_event(d->opd_pre_thread.t_ctl_waitq,
1727                      osp_precreate_running(d) || osp_precreate_stopped(d),
1728                      &lwi);
1729
1730         RETURN(0);
1731 }
1732
1733 /**
1734  * Finish precreate functionality of OSP
1735  *
1736  *
1737  * Asks all the activity (the thread, the update timer) to stop, then
1738  * waits till that is done.
1739  *
1740  * \param[in] d         OSP device
1741  */
1742 void osp_precreate_fini(struct osp_device *d)
1743 {
1744         struct ptlrpc_thread *thread = &d->opd_pre_thread;
1745         ENTRY;
1746
1747         del_timer(&d->opd_statfs_timer);
1748
1749         if (d->opd_pre == NULL)
1750                 RETURN_EXIT;
1751
1752         if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
1753                 thread->t_flags = SVC_STOPPING;
1754                 wake_up(&d->opd_pre_waitq);
1755                 wait_event(thread->t_ctl_waitq, thread_is_stopped(thread));
1756         }
1757
1758         OBD_FREE_PTR(d->opd_pre);
1759         d->opd_pre = NULL;
1760
1761         EXIT;
1762 }
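
/*
 * Lifecycle sketch (hypothetical device setup/cleanup path, for
 * illustration only): osp_init_precreate() and osp_precreate_fini()
 * are expected to be paired:
 *
 *	rc = osp_init_precreate(d);
 *	if (rc != 0)
 *		goto out;
 *	...device is in use...
 *	osp_precreate_fini(d);
 */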
1763