1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osp/osp_precreate.c
33  *
34  * Lustre OST Proxy Device
35  *
36  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
37  * Author: Mikhail Pershin <mike.pershin@intel.com>
38  * Author: Di Wang <di.wang@intel.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_MDS
42
43 #include <linux/kthread.h>
44
45 #include <lustre_obdo.h>
46
47 #include "osp_internal.h"
48
49 /*
50  * there are two specific states to take care about:
51  *
52  * = import is disconnected =
53  *
54  * = import is inactive =
55  *   in this case osp_declare_create() returns an error
56  *
57  */
58
59 /**
60  * Check whether statfs data is expired
61  *
62  * OSP device caches statfs data for the target, the function checks
63  * whether the data is expired or not.
64  *
65  * \param[in] d         OSP device
66  *
67  * \retval              0 - not expired, 1 - expired
68  */
69 static inline int osp_statfs_need_update(struct osp_device *d)
70 {
71         return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
72 }
73
74 /*
75  * OSP tries to maintain a pool of available objects so that calls to create
76  * objects don't block most of the time
77  *
78  * each time OSP gets connected to an OST, we should start with precreation cleanup
79  */
80 static inline bool osp_precreate_running(struct osp_device *d)
81 {
82         return !!(d->opd_pre_thread.t_flags & SVC_RUNNING);
83 }
84
85 static inline bool osp_precreate_stopped(struct osp_device *d)
86 {
87         return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
88 }
89
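/*
 * Timer callback for the scheduled statfs refresh: if the dedicated
 * precreate thread is running, wake it up so it can update the cached
 * statfs data for the target.
 */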
90 static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
91 {
92         struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
93
94         LASSERT(d);
95         if (osp_precreate_running(d))
96                 wake_up(&d->opd_pre_waitq);
97 }
98
99 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs);
100
101 /*
102  * The function updates the current precreation status if broken, and
103  * updates the cached statfs state if functional, then wakes up waiters.
104  * We don't clear opd_pre_status directly here, but rather leave this
105  * to osp_pre_update_msfs() to do if everything is OK so that we don't
106  * have a race to clear opd_pre_status and then set it to -ENOSPC again.
107  *
108  * \param[in] d         OSP device
109  * \param[in] msfs      statfs data
110  * \param[in] rc        new precreate status for device \a d
111  */
112 static void osp_pre_update_status_msfs(struct osp_device *d,
113                                        struct obd_statfs *msfs, int rc)
114 {
115         if (rc)
116                 d->opd_pre_status = rc;
117         else
118                 osp_pre_update_msfs(d, msfs);
119
120         wake_up(&d->opd_pre_user_waitq);
121 }
122
123 /* Pass in the old statfs data in case the limits have changed */
124 void osp_pre_update_status(struct osp_device *d, int rc)
125 {
126         osp_pre_update_status_msfs(d, &d->opd_statfs, rc);
127 }
128
129
130 /**
131  * RPC interpret callback for OST_STATFS RPC
132  *
133  * An interpretation callback called by ptlrpc for the OST_STATFS RPC when it
134  * is replied to by the target. It's used to maintain the statfs cache for the
135  * target. The function fills in data from the reply if successful and
136  * schedules another update.
137  *
138  * \param[in] env       LU environment provided by the caller
139  * \param[in] req       RPC replied
140  * \param[in] aa        callback data
141  * \param[in] rc        RPC result
142  *
143  * \retval 0            on success
144  * \retval negative     negated errno on error
145  */
146 static int osp_statfs_interpret(const struct lu_env *env,
147                                 struct ptlrpc_request *req, void *args, int rc)
148 {
149         union ptlrpc_async_args *aa = args;
150         struct obd_import *imp = req->rq_import;
151         struct obd_statfs *msfs;
152         struct osp_device *d;
153         u64 maxage_ns;
154
155         ENTRY;
156
157         aa = ptlrpc_req_async_args(aa, req);
158         d = aa->pointer_arg[0];
159         LASSERT(d);
160
161         if (rc != 0)
162                 GOTO(out, rc);
163
164         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
165         if (msfs == NULL)
166                 GOTO(out, rc = -EPROTO);
167
168         if (d->opd_pre)
169                 osp_pre_update_status_msfs(d, msfs, 0);
170         else
171                 d->opd_statfs = *msfs;
172
173         /* schedule next update */
174         maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
175         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
176         mod_timer(&d->opd_statfs_timer,
177                   jiffies + cfs_time_seconds(d->opd_statfs_maxage));
178         d->opd_statfs_update_in_progress = 0;
179
180         CDEBUG(D_CACHE, "updated statfs %p\n", d);
181
182         RETURN(0);
183 out:
184         /* couldn't update statfs, try again with a small delay */
185         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
186         d->opd_statfs_update_in_progress = 0;
187         if (d->opd_pre != NULL && osp_precreate_running(d))
188                 wake_up(&d->opd_pre_waitq);
189
190         if (req->rq_import_generation == imp->imp_generation)
191                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
192                        d->opd_obd->obd_name, rc);
193         RETURN(rc);
194 }
195
196 /**
197  * Send OST_STATFS RPC
198  *
199  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
200  * Also disables scheduled updates, as at times OSP may need to refresh
201  * statfs data before expiration. The function doesn't block; instead,
202  * the interpretation callback osp_statfs_interpret() is used.
203  *
204  * \param[in] d         OSP device
205  */
206 static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
207 {
208         u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
209         struct ptlrpc_request   *req;
210         struct obd_import       *imp;
211         union ptlrpc_async_args *aa;
212         int rc;
213
214         ENTRY;
215
216         CDEBUG(D_CACHE, "going to update statfs\n");
217
218         imp = d->opd_obd->u.cli.cl_import;
219         LASSERT(imp);
220
221         req = ptlrpc_request_alloc(imp,
222                            d->opd_pre ? &RQF_OST_STATFS : &RQF_MDS_STATFS);
223         if (req == NULL)
224                 RETURN(-ENOMEM);
225
226         rc = ptlrpc_request_pack(req,
227                          d->opd_pre ? LUSTRE_OST_VERSION : LUSTRE_MDS_VERSION,
228                          d->opd_pre ? OST_STATFS : MDS_STATFS);
229         if (rc) {
230                 ptlrpc_request_free(req);
231                 RETURN(rc);
232         }
233         ptlrpc_request_set_replen(req);
234         if (d->opd_pre)
235                 req->rq_request_portal = OST_CREATE_PORTAL;
236         ptlrpc_at_set_req_timeout(req);
237
238         req->rq_interpret_reply = osp_statfs_interpret;
239         aa = ptlrpc_req_async_args(aa, req);
240         aa->pointer_arg[0] = d;
241
242         /*
243          * no updates till reply
244          */
245         del_timer(&d->opd_statfs_timer);
246         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
247         d->opd_statfs_update_in_progress = 1;
248
249         ptlrpcd_add_req(req);
250
251         /* we still want to sync changes if no new changes are coming */
252         if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
253                 GOTO(out, rc);
254
255         if (atomic_read(&d->opd_sync_changes)) {
256                 struct thandle *th;
257
258                 th = dt_trans_create(env, d->opd_storage);
259                 if (IS_ERR(th)) {
260                         CERROR("%s: can't sync\n", d->opd_obd->obd_name);
261                         GOTO(out, rc);
262                 }
263                 rc = dt_trans_start_local(env, d->opd_storage, th);
264                 if (rc == 0) {
265                         CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
266                                d->opd_obd->obd_name,
267                                atomic_read(&d->opd_sync_changes));
268                         osp_sync_add_commit_cb_1s(env, d, th);
269                         dt_trans_stop(env, d->opd_storage, th);
270                 }
271         }
272
273 out:
274         RETURN(0);
275 }
276
277 /**
278  * Schedule an immediate update for statfs data
279  *
280  * If the cached statfs data claims no free space, but OSP has received a
281  * request to destroy an object (which probably releases some space), we may
282  * need to refresh the cached statfs data sooner than planned. The function
283  * checks that no statfs update is in progress and schedules one if so.
284  * XXX: there might be a case where removed object(s) add no free space
285  * (empty objects). If many such deletions happen, we can start updating
286  * statfs too often, causing an RPC storm. Some throttling is needed...
287  *
288  * \param[in] d         OSP device where statfs data needs to be refreshed
289  */
290 void osp_statfs_need_now(struct osp_device *d)
291 {
292         if (!d->opd_statfs_update_in_progress) {
293                 /*
294                  * if current status is -ENOSPC (lack of free space on OST)
295                  * then we should poll OST immediately once object destroy
296                  * is replied
297                  */
298                 d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
299                 del_timer(&d->opd_statfs_timer);
300                 wake_up(&d->opd_pre_waitq);
301         }
302 }
303
304 /**
305  * Return number of precreated objects
306  *
307  * A simple helper to calculate the number of precreated objects on the device.
308  *
309  * \param[in] env       LU environment provided by the caller
310  * \param[in] osp       OSP device
311  *
312  * \retval              the number of the precreated objects
313  */
314 static inline int osp_objs_precreated(const struct lu_env *env,
315                                       struct osp_device *osp)
316 {
317         return osp_fid_diff(&osp->opd_pre_last_created_fid,
318                             &osp->opd_pre_used_fid);
319 }
320
321 /**
322  * Check whether the pool of precreated objects is nearly empty
323  *
324  * We should not wait till the pool of precreated objects is exhausted,
325  * because then there would be a long period of OSP being unavailable for
326  * new creations due to the lengthy precreate RPC. Instead we ask for another
327  * precreation ahead and hopefully have it ready before the current pool is
328  * empty. Notice this function relies on external locking.
329  *
330  * \param[in] env       LU environment provided by the caller
331  * \param[in] d         OSP device
332  *
333  * \retval              0 - current pool is good enough, 1 - time to precreate
334  */
335 static inline int osp_precreate_near_empty_nolock(const struct lu_env *env,
336                                                   struct osp_device *d)
337 {
338         int window = osp_objs_precreated(env, d);
339
340         /* don't consider new precreation till the OST is healthy and
341          * has free space */
342         return ((window - d->opd_pre_reserved < d->opd_pre_create_count / 2) &&
343                 (d->opd_pre_status == 0));
344 }
345
346 /**
347  * Check pool of precreated objects
348  *
349  * This is protected version of osp_precreate_near_empty_nolock(), check that
350  * for the details.
351  *
352  * \param[in] env       LU environment provided by the caller
353  * \param[in] d         OSP device
354  *
355  * \retval              0 - current pool is good enough, 1 - time to precreate
356  */
357 static inline int osp_precreate_near_empty(const struct lu_env *env,
358                                            struct osp_device *d)
359 {
360         int rc;
361
362         if (d->opd_pre == NULL)
363                 return 0;
364
365         /* XXX: do we really need locking here? */
366         spin_lock(&d->opd_pre_lock);
367         rc = osp_precreate_near_empty_nolock(env, d);
368         spin_unlock(&d->opd_pre_lock);
369         return rc;
370 }
371
372 /**
373  * Check whether the given sequence is empty
374  *
375  * Returns a binary result: whether the given sequence has some IDs left
376  * or not. Find the details in osp_fid_end_seq(). This is a lock-protected
377  * version of that function.
378  *
379  * \param[in] env       LU environment provided by the caller
380  * \param[in] osp       OSP device
381  *
382  * \retval              0 - current sequence has no IDs, 1 - otherwise
383  */
384 static inline int osp_create_end_seq(const struct lu_env *env,
385                                      struct osp_device *osp)
386 {
387         struct lu_fid *fid = &osp->opd_pre_used_fid;
388         int rc;
389
390         spin_lock(&osp->opd_pre_lock);
391         rc = osp_fid_end_seq(env, fid);
392         spin_unlock(&osp->opd_pre_lock);
393         return rc;
394 }
395
396 /**
397  * Write FID into the last_oid/last_seq files
398  *
399  * The function stores the sequence and the in-sequence id into two dedicated
400  * files. The sync argument can be used to request synchronous commit, so the
401  * function won't return until the updates are committed.
402  *
403  * \param[in] env       LU environment provided by the caller
404  * \param[in] osp       OSP device
405  * \param[in] fid       fid where sequence/id is taken
406  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
407  *
408  * \retval 0            on success
409  * \retval negative     negated errno on error
410  **/
411 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
412                                  struct lu_fid *fid, int sync)
413 {
414         struct osp_thread_info  *oti = osp_env_info(env);
415         struct lu_buf      *lb_oid = &oti->osi_lb;
416         struct lu_buf      *lb_oseq = &oti->osi_lb2;
417         loff_t             oid_off;
418         u64                oid;
419         loff_t             oseq_off;
420         struct thandle    *th;
421         int                   rc;
422         ENTRY;
423
424         if (osp->opd_storage->dd_rdonly)
425                 RETURN(0);
426
427         /* Note: though f_oid is only 32 bits, we still write 64 bits
428          * for the oid to keep compatibility with the previous version. */
429         oid = fid->f_oid;
430         osp_objid_buf_prep(lb_oid, &oid_off,
431                            &oid, osp->opd_index);
432
433         osp_objseq_buf_prep(lb_oseq, &oseq_off,
434                             &fid->f_seq, osp->opd_index);
435
436         th = dt_trans_create(env, osp->opd_storage);
437         if (IS_ERR(th))
438                 RETURN(PTR_ERR(th));
439
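        /* when sync is requested, make the transaction commit synchronously */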
440         th->th_sync |= sync;
441         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
442                                      lb_oid, oid_off, th);
443         if (rc != 0)
444                 GOTO(out, rc);
445
446         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
447                                      lb_oseq, oseq_off, th);
448         if (rc != 0)
449                 GOTO(out, rc);
450
451         rc = dt_trans_start_local(env, osp->opd_storage, th);
452         if (rc != 0)
453                 GOTO(out, rc);
454
455         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
456                              &oid_off, th);
457         if (rc != 0) {
458                 CERROR("%s: cannot write to last oid file: rc = %d\n",
459                         osp->opd_obd->obd_name, rc);
460                 GOTO(out, rc);
461         }
462         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
463                              &oseq_off, th);
464         if (rc) {
465                 CERROR("%s: cannot write to last seq file: rc = %d\n",
466                         osp->opd_obd->obd_name, rc);
467                 GOTO(out, rc);
468         }
469 out:
470         dt_trans_stop(env, osp->opd_storage, th);
471         RETURN(rc);
472 }
473
474 /**
475  * Switch to another sequence
476  *
477  * When the current sequence has no available IDs left, OSP has to switch to
478  * a new sequence. OSP requests one using the regular FLDB protocol and stores
479  * it synchronously before it is used for precreation. This is needed to keep
480  * the sequence referenced (not orphaned); otherwise it's possible that the
481  * OST has some objects precreated and the clients have data written to them,
482  * but after an MDT failover nobody refers to those objects and OSP has no
483  * idea that the sequence needs to be cleaned up.
484  * While this is a very expensive operation, it's supposed to happen very
485  * infrequently because a sequence has 2^32 or 2^48 objects (depending on type)
486  *
487  * \param[in] env       LU environment provided by the caller
488  * \param[in] osp       OSP device
489  *
490  * \retval 0            on success
491  * \retval negative     negated errno on error
492  */
493 static int osp_precreate_rollover_new_seq(struct lu_env *env,
494                                           struct osp_device *osp)
495 {
496         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
497         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
498         int             rc;
499         ENTRY;
500
501         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
502         if (rc != 0) {
503                 CERROR("%s: alloc fid error: rc = %d\n",
504                        osp->opd_obd->obd_name, rc);
505                 RETURN(rc);
506         }
507
508         fid->f_oid = 1;
509         fid->f_ver = 0;
510         LASSERTF(fid_seq(fid) != fid_seq(last_fid),
511                  "fid "DFID", last_fid "DFID"\n", PFID(fid),
512                  PFID(last_fid));
513
514         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
515         if (rc != 0) {
516                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
517                        osp->opd_obd->obd_name, rc);
518                 RETURN(rc);
519         }
520
521         LCONSOLE_INFO("%s: update sequence from %#llx to %#llx\n",
522                       osp->opd_obd->obd_name, fid_seq(last_fid),
523                       fid_seq(fid));
524         /* Update last_xxx to the new seq */
525         spin_lock(&osp->opd_pre_lock);
526         osp->opd_last_used_fid = *fid;
527         osp_fid_to_obdid(fid, &osp->opd_last_id);
528         osp->opd_gap_start_fid = *fid;
529         osp->opd_pre_used_fid = *fid;
530         osp->opd_pre_last_created_fid = *fid;
531         spin_unlock(&osp->opd_pre_lock);
532
533         RETURN(rc);
534 }
535
536 /**
537  * Find IDs available in current sequence
538  *
539  * The function calculates the highest possible ID and the number of IDs
540  * available in the current sequence OSP is using. The number is limited
541  * artificially by the caller (grow param) and by the number of IDs available
542  * in the sequence by nature. The function doesn't require external
543  * locking.
544  *
545  * \param[in] env       LU environment provided by the caller
546  * \param[in] osp       OSP device
547  * \param[in] fid       FID the caller wants to start with
548  * \param[in] grow      how many the caller wants
549  * \param[out] fid      the highest calculated FID
550  * \param[out] grow     the number of available IDs calculated
551  *
552  * \retval              0 on success, 1 - the sequence is empty
553  */
554 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
555                               struct lu_fid *fid, int *grow)
556 {
557         struct osp_thread_info  *osi = osp_env_info(env);
558         __u64                   end;
559         int                     i = 0;
560
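        /* IDIF fids carry old-style (pre-FID) OST object ids, whose id space
         * is capped at IDIF_MAX_OID */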
561         if (fid_is_idif(fid)) {
562                 struct lu_fid   *last_fid;
563                 struct ost_id   *oi = &osi->osi_oi;
564                 int rc;
565
566                 spin_lock(&osp->opd_pre_lock);
567                 last_fid = &osp->opd_pre_last_created_fid;
568                 fid_to_ostid(last_fid, oi);
569                 end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
570                 *grow = end - ostid_id(oi);
571                 rc = ostid_set_id(oi, ostid_id(oi) + *grow);
572                 spin_unlock(&osp->opd_pre_lock);
573
574                 if (*grow == 0 || rc)
575                         return 1;
576
577                 ostid_to_fid(fid, oi, osp->opd_index);
578                 return 0;
579         }
580
581         spin_lock(&osp->opd_pre_lock);
582         *fid = osp->opd_pre_last_created_fid;
583         end = fid->f_oid;
584         end = min((end + *grow), (__u64)LUSTRE_DATA_SEQ_MAX_WIDTH);
585         *grow = end - fid->f_oid;
586         fid->f_oid += end - fid->f_oid;
587         spin_unlock(&osp->opd_pre_lock);
588
589         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
590                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
591
592         return *grow > 0 ? 0 : 1;
593 }
594
595 /**
596  * Prepare and send precreate RPC
597  *
598  * The function finds how many objects should be precreated. Then it allocates,
599  * prepares and sends the precreate RPC synchronously. Upon reply the function
600  * wakes up the threads waiting for new objects on this target. If the
601  * target wasn't able to create all the objects requested, then the next
602  * precreate will ask for fewer objects (i.e. slow precreation down).
603  *
604  * \param[in] env       LU environment provided by the caller
605  * \param[in] d         OSP device
606  *
607  * \retval 0            on success
608  * \retval negative     negated errno on error
609  **/
610 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
611 {
612         struct osp_thread_info  *oti = osp_env_info(env);
613         struct ptlrpc_request   *req;
614         struct obd_import       *imp;
615         struct ost_body         *body;
616         int                      rc, grow, diff;
617         struct lu_fid           *fid = &oti->osi_fid;
618         ENTRY;
619
620         /* don't precreate new objects till the OST is healthy and has free space */
621         if (unlikely(d->opd_pre_status)) {
622                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
623                        d->opd_obd->obd_name, d->opd_pre_status);
624                 RETURN(0);
625         }
626
627         /*
628          * if the connection/initialization is not completed, ignore
629          */
630         imp = d->opd_obd->u.cli.cl_import;
631         LASSERT(imp);
632
633         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
634         if (req == NULL)
635                 RETURN(-ENOMEM);
636         req->rq_request_portal = OST_CREATE_PORTAL;
637         /* we should not resend create request - anyway we will have delorphan
638          * and kill these objects */
639         req->rq_no_delay = req->rq_no_resend = 1;
640
641         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
642         if (rc) {
643                 ptlrpc_request_free(req);
644                 RETURN(rc);
645         }
646
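        /* ask for at most half of the configured maximum number of objects
         * per precreate RPC */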
647         spin_lock(&d->opd_pre_lock);
648         if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
649                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
650         grow = d->opd_pre_create_count;
651         spin_unlock(&d->opd_pre_lock);
652
653         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
654         LASSERT(body);
655
656         *fid = d->opd_pre_last_created_fid;
657         rc = osp_precreate_fids(env, d, fid, &grow);
658         if (rc == 1)
659                 /* Current seq has been used up */
660                 GOTO(out_req, rc = -ENOSPC);
661
662         if (!osp_is_fid_client(d)) {
663                 /* Non-FID client will always send seq 0 because of
664                  * compatibility */
665                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
666                 fid->f_seq = 0;
667         }
668
669         fid_to_ostid(fid, &body->oa.o_oi);
670         body->oa.o_valid = OBD_MD_FLGROUP;
671
672         ptlrpc_request_set_replen(req);
673
674         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
675                 GOTO(ready, rc = 0);
676
677         rc = ptlrpc_queue_wait(req);
678         if (rc) {
679                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
680                        rc);
681                 GOTO(out_req, rc);
682         }
683         LASSERT(req->rq_transno == 0);
684
685         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
686         if (body == NULL)
687                 GOTO(out_req, rc = -EPROTO);
688
689         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
690
691 ready:
692         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
693                 CERROR("%s: precreate fid "DFID" <= local used fid "DFID
694                        ": rc = %d\n", d->opd_obd->obd_name,
695                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
696                 GOTO(out_req, rc = -ESTALE);
697         }
698
699         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
700
701         spin_lock(&d->opd_pre_lock);
702         if (diff < grow) {
703                 /* the OST has not managed to create all the
704                  * objects we asked for */
705                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
706                 d->opd_pre_create_slow = 1;
707         } else {
708                 /* the OST is able to keep up with the work,
709                  * we could consider increasing create_count
710                  * next time if needed */
711                 d->opd_pre_create_slow = 0;
712         }
713
714         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
715         fid_to_ostid(fid, &body->oa.o_oi);
716
717         d->opd_pre_last_created_fid = *fid;
718         spin_unlock(&d->opd_pre_lock);
719
720         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
721                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
722                PFID(&d->opd_pre_last_created_fid));
723 out_req:
724         /* now we can wake up all users waiting for objects */
725         osp_pre_update_status(d, rc);
726         wake_up(&d->opd_pre_user_waitq);
727
728         ptlrpc_req_finished(req);
729         RETURN(rc);
730 }
731
732 /**
733  * Get last precreated object from target (OST)
734  *
735  * Sends a synchronous RPC to the target (OST) to learn the last precreated
736  * object. This is later used to remove all unused objects (orphan cleanup
737  * procedure). Also, the next object after the one we got will be used as a
738  * starting point for new precreates.
739  *
740  * \param[in] env       LU environment provided by the caller
741  * \param[in] d         OSP device
742  *
743  * \retval 0            on success
744  * \retval negative     negated errno on error
745  **/
746 static int osp_get_lastfid_from_ost(const struct lu_env *env,
747                                     struct osp_device *d)
748 {
749         struct ptlrpc_request   *req = NULL;
750         struct obd_import       *imp;
751         struct lu_fid           *last_fid;
752         char                    *tmp;
753         int                     rc;
754         ENTRY;
755
756         imp = d->opd_obd->u.cli.cl_import;
757         LASSERT(imp);
758
759         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
760         if (req == NULL)
761                 RETURN(-ENOMEM);
762
763         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
764                              sizeof(KEY_LAST_FID));
765
766         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
767         if (rc) {
768                 ptlrpc_request_free(req);
769                 RETURN(rc);
770         }
771
772         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
773         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
774
775         req->rq_no_delay = req->rq_no_resend = 1;
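        /* send our last used fid (in little-endian) along with the LAST_FID
         * query */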
776         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
777         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
778
779         ptlrpc_request_set_replen(req);
780
781         rc = ptlrpc_queue_wait(req);
782         if (rc) {
783                 /* bad-bad OST.. let sysadm sort this out */
784                 if (rc == -ENOTSUPP) {
785                         CERROR("%s: server does not support FID: rc = %d\n",
786                                d->opd_obd->obd_name, -ENOTSUPP);
787                 }
788                 ptlrpc_set_import_active(imp, 0);
789                 GOTO(out, rc);
790         }
791
792         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
793         if (last_fid == NULL) {
794                 CERROR("%s: Got last_fid failed.\n", d->opd_obd->obd_name);
795                 GOTO(out, rc = -EPROTO);
796         }
797
798         if (!fid_is_sane(last_fid)) {
799                 CERROR("%s: Got insane last_fid "DFID"\n",
800                        d->opd_obd->obd_name, PFID(last_fid));
801                 GOTO(out, rc = -EPROTO);
802         }
803
804         /* Only update the last used fid, if the OST has objects for
805          * this sequence, i.e. fid_oid > 0 */
806         if (fid_oid(last_fid) > 0)
807                 d->opd_last_used_fid = *last_fid;
808
809         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
810                PFID(last_fid));
811
812 out:
813         ptlrpc_req_finished(req);
814         RETURN(rc);
815 }
816
817 /**
818  * Cleanup orphans on OST
819  *
820  * This function is called in the context of a dedicated thread handling
821  * all the precreation stuff. The function waits till local recovery
822  * is complete, then identifies all the unreferenced objects (orphans)
823  * using the highest ID referenced locally and the highest object
824  * precreated by the target. The found range is subject to removal
825  * using a specially flagged RPC. During this process OSP is marked
826  * unavailable for new objects.
827  *
828  * \param[in] env       LU environment provided by the caller
829  * \param[in] d         OSP device
830  *
831  * \retval 0            on success
832  * \retval negative     negated errno on error
833  */
834 static int osp_precreate_cleanup_orphans(struct lu_env *env,
835                                          struct osp_device *d)
836 {
837         struct osp_thread_info  *osi = osp_env_info(env);
838         struct lu_fid           *last_fid = &osi->osi_fid;
839         struct ptlrpc_request   *req = NULL;
840         struct obd_import       *imp;
841         struct ost_body         *body;
842         int                      update_status = 0;
843         int                      rc;
844         int                      diff;
845
846         ENTRY;
847
848         /*
849          * wait for local recovery to finish, so we can clean up orphans.
850          * Orphans are all objects since "last used" (assigned), but
851          * there might be objects reserved and in some cases they won't
852          * be used. We can't clean them up till we're sure they won't be
853          * used. Also we can't allow new reservations because they may
854          * end up among the orphans being cleaned up below. So we block
855          * new reservations and wait till all reserved objects are either
856          * used or released.
857          */
858         spin_lock(&d->opd_pre_lock);
859         d->opd_pre_recovering = 1;
860         spin_unlock(&d->opd_pre_lock);
861         /*
862          * The locking above makes sure the opd_pre_reserved check below will
863          * catch all osp_precreate_reserve() calls who find
864          * "!opd_pre_recovering".
865          */
866         wait_event_idle(d->opd_pre_waitq,
867                         (!d->opd_pre_reserved && d->opd_recovery_completed) ||
868                         !osp_precreate_running(d) || d->opd_got_disconnected);
869         if (!osp_precreate_running(d) || d->opd_got_disconnected)
870                 GOTO(out, rc = -EAGAIN);
871
872         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
873                d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
874
875         *last_fid = d->opd_last_used_fid;
876         /* The OSP should already have a valid seq by now */
877         LASSERT(!fid_is_zero(last_fid));
878         if (fid_oid(&d->opd_last_used_fid) < 2) {
879                 /* lastfid looks strange... ask OST */
880                 rc = osp_get_lastfid_from_ost(env, d);
881                 if (rc)
882                         GOTO(out, rc);
883         }
884
885         imp = d->opd_obd->u.cli.cl_import;
886         LASSERT(imp);
887
888         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
889         if (req == NULL)
890                 GOTO(out, rc = -ENOMEM);
891
892         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
893         if (rc) {
894                 ptlrpc_request_free(req);
895                 req = NULL;
896                 GOTO(out, rc);
897         }
898
899         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
900         if (body == NULL)
901                 GOTO(out, rc = -EPROTO);
902
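        /* OBD_FL_DELORPHAN asks the OST to destroy all precreated objects
         * beyond the last fid known to be used by this MDT */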
903         body->oa.o_flags = OBD_FL_DELORPHAN;
904         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
905
906         fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
907
908         ptlrpc_request_set_replen(req);
909
910         /* Don't resend the delorphan req */
911         req->rq_no_resend = req->rq_no_delay = 1;
912
913         rc = ptlrpc_queue_wait(req);
914         if (rc) {
915                 update_status = 1;
916                 GOTO(out, rc);
917         }
918
919         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
920         if (body == NULL)
921                 GOTO(out, rc = -EPROTO);
922
923         /*
924          * The OST provides us with the id the new pool starts from in body->oa.o_oi
925          */
926         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
927
928         spin_lock(&d->opd_pre_lock);
929         diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
930         if (diff > 0) {
931                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
932                 d->opd_pre_last_created_fid = d->opd_last_used_fid;
933         } else {
934                 d->opd_pre_create_count = OST_MIN_PRECREATE;
935                 d->opd_pre_last_created_fid = *last_fid;
936         }
937         /*
938          * This empties the pre-creation pool and effectively blocks any new
939          * reservations.
940          */
941         LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
942                 LUSTRE_DATA_SEQ_MAX_WIDTH);
943         d->opd_pre_used_fid = d->opd_pre_last_created_fid;
944         d->opd_pre_create_slow = 0;
945         spin_unlock(&d->opd_pre_lock);
946
947         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
948                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
949                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
950 out:
951         if (req)
952                 ptlrpc_req_finished(req);
953
954         /*
955          * If rc is zero, the pre-creation window should have been emptied.
956          * Since waking up the herd would be useless without pre-created
957          * objects, we defer the signal to osp_precreate_send() in that case.
958          */
959         if (rc != 0) {
960                 if (update_status) {
961                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
962                                d->opd_obd->obd_name, rc);
963                         /* we can't proceed from here, the OST seems to
964                          * be in bad shape; better to wait for
965                          * a new instance of the server and repeat
966                          * from the beginning. Notify possible waiters
967                          * that this OSP isn't quite functional yet */
968                         osp_pre_update_status(d, rc);
969                 } else {
970                         wake_up(&d->opd_pre_user_waitq);
971                 }
972         } else {
973                 spin_lock(&d->opd_pre_lock);
974                 d->opd_pre_recovering = 0;
975                 spin_unlock(&d->opd_pre_lock);
976         }
977
978         RETURN(rc);
979 }
980
981 /**
982  * Update precreate status using statfs data
983  *
984  * The function decides whether this OSP should be used for new objects.
985  * IOW, whether this OST is used up or has some free space. Cached statfs
986  * data is used to make this decision. If the latest statfs
987  * request failed, the caller (osp_pre_update_status_msfs()) marks the OSP
988  * unavailable right away.
989  *
990  * The new statfs data is passed in \a msfs and needs to be stored into
991  * opd_statfs, but only after the various flags in os_state are set, so
992  * that the new statfs data is not visible without appropriate flags set.
993  * As such, there is no need to clear the flags here, since this is called
994  * with new statfs data, and they should not be cleared if sent from OST.
995  *
996  * Add a bit of hysteresis so this flag isn't continually flapping, and
997  * ensure that new files don't get extremely fragmented due to only a
998  * small amount of available space in the filesystem.  We want to set
999  * the ENOSPC/ENOINO flags unconditionally when there is less than the
1000  * reserved size free, and still copy them from the old state when there
1001  * is less than 2*reserved size free space or inodes.
1002  *
1003  * \param[in] d         OSP device
1004  * \param[in] msfs      statfs data
1005  */
1006 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs)
1007 {
1008         u32 old_state = d->opd_statfs.os_state;
1009         u32 reserved_ino_low = 32;      /* could be tunable in the future */
1010         u32 reserved_ino_high = reserved_ino_low * 2;
1011         u64 available_mb;
1012
1013         /* statfs structure not initialized yet */
1014         if (unlikely(!msfs->os_type))
1015                 return;
1016
1017         /* if the low and high watermarks have not been initialized yet */
1018         if (unlikely(d->opd_reserved_mb_high == 0 &&
1019                      d->opd_reserved_mb_low == 0)) {
1020                 /* Use ~0.1% by default to disable object allocation,
1021                  * and ~0.2% to enable, size in MB, set both watermark
1022                  */
1023                 spin_lock(&d->opd_pre_lock);
1024                 if (d->opd_reserved_mb_high == 0 &&
1025                     d->opd_reserved_mb_low == 0) {
1026                         d->opd_reserved_mb_low = ((msfs->os_bsize >> 10) *
1027                                                   msfs->os_blocks) >> 20;
1028                         if (d->opd_reserved_mb_low == 0)
1029                                 d->opd_reserved_mb_low = 1;
1030                         d->opd_reserved_mb_high =
1031                                 (d->opd_reserved_mb_low << 1) + 1;
1032                 }
1033                 spin_unlock(&d->opd_pre_lock);
1034         }
1035
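        /* convert os_bavail (counted in os_bsize blocks) to megabytes */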
1036         available_mb = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
1037         if (msfs->os_ffree < reserved_ino_low)
1038                 msfs->os_state |= OS_STATE_ENOINO;
1039         else if (msfs->os_ffree <= reserved_ino_high)
1040                 msfs->os_state |= old_state & OS_STATE_ENOINO;
1041         /* else don't clear flags in new msfs->os_state sent from OST */
1042
1043         CDEBUG(D_INFO,
1044                "%s: blocks=%llu free=%llu avail=%llu avail_mb=%llu hwm_mb=%u files=%llu ffree=%llu state=%x: rc = %d\n",
1045                d->opd_obd->obd_name, msfs->os_blocks, msfs->os_bfree,
1046                msfs->os_bavail, available_mb, d->opd_reserved_mb_high,
1047                msfs->os_files, msfs->os_ffree, msfs->os_state,
1048                d->opd_pre_status);
1049         if (available_mb < d->opd_reserved_mb_low)
1050                 msfs->os_state |= OS_STATE_ENOSPC;
1051         else if (available_mb <= d->opd_reserved_mb_high)
1052                 msfs->os_state |= old_state & OS_STATE_ENOSPC;
1053         /* else don't clear flags in new msfs->os_state sent from OST */
1054
1055         if (msfs->os_state & (OS_STATE_ENOINO | OS_STATE_ENOSPC)) {
1056                 d->opd_pre_status = -ENOSPC;
1057                 if (!(old_state & (OS_STATE_ENOINO | OS_STATE_ENOSPC)))
1058                         CDEBUG(D_INFO, "%s: full: state=%x: rc = %x\n",
1059                                d->opd_obd->obd_name, msfs->os_state,
1060                                d->opd_pre_status);
1061                 CDEBUG(D_INFO, "uncommitted changes=%u in_progress=%u\n",
1062                        atomic_read(&d->opd_sync_changes),
1063                        atomic_read(&d->opd_sync_rpcs_in_progress));
1064         } else if (old_state & (OS_STATE_ENOINO | OS_STATE_ENOSPC)) {
1065                 d->opd_pre_status = 0;
1066                 spin_lock(&d->opd_pre_lock);
1067                 d->opd_pre_create_slow = 0;
1068                 d->opd_pre_create_count = OST_MIN_PRECREATE;
1069                 spin_unlock(&d->opd_pre_lock);
1070                 wake_up(&d->opd_pre_waitq);
1071
1072                 CDEBUG(D_INFO,
1073                        "%s: available: state=%x: rc = %d\n",
1074                        d->opd_obd->obd_name, msfs->os_state,
1075                        d->opd_pre_status);
1076         } else {
1077                 /* we only get here if rc == 0 in the caller */
1078                 d->opd_pre_status = 0;
1079         }
1080
1081         /* Object precreation skipped on OST if manually disabled */
1082         if (d->opd_pre_max_create_count == 0)
1083                 msfs->os_state |= OS_STATE_NOPRECREATE;
1084         /* else don't clear flags in new msfs->os_state sent from OST */
1085
1086         /* copy only new statfs state to make it visible to MDS threads */
1087         if (&d->opd_statfs != msfs)
1088                 d->opd_statfs = *msfs;
1089 }
1090
1091 /**
1092  * Initialize FID for precreation
1093  *
1094  * For a just created new target, a new sequence should be taken.
1095  * The function checks there is no IDIF in use (in case the target was
1096  * added with an older version of Lustre), then requests a new
1097  * sequence from FLDB using the regular protocol. Then this new
1098  * sequence is stored on persistent storage synchronously to prevent
1099  * possible object leakage (for the details see the description of
1100  * osp_precreate_rollover_new_seq()).
1101  *
1102  * \param[in] osp       OSP device
1103  *
1104  * \retval 0            on success
1105  * \retval negative     negated errno on error
1106  */
1107 int osp_init_pre_fid(struct osp_device *osp)
1108 {
1109         struct lu_env           env;
1110         struct osp_thread_info  *osi;
1111         struct lu_client_seq    *cli_seq;
1112         struct lu_fid           *last_fid;
1113         int                     rc;
1114         ENTRY;
1115
1116         LASSERT(osp->opd_pre != NULL);
1117
1118         /* Let's check if the current last_seq/fid is valid,
1119          * otherwise request new sequence from the controller */
1120         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1121                 /* Non-MDT0 can only use normal sequence for
1122                  * OST objects */
1123                 if (fid_is_norm(&osp->opd_last_used_fid))
1124                         RETURN(0);
1125         } else {
1126                 /* Initially MDT0 will start with IDIF, after
1127                  * that it will request new sequence from the
1128                  * controller */
1129                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1130                     fid_is_norm(&osp->opd_last_used_fid))
1131                         RETURN(0);
1132         }
1133
1134         if (!fid_is_zero(&osp->opd_last_used_fid))
1135                 CWARN("%s: invalid last used fid "DFID
1136                       ", try to get new sequence.\n",
1137                       osp->opd_obd->obd_name,
1138                       PFID(&osp->opd_last_used_fid));
1139
1140         rc = lu_env_init(&env, osp->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1141         if (rc) {
1142                 CERROR("%s: init env error: rc = %d\n",
1143                        osp->opd_obd->obd_name, rc);
1144                 RETURN(rc);
1145         }
1146
1147         osi = osp_env_info(&env);
1148         last_fid = &osi->osi_fid;
1149         fid_zero(last_fid);
1150         /* For a fresh fs, it will allocate a new sequence first */
1151         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1152                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1153                 rc = seq_client_get_seq(&env, cli_seq, &last_fid->f_seq);
1154                 if (rc != 0) {
1155                         CERROR("%s: alloc fid error: rc = %d\n",
1156                                osp->opd_obd->obd_name, rc);
1157                         GOTO(out, rc);
1158                 }
1159         } else {
1160                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1161         }
1162         last_fid->f_oid = 1;
1163         last_fid->f_ver = 0;
1164
1165         spin_lock(&osp->opd_pre_lock);
1166         osp->opd_last_used_fid = *last_fid;
1167         osp->opd_pre_used_fid = *last_fid;
1168         osp->opd_pre_last_created_fid = *last_fid;
1169         spin_unlock(&osp->opd_pre_lock);
1170         rc = osp_write_last_oid_seq_files(&env, osp, last_fid, 1);
1171         if (rc != 0) {
1172                 CERROR("%s: write fid error: rc = %d\n",
1173                        osp->opd_obd->obd_name, rc);
1174                 GOTO(out, rc);
1175         }
1176 out:
1177         lu_env_fini(&env);
1178         RETURN(rc);
1179 }
1180
1181 /**
1182  * The core of precreate functionality
1183  *
1184  * The function implements the main precreation loop. Basically it
1185  * involves connecting to the target, precreate FID initialization,
1186  * identifying and removing orphans, then serving precreation. As
1187  * part of the latter, the thread is responsible for statfs data
1188  * updates. The precreation is mostly driven by other threads
1189  * asking for new OST objects - those askers wake the thread when
1190  * the number of precreated objects reaches the low watermark.
1191  * After a disconnect, the sequence above repeats. This keeps going
1192  * until the thread is requested to stop.
1193  *
1194  * \param[in] _arg      private data of the thread (OSP device to handle)
1195  *
1196  * \retval 0            on success
1197  * \retval negative     negated errno on error
1198  */
1199 static int osp_precreate_thread(void *_arg)
1200 {
1201         struct osp_device       *d = _arg;
1202         struct ptlrpc_thread    *thread = &d->opd_pre_thread;
1203         struct l_wait_info       lwi2 = LWI_TIMEOUT(cfs_time_seconds(5),
1204                                                     back_to_sleep, NULL);
1205         struct lu_env            env;
1206         int                      rc;
1207
1208         ENTRY;
1209
1210         rc = lu_env_init(&env, d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1211         if (rc) {
1212                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1213                        rc);
1214
1215                 spin_lock(&d->opd_pre_lock);
1216                 thread->t_flags = SVC_STOPPED;
1217                 spin_unlock(&d->opd_pre_lock);
1218                 wake_up(&thread->t_ctl_waitq);
1219
1220                 RETURN(rc);
1221         }
1222
1223         spin_lock(&d->opd_pre_lock);
1224         thread->t_flags = SVC_RUNNING;
1225         spin_unlock(&d->opd_pre_lock);
1226         wake_up(&thread->t_ctl_waitq);
1227
1228         while (osp_precreate_running(d)) {
1229                 /*
1230                  * need to be connected to OST
1231                  */
1232                 while (osp_precreate_running(d)) {
1233                         if ((d->opd_pre == NULL || d->opd_pre_recovering) &&
1234                             d->opd_imp_connected &&
1235                             !d->opd_got_disconnected)
1236                                 break;
1237                         wait_event_idle(d->opd_pre_waitq,
1238                                         !osp_precreate_running(d) ||
1239                                         d->opd_new_connection);
1240
1241                         if (!d->opd_new_connection)
1242                                 continue;
1243
1244                         d->opd_new_connection = 0;
1245                         d->opd_got_disconnected = 0;
1246                         break;
1247                 }
1248
1249                 if (!osp_precreate_running(d))
1250                         break;
1251
1252                 if (d->opd_pre) {
1253                         LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1254                         /* Sigh, fid client is not ready yet */
1255                         if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
1256                                 continue;
1257
1258                         /* Init fid for osp_precreate if necessary */
1259                         rc = osp_init_pre_fid(d);
1260                         if (rc != 0) {
1261                                 class_export_put(d->opd_exp);
1262                                 d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
1263                                 CERROR("%s: init pre fid error: rc = %d\n",
1264                                                 d->opd_obd->obd_name, rc);
1265                                 continue;
1266                         }
1267                 }
1268
1269                 if (osp_statfs_update(&env, d)) {
1270                         l_wait_event(d->opd_pre_waitq,
1271                                      !osp_precreate_running(d), &lwi2);
1272                         continue;
1273                 }
1274
1275                 if (d->opd_pre) {
1276                         /*
1277                          * Clean up orphans or recreate missing objects.
1278                          */
1279                         rc = osp_precreate_cleanup_orphans(&env, d);
1280                         if (rc != 0) {
1281                                 schedule_timeout_interruptible(cfs_time_seconds(1));
1282                                 continue;
1283                         }
1284                 }
1285
1286                 /*
1287                  * connected, can handle precreates now
1288                  */
1289                 while (osp_precreate_running(d)) {
1290                         wait_event_idle(d->opd_pre_waitq,
1291                                         !osp_precreate_running(d) ||
1292                                         osp_precreate_near_empty(&env, d) ||
1293                                         osp_statfs_need_update(d) ||
1294                                         d->opd_got_disconnected);
1295
1296                         if (!osp_precreate_running(d))
1297                                 break;
1298
1299                         /* something happened to the connection,
1300                          * have to start from the beginning */
1301                         if (d->opd_got_disconnected)
1302                                 break;
1303
1304                         if (osp_statfs_need_update(d))
1305                                 if (osp_statfs_update(&env, d))
1306                                         break;
1307
1308                         if (d->opd_pre == NULL)
1309                                 continue;
1310
1311                         /* To avoid handling different seqs in precreate/orphan
1312                          * cleanup, hold precreation until the current seq is
1313                          * used up. */
1314                         if (unlikely(osp_precreate_end_seq(&env, d) &&
1315                             !osp_create_end_seq(&env, d)))
1316                                 continue;
1317
1318                         if (unlikely(osp_precreate_end_seq(&env, d) &&
1319                                      osp_create_end_seq(&env, d))) {
1320                                 LCONSOLE_INFO("%s:%#llx is used up."
1321                                               " Update to new seq\n",
1322                                               d->opd_obd->obd_name,
1323                                          fid_seq(&d->opd_pre_last_created_fid));
1324                                 rc = osp_precreate_rollover_new_seq(&env, d);
1325                                 if (rc)
1326                                         continue;
1327                         }
1328
1329                         if (osp_precreate_near_empty(&env, d)) {
1330                                 rc = osp_precreate_send(&env, d);
1331                                 /* osp_precreate_send() sets opd_pre_status
1332                                  * in case of error, which prevents use of
1333                                  * the failed device. */
1334                                 if (rc < 0 && rc != -ENOSPC &&
1335                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1336                                         CERROR("%s: cannot precreate objects:"
1337                                                " rc = %d\n",
1338                                                d->opd_obd->obd_name, rc);
1339                         }
1340                 }
1341         }
1342
1343         thread->t_flags = SVC_STOPPED;
1344         lu_env_fini(&env);
1345         wake_up(&thread->t_ctl_waitq);
1346
1347         RETURN(0);
1348 }
1349
1350 /**
1351  * Check when to stop waiting for precreated objects.
1352  *
1353  * The caller wanting a new OST object can't wait indefinitely. The
1354  * function checks a few conditions including available new OST
1355  * objects, a disconnected OST, lack of space with no pending destroys,
1356  * etc. IOW, it checks whether the current OSP state is good enough to keep
1357  * waiting or it's better to give up.
1358  *
1359  * \param[in] env       LU environment provided by the caller
1360  * \param[in] d         OSP device
1361  *
1362  * \retval              0 - keep waiting, 1 - no luck
1363  */
1364 static int osp_precreate_ready_condition(const struct lu_env *env,
1365                                          struct osp_device *d)
1366 {
1367         if (d->opd_pre_recovering)
1368                 return 0;
1369
1370         /* ready if got enough precreated objects */
1371         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
1372         if (d->opd_pre_reserved + 1 < osp_objs_precreated(env, d))
1373                 return 1;
1374
1375         /* ready if OST reported no space and no destroys in progress */
1376         if (atomic_read(&d->opd_sync_changes) +
1377             atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
1378             d->opd_pre_status == -ENOSPC)
1379                 return 1;
1380
1381         /* Bail out if I/O to the OST fails */
1382         if (d->opd_pre_status != 0 &&
1383             d->opd_pre_status != -EAGAIN &&
1384             d->opd_pre_status != -ENODEV &&
1385             d->opd_pre_status != -ENOTCONN &&
1386             d->opd_pre_status != -ENOSPC) {
1387                 /* DEBUG LU-3230 */
1388                 if (d->opd_pre_status != -EIO)
1389                         CERROR("%s: precreate failed opd_pre_status %d\n",
1390                                d->opd_obd->obd_name, d->opd_pre_status);
1391                 return 1;
1392         }
1393
1394         return 0;
1395 }
1396
1397 static int osp_precreate_timeout_condition(void *data)
1398 {
1399         struct osp_device *d = data;
1400
1401         CDEBUG(D_HA, "%s: slow creates, last="DFID", next="DFID", "
1402               "reserved=%llu, sync_changes=%u, "
1403               "sync_rpcs_in_progress=%d, status=%d\n",
1404               d->opd_obd->obd_name, PFID(&d->opd_pre_last_created_fid),
1405               PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1406               atomic_read(&d->opd_sync_changes),
1407               atomic_read(&d->opd_sync_rpcs_in_progress),
1408               d->opd_pre_status);
1409
1410         return 1;
1411 }
1412
1413 /**
1414  * Reserve object in precreate pool
1415  *
1416  * When the caller wants to create a new object on this target (target
1417  * represented by the given OSP), it should declare this intention using
1418  * a regular ->dt_declare_create() OSD API method. Then OSP tries to
1419  * reserve an object in the existing precreated pool or waits up to
1420  * obd_timeout for an available object to appear in the pool (a dedicated
1421  * thread does the real precreation in the background). The object can be
1422  * consumed later with osp_precreate_get_fid() or released with a call to
1423  * lu_object_put(). Notice the function doesn't reserve a specific ID, just
1424  * some ID. The actual ID assignment happens in osp_precreate_get_fid().
1425  * If space on the target is short and there is a pending object destroy,
1426  * then the function forces a local commit to speed up space release (see
1427  * osp_sync.c for the details).
1428  *
1429  * \param[in] env       LU environment provided by the caller
1430  * \param[in] d         OSP device
1431  *
1432  * \retval              0 on success
1433  * \retval              -ENOSPC when no space on OST
1434  * \retval              -EAGAIN try later, slow precreation in progress
1435  * \retval              -EIO when no access to OST
1436  */
1437 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
1438 {
1439         time64_t expire = ktime_get_seconds() + obd_timeout;
1440         struct l_wait_info lwi;
1441         int precreated, rc, synced = 0;
1442
1443         ENTRY;
1444
1445         LASSERTF(osp_objs_precreated(env, d) >= 0, "Last created FID "DFID
1446                  "Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1447                  PFID(&d->opd_pre_used_fid));
1448
1449         /* opd_pre_max_create_count == 0 means this OST must not be used. */
1450         if (d->opd_pre_max_create_count == 0)
1451                 RETURN(-ENOBUFS);
1452
1453         /*
1454          * wait till:
1455          *  - preallocation is done
1456          *  - no free space expected soon
1457          *  - can't connect to OST for too long (obd_timeout)
1458          *  - OST can allocate fid sequence.
1459          */
1460         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1461                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1462
1463                 /*
1464                  * increase number of precreations
1465                  */
1466                 precreated = osp_objs_precreated(env, d);
1467                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1468                     d->opd_pre_create_slow == 0 &&
1469                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1470                         spin_lock(&d->opd_pre_lock);
1471                         d->opd_pre_create_slow = 1;
1472                         d->opd_pre_create_count *= 2;
1473                         spin_unlock(&d->opd_pre_lock);
1474                 }
1475
1476                 spin_lock(&d->opd_pre_lock);
1477                 precreated = osp_objs_precreated(env, d);
1478                 if (precreated > d->opd_pre_reserved &&
1479                     !d->opd_pre_recovering) {
1480                         d->opd_pre_reserved++;
1481                         spin_unlock(&d->opd_pre_lock);
1482                         rc = 0;
1483
1484                         /* XXX: don't wake up if precreation is in progress */
1485                         if (osp_precreate_near_empty_nolock(env, d) &&
1486                            !osp_precreate_end_seq_nolock(env, d))
1487                                 wake_up(&d->opd_pre_waitq);
1488
1489                         break;
1490                 }
1491                 spin_unlock(&d->opd_pre_lock);
1492
1493                 /*
1494                  * all precreated objects have been used and the no-space
1495                  * status leaves us no chance to succeed very soon;
1496                  * but if there is a destroy in progress, then we should
1497                  * wait till that is done - some space might be released
1498                  */
1499                 if (unlikely(rc == -ENOSPC)) {
1500                         if (atomic_read(&d->opd_sync_changes) && synced == 0) {
1501                                 /* force local commit to release space */
1502                                 dt_commit_async(env, d->opd_storage);
1503                                 osp_sync_check_for_work(d);
1504                                 synced = 1;
1505                         }
1506                         if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
1507                                 /* just wait till destroys are done */
1508                                 /* see l_wait_event() a few lines below */
1509                         }
1510                         if (atomic_read(&d->opd_sync_changes) +
1511                             atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
1512                                 /* no hope for free space */
1513                                 break;
1514                         }
1515                 }
1516
1517                 /* XXX: don't wake up if precreation is in progress */
1518                 wake_up(&d->opd_pre_waitq);
1519
1520                 lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
1521                                   osp_precreate_timeout_condition, d);
1522                 if (ktime_get_seconds() >= expire) {
1523                         rc = -ETIMEDOUT;
1524                         break;
1525                 }
1526
1527                 l_wait_event(d->opd_pre_user_waitq,
1528                              osp_precreate_ready_condition(env, d), &lwi);
1529         }
1530
1531         RETURN(rc);
1532 }
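
/*
 * Illustrative caller-side sketch, not compiled as part of this file.
 * Only osp_precreate_reserve() and its return codes are taken from the
 * function above; retry_or_choose_another_target() is a hypothetical
 * placeholder for whatever the caller does on a transient failure:
 *
 *	rc = osp_precreate_reserve(env, d);
 *	if (rc == -ENOSPC || rc == -EAGAIN || rc == -ETIMEDOUT) {
 *		rc = retry_or_choose_another_target();
 *	} else if (rc < 0) {
 *		return rc;
 *	}
 *
 * -ENOSPC means the OST is full with no destroys pending, -EAGAIN means
 * precreation is slow, -ETIMEDOUT means the wait exceeded obd_timeout;
 * anything else (e.g. -EIO) marks the target as unusable for now. On
 * success a concrete FID is obtained later via osp_precreate_get_fid().
 */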
1533
1534 /**
1535  * Get a FID from precreation pool
1536  *
1537  * The function is a companion to osp_precreate_reserve() - it assigns
1538  * a specific FID from the precreated pool. It should be called only
1539  * if the call to osp_precreate_reserve() was successful. The function
1540  * updates the local storage to remember the highest object ID referenced
1541  * by the node in the given sequence.
1542  *
1543  * A very important detail: this is supposed to be called once the
1544  * transaction is started, so the on-disk update will be atomic with the
1545  * data (like LOVEA) referring to this object. Then the object won't be
1546  * leaked: either it's referenced by the committed transaction or it's
1547  * subject to the orphan cleanup procedure.
1548  *
1549  * \param[in] env       LU environment provided by the caller
1550  * \param[in] d         OSP device
1551  * \param[out] fid      generated FID
1552  *
1553  * \retval 0            on success
1554  * \retval negative     negated errno on error
1555  */
1556 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1557                           struct lu_fid *fid)
1558 {
1559         struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
1560         /* grab next id from the pool */
1561         spin_lock(&d->opd_pre_lock);
1562
1563         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1564                              &d->opd_pre_last_created_fid) < 0,
1565                  "next fid "DFID" last created fid "DFID"\n",
1566                  PFID(&d->opd_pre_used_fid),
1567                  PFID(&d->opd_pre_last_created_fid));
1568
1569         /*
1570          * When sequence is used up, new one should be allocated in
1571          * osp_precreate_rollover_new_seq. So ASSERT here to avoid
1572          * objid overflow.
1573          */
1574         LASSERTF(osp_fid_end_seq(env, pre_used_fid) == 0,
1575                  "next fid "DFID" last created fid "DFID"\n",
1576                  PFID(&d->opd_pre_used_fid),
1577                  PFID(&d->opd_pre_last_created_fid));
1578         /* Non-IDIF fids shouldn't get here with oid == 0xFFFFFFFF. */
1579         if (fid_is_idif(pre_used_fid) &&
1580             unlikely(fid_oid(pre_used_fid) == LUSTRE_DATA_SEQ_MAX_WIDTH))
1581                 pre_used_fid->f_seq++;
1582
1583         d->opd_pre_used_fid.f_oid++;
1584         memcpy(fid, &d->opd_pre_used_fid, sizeof(*fid));
1585         d->opd_pre_reserved--;
1586         /*
1587          * last_used_id must be changed along with getting the new id,
1588          * otherwise we might miscalculate the gap, causing object loss or leak
1589          */
1590         osp_update_last_fid(d, fid);
1591         spin_unlock(&d->opd_pre_lock);
1592
1593         /*
1594          * probably the main thread suspended orphan cleanup till
1595          * all reservations are released; see the comment in
1596          * osp_precreate_thread() just before orphan cleanup
1597          */
1598         if (unlikely(d->opd_pre_reserved == 0 &&
1599                      (d->opd_pre_recovering || d->opd_pre_status)))
1600                 wake_up(&d->opd_pre_waitq);
1601
1602         return 0;
1603 }
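
/*
 * Schematic sketch (not compiled) of the intended ordering of the two
 * calls above relative to the transaction, per the comment on
 * osp_precreate_get_fid(); the declare/start steps are simplified
 * placeholders, not exact Lustre prototypes:
 *
 *	osp_precreate_reserve(env, d);		reserve "some" object, may
 *						block up to obd_timeout
 *	...declare updates, start the transaction...
 *	osp_precreate_get_fid(env, d, &fid);	assign the concrete FID inside
 *						the running transaction, so the
 *						on-disk reference (e.g. LOVEA)
 *						commits atomically and the
 *						object is never leaked
 */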
1604
1605 /*
1606  * Set the regular size attribute on an object
1607  *
1608  * When a striping is created late, it's possible that the size is already
1609  * initialized on the file. Then the new striping should inherit the size
1610  * from the file. The function sets the size on the object using the regular
1611  * protocol (OST_PUNCH).
1612  * XXX: should be re-implemented using OUT ?
1613  *
1614  * \param[in] env       LU environment provided by the caller
1615  * \param[in] dt        object
1616  * \param[in] size      size to set.
1617  *
1618  * \retval 0            on success
1619  * \retval negative     negated errno on error
1620  */
1621 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1622                         __u64 size)
1623 {
1624         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1625         struct ptlrpc_request   *req = NULL;
1626         struct obd_import       *imp;
1627         struct ost_body         *body;
1628         struct obdo             *oa = NULL;
1629         int                      rc;
1630
1631         ENTRY;
1632
1633         imp = d->opd_obd->u.cli.cl_import;
1634         LASSERT(imp);
1635
1636         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1637         if (req == NULL)
1638                 RETURN(-ENOMEM);
1639
1640         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1641         if (rc) {
1642                 ptlrpc_request_free(req);
1643                 RETURN(rc);
1644         }
1645
1646         /*
1647          * XXX: decide how to handle resend here:
1648          * if we don't resend, then the client may see a wrong file size
1649          * and will also get -EWOULDBLOCK; if we do resend, then the MDS
1650          * thread can get stuck for quite a long time
1651          * (see LU-7975 and sanity/test_27F use cases)
1652          * but let's decide not to resend/delay this truncate request to OST
1653          * and allow the Client to decide to resend, in a less aggressive way
1654          * from after_reply(), by returning -EINPROGRESS instead of
1655          * -EAGAIN/-EWOULDBLOCK upon return from ptlrpc_queue_wait() at the
1656          * end of this routine
1657          */
1658         req->rq_no_resend = req->rq_no_delay = 1;
1659
1660         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1661         ptlrpc_at_set_req_timeout(req);
1662
1663         OBD_ALLOC_PTR(oa);
1664         if (oa == NULL)
1665                 GOTO(out, rc = -ENOMEM);
1666
1667         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1668         LASSERT(rc == 0);
1669         oa->o_size = size;
1670         oa->o_blocks = OBD_OBJECT_EOF;
1671         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1672                       OBD_MD_FLID | OBD_MD_FLGROUP;
1673
1674         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1675         LASSERT(body);
1676         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1677
1678         /* XXX: capa support? */
1679         /* osc_pack_capa(req, body, capa); */
1680
1681         ptlrpc_request_set_replen(req);
1682
1683         rc = ptlrpc_queue_wait(req);
1684         if (rc) {
1685                 /* -EWOULDBLOCK/-EAGAIN means the OST is unreachable at the
1686                  * moment since we have decided not to resend/delay, but this
1687                  * could lead to a wrong size being seen on the Client side and
1688                  * even to a process failing on open if it doesn't handle -EAGAIN
1689                  * itself. So it is better to return -EINPROGRESS instead and
1690                  * leave the decision to resend to the Client side in after_reply()
1691                  */
1692                 if (rc == -EWOULDBLOCK) {
1693                         rc = -EINPROGRESS;
1694                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1695                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1696                                "resend\n");
1697                 } else {
1698                         CERROR("can't punch object: %d\n", rc);
1699                 }
1700         }
1701 out:
1702         ptlrpc_req_finished(req);
1703         if (oa)
1704                 OBD_FREE_PTR(oa);
1705         RETURN(rc);
1706 }
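
/*
 * Hypothetical caller sketch (not compiled) for the "late striping" case
 * described above; only osp_object_truncate() comes from this file, the
 * surrounding variables (dt, file_size) are placeholders:
 *
 *	if (file_size != 0) {
 *		rc = osp_object_truncate(env, dt, file_size);
 *		if (rc)
 *			return rc;
 *	}
 *
 * Note that -EINPROGRESS is simply propagated here so that the Client can
 * decide to resend from after_reply(), as explained in the comments above.
 */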
1707
1708 /**
1709  * Initialize precreation functionality of OSP
1710  *
1711  * Prepares all the internal structures used by precreation; the precreate
1712  * thread itself is started later, from osp_init_statfs().
1713  * \param[in] d         OSP device
1714  *
1715  * \retval 0            on success
1716  * \retval negative     negated errno on error
1717  */
1718 int osp_init_precreate(struct osp_device *d)
1719 {
1720         ENTRY;
1721
1722         OBD_ALLOC_PTR(d->opd_pre);
1723         if (d->opd_pre == NULL)
1724                 RETURN(-ENOMEM);
1725
1726         /* initially precreation isn't ready */
1727         init_waitqueue_head(&d->opd_pre_user_waitq);
1728         d->opd_pre_status = -EAGAIN;
1729         fid_zero(&d->opd_pre_used_fid);
1730         d->opd_pre_used_fid.f_oid = 1;
1731         fid_zero(&d->opd_pre_last_created_fid);
1732         d->opd_pre_last_created_fid.f_oid = 1;
1733         d->opd_last_id = 0;
1734         d->opd_pre_reserved = 0;
1735         d->opd_got_disconnected = 1;
1736         d->opd_pre_create_slow = 0;
1737         d->opd_pre_create_count = OST_MIN_PRECREATE;
1738         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1739         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1740         d->opd_reserved_mb_high = 0;
1741         d->opd_reserved_mb_low = 0;
1742
1743         RETURN(0);
1744 }
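
/*
 * Schematic view (not compiled) of how the init/fini entry points in this
 * file pair up, assuming the usual OSP device setup/teardown order; the
 * authoritative sequence lives in the OSP device code:
 *
 *	osp_init_precreate(d);		allocate and preset opd_pre
 *	osp_init_statfs(d);		arm the statfs timer and start the
 *					"osp-pre" thread
 *	...
 *	osp_statfs_fini(d);		stop the timer and the thread
 *	osp_precreate_fini(d);		free opd_pre
 */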
1745
1746 /**
1747  * Finish precreate functionality of OSP
1748  *
1749  * Releases the precreation state allocated in osp_init_precreate().
1750  * The precreate thread and the statfs update timer are stopped
1751  * separately, in osp_statfs_fini().
1752  *
1753  * \param[in] d         OSP device
1754  */
1755 void osp_precreate_fini(struct osp_device *d)
1756 {
1757         ENTRY;
1758
1759         if (d->opd_pre == NULL)
1760                 RETURN_EXIT;
1761
1762         OBD_FREE_PTR(d->opd_pre);
1763         d->opd_pre = NULL;
1764
1765         EXIT;
1766 }
1767
1768 int osp_init_statfs(struct osp_device *d)
1769 {
1770         struct task_struct              *task;
1771
1772         ENTRY;
1773
1774         spin_lock_init(&d->opd_pre_lock);
1775         init_waitqueue_head(&d->opd_pre_waitq);
1776         thread_set_flags(&d->opd_pre_thread, SVC_INIT);
1777         init_waitqueue_head(&d->opd_pre_thread.t_ctl_waitq);
1778
1779         /*
1780          * Initialize statfs-related things
1781          */
1782         d->opd_statfs_maxage = 5; /* default update interval */
1783         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
1784                                                 1000 * NSEC_PER_SEC);
1785         CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
1786                ktime_get_ns(),
1787                ktime_to_ns(d->opd_statfs_fresh_till));
1788         cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
1789                         (unsigned long)d, 0);
1790
1791         if (d->opd_storage->dd_rdonly)
1792                 RETURN(0);
1793
1794         /*
1795          * start thread handling precreation and statfs updates
1796          */
1797         task = kthread_run(osp_precreate_thread, d,
1798                            "osp-pre-%u-%u", d->opd_index, d->opd_group);
1799         if (IS_ERR(task)) {
1800                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1801                 RETURN(PTR_ERR(task));
1802         }
1803
1804         wait_event_idle(d->opd_pre_thread.t_ctl_waitq,
1805                         osp_precreate_running(d) || osp_precreate_stopped(d));
1806
1807
1808         RETURN(0);
1809 }
1810
1811 void osp_statfs_fini(struct osp_device *d)
1812 {
1813         struct ptlrpc_thread *thread = &d->opd_pre_thread;
1814         ENTRY;
1815
1816         del_timer(&d->opd_statfs_timer);
1817
1818         if (!thread_is_init(thread) && !thread_is_stopped(thread)) {
1819                 thread->t_flags = SVC_STOPPING;
1820                 wake_up(&d->opd_pre_waitq);
1821                 wait_event(thread->t_ctl_waitq, thread_is_stopped(thread));
1822         }
1823
1824         EXIT;
1825 }