lustre/osp/osp_precreate.c (fs/lustre-release.git, commit ae7e4ef7e851872b685636a905e91fddc92925b6)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2016, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osp/osp_precreate.c
33  *
34  * Lustre OST Proxy Device
35  *
36  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
37  * Author: Mikhail Pershin <mike.pershin@intel.com>
38  * Author: Di Wang <di.wang@intel.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_MDS
42
43 #include <linux/kthread.h>
44
45 #include <lustre_obdo.h>
46
47 #include "osp_internal.h"
48
49 /*
50  * there are two specific states to take care of:
51  *
52  * = import is disconnected =
53  *
54  * = import is inactive =
55  *   in this case osp_declare_object_create() returns an error
56  *
57  */
58
59 /**
60  *
61  * Check whether statfs data is expired
62  *
63  * OSP device caches statfs data for the target, the function checks
64  * whether the data is expired or not.
65  *
66  * \param[in] d         OSP device
67  *
68  * \retval              0 - not expired, 1 - expired
69  */
70 static inline int osp_statfs_need_update(struct osp_device *d)
71 {
72         return !cfs_time_before(cfs_time_current(),
73                                 d->opd_statfs_fresh_till);
74 }
75
76 /*
77  * OSP tries to maintain a pool of available objects so that calls to create
78  * objects don't block most of the time
79  *
80  * each time OSP gets connected to the OST, we should start with precreation cleanup
81  */
82 static inline bool osp_precreate_running(struct osp_device *d)
83 {
84         return !!(d->opd_pre_thread.t_flags & SVC_RUNNING);
85 }
86
87 static inline bool osp_precreate_stopped(struct osp_device *d)
88 {
89         return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
90 }
91
92 static void osp_statfs_timer_cb(unsigned long _d)
93 {
94         struct osp_device *d = (struct osp_device *) _d;
95
96         LASSERT(d);
97         if (d->opd_pre != NULL && osp_precreate_running(d))
98                 wake_up(&d->opd_pre_waitq);
99 }
100
101 /**
102  * RPC interpret callback for OST_STATFS RPC
103  *
104  * An interpretation callback called by ptlrpc for OST_STATFS RPC when it is
105  * replied by the target. It's used to maintain statfs cache for the target.
106  * The function fills data from the reply if successful and schedules another
107  * update.
108  *
109  * \param[in] env       LU environment provided by the caller
110  * \param[in] req       RPC replied
111  * \param[in] aa        callback data
112  * \param[in] rc        RPC result
113  *
114  * \retval 0            on success
115  * \retval negative     negated errno on error
116  */
117 static int osp_statfs_interpret(const struct lu_env *env,
118                                 struct ptlrpc_request *req,
119                                 union ptlrpc_async_args *aa, int rc)
120 {
121         struct obd_import       *imp = req->rq_import;
122         struct obd_statfs       *msfs;
123         struct osp_device       *d;
124
125         ENTRY;
126
127         aa = ptlrpc_req_async_args(req);
128         d = aa->pointer_arg[0];
129         LASSERT(d);
130
131         if (rc != 0)
132                 GOTO(out, rc);
133
134         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
135         if (msfs == NULL)
136                 GOTO(out, rc = -EPROTO);
137
138         d->opd_statfs = *msfs;
139
140         osp_pre_update_status(d, rc);
141
142         /* schedule next update */
143         d->opd_statfs_fresh_till = cfs_time_shift(d->opd_statfs_maxage);
144         mod_timer(&d->opd_statfs_timer, d->opd_statfs_fresh_till);
145         d->opd_statfs_update_in_progress = 0;
146
147         CDEBUG(D_CACHE, "updated statfs %p\n", d);
148
149         RETURN(0);
150 out:
151         /* couldn't update statfs, try again as soon as possible */
152         if (d->opd_pre != NULL && osp_precreate_running(d))
153                 wake_up(&d->opd_pre_waitq);
154
155         if (req->rq_import_generation == imp->imp_generation)
156                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
157                        d->opd_obd->obd_name, rc);
158         RETURN(rc);
159 }
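
/*
 * Illustrative timeline (the maxage value is assumed, not taken from this
 * file): with opd_statfs_maxage = 5 seconds, a successful reply at time T
 * sets opd_statfs_fresh_till = T + 5s and re-arms opd_statfs_timer for that
 * moment; osp_statfs_need_update() then returns 0 until T + 5s, after which
 * the precreate thread sends the next OST_STATFS.
 */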
160
161 /**
162  * Send OST_STATFS RPC
163  *
164  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
165  * It also cancels the scheduled update because at times OSP may need to
166  * refresh statfs data before it expires. The function doesn't block;
167  * instead, the interpretation callback osp_statfs_interpret() is used.
168  *
169  * \param[in] d         OSP device
170  */
171 static int osp_statfs_update(struct osp_device *d)
172 {
173         struct ptlrpc_request   *req;
174         struct obd_import       *imp;
175         union ptlrpc_async_args *aa;
176         int                      rc;
177
178         ENTRY;
179
180         CDEBUG(D_CACHE, "going to update statfs\n");
181
182         imp = d->opd_obd->u.cli.cl_import;
183         LASSERT(imp);
184
185         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
186         if (req == NULL)
187                 RETURN(-ENOMEM);
188
189         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
190         if (rc) {
191                 ptlrpc_request_free(req);
192                 RETURN(rc);
193         }
194         ptlrpc_request_set_replen(req);
195         req->rq_request_portal = OST_CREATE_PORTAL;
196         ptlrpc_at_set_req_timeout(req);
197
198         req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
199         aa = ptlrpc_req_async_args(req);
200         aa->pointer_arg[0] = d;
201
202         /*
203          * no updates till reply
204          */
205         del_timer(&d->opd_statfs_timer);
206         d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
207         d->opd_statfs_update_in_progress = 1;
208
209         ptlrpcd_add_req(req);
210
211         RETURN(0);
212 }
213
214 /**
215  * Schedule an immediate update for statfs data
216  *
217  * If cached statfs data claims no free space, but OSP has received a request
218  * to destroy an object (which will probably release some space), then we may
219  * need to refresh cached statfs data sooner than planned. The function checks
220  * that no statfs update is in progress and schedules an immediate update if so.
221  * XXX: there might be a case where removed object(s) do not add free space
222  * (empty object). If the number of such deletions is high, then we can start
223  * to update statfs too often, causing an RPC storm; some throttling is needed...
224  *
225  * \param[in] d         OSP device where statfs data needs to be refreshed
226  */
227 void osp_statfs_need_now(struct osp_device *d)
228 {
229         if (!d->opd_statfs_update_in_progress) {
230                 /*
231                  * if current status is -ENOSPC (lack of free space on OST)
232                  * then we should poll OST immediately once object destroy
233                  * is replied
234                  */
235                 d->opd_statfs_fresh_till = cfs_time_shift(-1);
236                 del_timer(&d->opd_statfs_timer);
237                 wake_up(&d->opd_pre_waitq);
238         }
239 }
240
241 /**
242  * Return number of precreated objects
243  *
244  * A simple helper to calculate the number of precreated objects on the device.
245  *
246  * \param[in] env       LU environment provided by the caller
247  * \param[in] osp       OSP device
248  *
249  * \retval              the number of the precreated objects
250  */
251 static inline int osp_objs_precreated(const struct lu_env *env,
252                                       struct osp_device *osp)
253 {
254         return osp_fid_diff(&osp->opd_pre_last_created_fid,
255                             &osp->opd_pre_used_fid);
256 }
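
/*
 * A worked example with assumed values: if opd_pre_used_fid is
 * [0x200000401:0x64:0x0] (oid 100) and opd_pre_last_created_fid is
 * [0x200000401:0x164:0x0] (oid 356) in the same sequence, then
 * osp_fid_diff() above yields 256, i.e. 256 precreated but not yet
 * consumed objects remain in the pool.
 */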
257
258 /**
259  * Check pool of precreated objects is nearly empty
260  *
261  * We should not wait till the pool of the precreated objects is exhausted,
262  * because then there will be a long period of OSP being unavailable for
263  * new creations due to the lengthy precreate RPC. Instead we ask for another
264  * precreation ahead and hopefully have it ready before the current pool is
265  * empty. Notice this function relies on external locking.
266  *
267  * \param[in] env       LU environment provided by the caller
268  * \param[in] d         OSP device
269  *
270  * \retval              0 - current pool is good enough, 1 - time to precreate
271  */
272 static inline int osp_precreate_near_empty_nolock(const struct lu_env *env,
273                                                   struct osp_device *d)
274 {
275         int window = osp_objs_precreated(env, d);
276
277         /* don't consider new precreation till OST is healthy and
278          * has free space */
279         return ((window - d->opd_pre_reserved < d->opd_pre_create_count / 2) &&
280                 (d->opd_pre_status == 0));
281 }
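
/*
 * A numeric illustration with assumed values: with opd_pre_create_count = 128
 * and opd_pre_reserved = 10, the check above asks for a new precreate as soon
 * as the unused window drops below 10 + 64 = 74 objects, so the next
 * OST_CREATE is already in flight well before the pool is actually empty.
 */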
282
283 /**
284  * Check pool of precreated objects
285  *
286  * This is the lock-protected version of osp_precreate_near_empty_nolock();
287  * see that function for the details.
288  *
289  * \param[in] env       LU environment provided by the caller
290  * \param[in] d         OSP device
291  *
292  * \retval              0 - current pool is good enough, 1 - time to precreate
293  */
294 static inline int osp_precreate_near_empty(const struct lu_env *env,
295                                            struct osp_device *d)
296 {
297         int rc;
298
299         /* XXX: do we really need locking here? */
300         spin_lock(&d->opd_pre_lock);
301         rc = osp_precreate_near_empty_nolock(env, d);
302         spin_unlock(&d->opd_pre_lock);
303         return rc;
304 }
305
306 /**
307  * Check whether the given sequence is exhausted
308  *
309  * Returns a binary result telling whether the given sequence has IDs left
310  * or not. Find the details in osp_fid_end_seq(). This is a lock-protected
311  * version of that function.
312  *
313  * \param[in] env       LU environment provided by the caller
314  * \param[in] osp       OSP device
315  *
316  * \retval              0 - current sequence still has IDs, 1 - sequence exhausted
317  */
318 static inline int osp_create_end_seq(const struct lu_env *env,
319                                      struct osp_device *osp)
320 {
321         struct lu_fid *fid = &osp->opd_pre_used_fid;
322         int rc;
323
324         spin_lock(&osp->opd_pre_lock);
325         rc = osp_fid_end_seq(env, fid);
326         spin_unlock(&osp->opd_pre_lock);
327         return rc;
328 }
329
330 /**
331  * Write FID into last_oid/last_seq files
332  *
333  * The function stores the sequence and the in-sequence id into two dedicated
334  * files. The sync argument can be used to request synchronous commit, so the
335  * function won't return until the updates are committed.
336  *
337  * \param[in] env       LU environment provided by the caller
338  * \param[in] osp       OSP device
339  * \param[in] fid       fid where sequence/id is taken
340  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
341  *
342  * \retval 0            on success
343  * \retval negative     negated errno on error
344  **/
345 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
346                                  struct lu_fid *fid, int sync)
347 {
348         struct osp_thread_info  *oti = osp_env_info(env);
349         struct lu_buf      *lb_oid = &oti->osi_lb;
350         struct lu_buf      *lb_oseq = &oti->osi_lb2;
351         loff_t             oid_off;
352         loff_t             oseq_off;
353         struct thandle    *th;
354         int                   rc;
355         ENTRY;
356
357         /* Note: though f_oid is only 32 bits, it will also write 64 bits
358          * for oid to keep compatibility with the previous version. */
359         lb_oid->lb_buf = &fid->f_oid;
360         lb_oid->lb_len = sizeof(u64);
361         oid_off = sizeof(u64) * osp->opd_index;
362
363         lb_oseq->lb_buf = &fid->f_seq;
364         lb_oseq->lb_len = sizeof(u64);
365         oseq_off = sizeof(u64) * osp->opd_index;
366
367         th = dt_trans_create(env, osp->opd_storage);
368         if (IS_ERR(th))
369                 RETURN(PTR_ERR(th));
370
371         th->th_sync |= sync;
372         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
373                                      lb_oid, oid_off, th);
374         if (rc != 0)
375                 GOTO(out, rc);
376
377         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
378                                      lb_oseq, oseq_off, th);
379         if (rc != 0)
380                 GOTO(out, rc);
381
382         rc = dt_trans_start_local(env, osp->opd_storage, th);
383         if (rc != 0)
384                 GOTO(out, rc);
385
386         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
387                              &oid_off, th);
388         if (rc != 0) {
389                 CERROR("%s: can not write to last oid file: rc = %d\n",
390                         osp->opd_obd->obd_name, rc);
391                 GOTO(out, rc);
392         }
393         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
394                              &oseq_off, th);
395         if (rc) {
396                 CERROR("%s: can not write to last seq file: rc = %d\n",
397                         osp->opd_obd->obd_name, rc);
398                 GOTO(out, rc);
399         }
400 out:
401         dt_trans_stop(env, osp->opd_storage, th);
402         RETURN(rc);
403 }
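
/*
 * Layout illustration (the index value is assumed): each target owns one
 * fixed 64-bit slot in both files, at offset opd_index * 8 bytes. For an OSP
 * with opd_index = 3 both records are written at offset 24:
 *
 *      last_oid file: [oid idx0][oid idx1][oid idx2][oid idx3] ...
 *      last_seq file: [seq idx0][seq idx1][seq idx2][seq idx3] ...
 */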
404
405 /**
406  * Switch to another sequence
407  *
408  * When the current sequence has no available IDs left, OSP has to switch to
409  * a new sequence. OSP requests one using the regular FLDB protocol and
410  * stores it synchronously before it is used for precreation. This is needed
411  * to keep the sequence referenced (not orphaned), otherwise it's
412  * possible that the OST has some objects precreated and the clients have data
413  * written to them, but after an MDT failover nobody refers to those objects
414  * and OSP has no idea that the sequence needs to be cleaned up.
415  * While this is a very expensive operation, it's supposed to happen very
416  * infrequently because a sequence has 2^32 or 2^48 objects (depending on type)
417  *
418  * \param[in] env       LU environment provided by the caller
419  * \param[in] osp       OSP device
420  *
421  * \retval 0            on success
422  * \retval negative     negated errno on error
423  */
424 static int osp_precreate_rollover_new_seq(struct lu_env *env,
425                                           struct osp_device *osp)
426 {
427         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
428         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
429         int             rc;
430         ENTRY;
431
432         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
433         if (rc != 0) {
434                 CERROR("%s: alloc fid error: rc = %d\n",
435                        osp->opd_obd->obd_name, rc);
436                 RETURN(rc);
437         }
438
439         fid->f_oid = 1;
440         fid->f_ver = 0;
441         LASSERTF(fid_seq(fid) != fid_seq(last_fid),
442                  "fid "DFID", last_fid "DFID"\n", PFID(fid),
443                  PFID(last_fid));
444
445         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
446         if (rc != 0) {
447                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
448                        osp->opd_obd->obd_name, rc);
449                 RETURN(rc);
450         }
451
452         LCONSOLE_INFO("%s: update sequence from %#llx to %#llx\n",
453                       osp->opd_obd->obd_name, fid_seq(last_fid),
454                       fid_seq(fid));
455         /* Update last_xxx to the new seq */
456         spin_lock(&osp->opd_pre_lock);
457         osp->opd_last_used_fid = *fid;
458         osp->opd_gap_start_fid = *fid;
459         osp->opd_pre_used_fid = *fid;
460         osp->opd_pre_last_created_fid = *fid;
461         spin_unlock(&osp->opd_pre_lock);
462
463         RETURN(rc);
464 }
465
466 /**
467  * Find IDs available in current sequence
468  *
469  * The function calculates the highest possible ID and the number of IDs
470  * available in the current sequence OSP is using. The number is limited
471  * artificially by the caller (grow param) and by the number of IDs available
472  * in the sequence by nature. The function doesn't require external
473  * locking.
474  *
475  * \param[in] env       LU environment provided by the caller
476  * \param[in] osp       OSP device
477  * \param[in] fid       FID the caller wants to start with
478  * \param[in] grow      how many the caller wants
479  * \param[out] fid      the highest calculated FID
480  * \param[out] grow     the number of available IDs calculated
481  *
482  * \retval              0 on success, 1 - the sequence is empty
483  */
484 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
485                               struct lu_fid *fid, int *grow)
486 {
487         struct osp_thread_info  *osi = osp_env_info(env);
488         __u64                   end;
489         int                     i = 0;
490
491         if (fid_is_idif(fid)) {
492                 struct lu_fid   *last_fid;
493                 struct ost_id   *oi = &osi->osi_oi;
494
495                 spin_lock(&osp->opd_pre_lock);
496                 last_fid = &osp->opd_pre_last_created_fid;
497                 fid_to_ostid(last_fid, oi);
498                 end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
499                 *grow = end - ostid_id(oi);
500                 ostid_set_id(oi, ostid_id(oi) + *grow);
501                 spin_unlock(&osp->opd_pre_lock);
502
503                 if (*grow == 0)
504                         return 1;
505
506                 ostid_to_fid(fid, oi, osp->opd_index);
507                 return 0;
508         }
509
510         spin_lock(&osp->opd_pre_lock);
511         *fid = osp->opd_pre_last_created_fid;
512         end = fid->f_oid;
513         end = min((end + *grow), (__u64)LUSTRE_DATA_SEQ_MAX_WIDTH);
514         *grow = end - fid->f_oid;
515         fid->f_oid += end - fid->f_oid;
516         spin_unlock(&osp->opd_pre_lock);
517
518         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
519                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
520
521         return *grow > 0 ? 0 : 1;
522 }
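
/*
 * An example of the clamping above, with assumed values: if
 * opd_pre_last_created_fid has f_oid = LUSTRE_DATA_SEQ_MAX_WIDTH - 10 and the
 * caller passes *grow = 64, then end is clamped to the sequence width,
 * *grow becomes 10 and the returned FID points at the last usable ID. Once
 * the sequence is fully used, a subsequent call returns 1 and the precreate
 * thread rolls over to a new sequence via osp_precreate_rollover_new_seq().
 */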
523
524 /**
525  * Prepare and send precreate RPC
526  *
527  * The function finds how many objects should be precreated. Then it allocates,
528  * prepares and schedules the precreate RPC synchronously. Upon reply the
529  * function wakes up the threads waiting for new objects on this target. If the
530  * target wasn't able to create all the objects requested, then the next
531  * precreate will ask for fewer objects (i.e. slow precreation down).
532  *
533  * \param[in] env       LU environment provided by the caller
534  * \param[in] d         OSP device
535  *
536  * \retval 0            on success
537  * \retval negative     negated errno on error
538  **/
539 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
540 {
541         struct osp_thread_info  *oti = osp_env_info(env);
542         struct ptlrpc_request   *req;
543         struct obd_import       *imp;
544         struct ost_body         *body;
545         int                      rc, grow, diff;
546         struct lu_fid           *fid = &oti->osi_fid;
547         ENTRY;
548
549         /* don't precreate new objects till OST is healthy and has free space */
550         if (unlikely(d->opd_pre_status)) {
551                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
552                        d->opd_obd->obd_name, d->opd_pre_status);
553                 RETURN(0);
554         }
555
556         /*
557          * if connection/initialization is not completed, ignore
558          */
559         imp = d->opd_obd->u.cli.cl_import;
560         LASSERT(imp);
561
562         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
563         if (req == NULL)
564                 RETURN(-ENOMEM);
565         req->rq_request_portal = OST_CREATE_PORTAL;
566         /* we should not resend create request - anyway we will have delorphan
567          * and kill these objects */
568         req->rq_no_delay = req->rq_no_resend = 1;
569
570         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
571         if (rc) {
572                 ptlrpc_request_free(req);
573                 RETURN(rc);
574         }
575
576         LASSERT(d->opd_pre->osp_pre_delorphan_sent != 0);
577         spin_lock(&d->opd_pre_lock);
578         if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
579                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
580         grow = d->opd_pre_create_count;
581         spin_unlock(&d->opd_pre_lock);
582
583         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
584         LASSERT(body);
585
586         *fid = d->opd_pre_last_created_fid;
587         rc = osp_precreate_fids(env, d, fid, &grow);
588         if (rc == 1) {
589                 /* Current seq has been used up */
590                 if (!osp_is_fid_client(d)) {
591                         osp_pre_update_status(d, -ENOSPC);
592                         rc = -ENOSPC;
593                 }
594                 wake_up(&d->opd_pre_waitq);
595                 GOTO(out_req, rc);
596         }
597
598         if (!osp_is_fid_client(d)) {
599                 /* Non-FID client will always send seq 0 because of
600                  * compatibility */
601                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
602                 fid->f_seq = 0;
603         }
604
605         fid_to_ostid(fid, &body->oa.o_oi);
606         body->oa.o_valid = OBD_MD_FLGROUP;
607
608         ptlrpc_request_set_replen(req);
609
610         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
611                 GOTO(ready, rc = 0);
612
613         rc = ptlrpc_queue_wait(req);
614         if (rc) {
615                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
616                        rc);
617                 GOTO(out_req, rc);
618         }
619         LASSERT(req->rq_transno == 0);
620
621         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
622         if (body == NULL)
623                 GOTO(out_req, rc = -EPROTO);
624
625         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
626
627 ready:
628         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
629                 CERROR("%s: precreate fid "DFID" < local used fid "DFID
630                        ": rc = %d\n", d->opd_obd->obd_name,
631                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
632                 GOTO(out_req, rc = -ESTALE);
633         }
634
635         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
636
637         spin_lock(&d->opd_pre_lock);
638         if (diff < grow) {
639                 /* the OST has not managed to create all the
640                  * objects we asked for */
641                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
642                 d->opd_pre_create_slow = 1;
643         } else {
644                 /* the OST is able to keep up with the work,
645                  * we could consider increasing create_count
646                  * next time if needed */
647                 d->opd_pre_create_slow = 0;
648         }
649
650         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
651         fid_to_ostid(fid, &body->oa.o_oi);
652
653         d->opd_pre_last_created_fid = *fid;
654         spin_unlock(&d->opd_pre_lock);
655
656         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
657                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
658                PFID(&d->opd_pre_last_created_fid));
659 out_req:
660         /* now we can wake up all users waiting for objects */
661         osp_pre_update_status(d, rc);
662         wake_up(&d->opd_pre_user_waitq);
663
664         ptlrpc_req_finished(req);
665         RETURN(rc);
666 }
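
/*
 * Throttling illustration (numbers are assumed): if grow = 128 objects were
 * requested but the reply advanced last_created by only diff = 40, then
 * opd_pre_create_count is clamped to max(40, OST_MIN_PRECREATE) and
 * opd_pre_create_slow is set, so the next RPC asks for less; once the OST
 * keeps up again (diff >= grow) the slow flag is cleared and the window may
 * grow back in osp_precreate_reserve().
 */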
667
668 /**
669  * Get last precreated object from target (OST)
670  *
671  * Sends a synchronous RPC to the target (OST) to learn the last precreated
672  * object. This is later used to remove all unused objects (the orphan cleanup
673  * procedure). Also, the next object after the one we got will be used as a
674  * starting point for new precreates.
675  *
676  * \param[in] env       LU environment provided by the caller
677  * \param[in] d         OSP device
678  *
679  * \retval 0            on success
680  * \retval negative     negated errno on error
681  **/
682 static int osp_get_lastfid_from_ost(const struct lu_env *env,
683                                     struct osp_device *d)
684 {
685         struct ptlrpc_request   *req = NULL;
686         struct obd_import       *imp;
687         struct lu_fid           *last_fid;
688         char                    *tmp;
689         int                     rc;
690         ENTRY;
691
692         imp = d->opd_obd->u.cli.cl_import;
693         LASSERT(imp);
694
695         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
696         if (req == NULL)
697                 RETURN(-ENOMEM);
698
699         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
700                              sizeof(KEY_LAST_FID));
701
702         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
703         if (rc) {
704                 ptlrpc_request_free(req);
705                 RETURN(rc);
706         }
707
708         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
709         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
710
711         req->rq_no_delay = req->rq_no_resend = 1;
712         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
713         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
714
715         ptlrpc_request_set_replen(req);
716
717         rc = ptlrpc_queue_wait(req);
718         if (rc) {
719                 /* bad-bad OST.. let sysadm sort this out */
720                 if (rc == -ENOTSUPP) {
721                         CERROR("%s: server does not support FID: rc = %d\n",
722                                d->opd_obd->obd_name, -ENOTSUPP);
723                 }
724                 ptlrpc_set_import_active(imp, 0);
725                 GOTO(out, rc);
726         }
727
728         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
729         if (last_fid == NULL) {
730                 CERROR("%s: Got last_fid failed.\n", d->opd_obd->obd_name);
731                 GOTO(out, rc = -EPROTO);
732         }
733
734         if (!fid_is_sane(last_fid)) {
735                 CERROR("%s: Got insane last_fid "DFID"\n",
736                        d->opd_obd->obd_name, PFID(last_fid));
737                 GOTO(out, rc = -EPROTO);
738         }
739
740         /* Only update the last used fid if the OST has objects for
741          * this sequence, i.e. fid_oid > 0 */
742         if (fid_oid(last_fid) > 0)
743                 d->opd_last_used_fid = *last_fid;
744
745         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
746                PFID(last_fid));
747
748 out:
749         ptlrpc_req_finished(req);
750         RETURN(rc);
751 }
752
753 /**
754  * Cleanup orphans on OST
755  *
756  * This function is called in the context of a dedicated thread handling
757  * all the precreation stuff. The function waits till local recovery
758  * is complete, then identifies all the unreferenced objects (orphans)
759  * using the highest ID referenced locally and the highest object
760  * precreated by the target. The found range is subject to removal
761  * using a specially flagged RPC. During this process OSP is marked
762  * unavailable for new objects.
763  *
764  * \param[in] env       LU environment provided by the caller
765  * \param[in] d         OSP device
766  *
767  * \retval 0            on success
768  * \retval negative     negated errno on error
769  */
770 static int osp_precreate_cleanup_orphans(struct lu_env *env,
771                                          struct osp_device *d)
772 {
773         struct osp_thread_info  *osi = osp_env_info(env);
774         struct lu_fid           *last_fid = &osi->osi_fid;
775         struct ptlrpc_request   *req = NULL;
776         struct obd_import       *imp;
777         struct ost_body         *body;
778         struct l_wait_info       lwi = { 0 };
779         int                      update_status = 0;
780         int                      rc;
781         int                      diff;
782         struct lu_fid            fid;
783
784         ENTRY;
785
786         /*
787          * wait for local recovery to finish, so we can cleanup orphans.
788          * orphans are all objects since "last used" (assigned).
789          * consider reserved objects as created otherwise we can get into
790          * a livelock when one blocked thread holding a reservation can
791          * block recovery. see LU-8367 for the details. in some cases this
792          * can result in gaps (i.e. leaked objects), but we've got LFSCK...
793          *
794          * do not allow new reservations because they may end up being
795          * assigned objects that are cleaned up as orphans below.
796          */
797         spin_lock(&d->opd_pre_lock);
798         d->opd_pre_recovering = 1;
799         spin_unlock(&d->opd_pre_lock);
800         /*
801          * The locking above makes sure the opd_pre_reserved check below will
802          * catch all osp_precreate_reserve() calls who find
803          * "!opd_pre_recovering".
804          */
805         l_wait_event(d->opd_pre_waitq, d->opd_recovery_completed ||
806                      !osp_precreate_running(d) || d->opd_got_disconnected,
807                      &lwi);
808         if (!osp_precreate_running(d) || d->opd_got_disconnected)
809                 GOTO(out, rc = -EAGAIN);
810
811         *last_fid = d->opd_last_used_fid;
812         /* The OSP should already have a valid seq by now */
813         LASSERT(!fid_is_zero(last_fid));
814         if (fid_oid(&d->opd_last_used_fid) < 2) {
815                 /* lastfid looks strange... ask OST */
816                 rc = osp_get_lastfid_from_ost(env, d);
817                 if (rc)
818                         GOTO(out, rc);
819         }
820
821         imp = d->opd_obd->u.cli.cl_import;
822         LASSERT(imp);
823
824         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
825         if (req == NULL)
826                 GOTO(out, rc = -ENOMEM);
827
828         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
829         if (rc) {
830                 ptlrpc_request_free(req);
831                 req = NULL;
832                 GOTO(out, rc);
833         }
834
835         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
836         if (body == NULL)
837                 GOTO(out, rc = -EPROTO);
838
839         body->oa.o_flags = 0;
840         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
841
842         /* unless this is the very first DELORPHAN (when we really
843          * can destroy some orphans), just tell OST to recreate
844          * missing objects in our precreate pool */
845         spin_lock(&d->opd_pre_lock);
846         if (d->opd_pre->osp_pre_delorphan_sent) {
847                 fid = d->opd_pre_last_created_fid;
848         } else {
849                 fid = d->opd_last_used_fid;
850                 body->oa.o_flags = OBD_FL_DELORPHAN;
851         }
852         spin_unlock(&d->opd_pre_lock);
853         fid_to_ostid(&fid, &body->oa.o_oi);
854
855         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
856                d->opd_obd->obd_name, PFID(&fid));
857
858         ptlrpc_request_set_replen(req);
859
860         /* Don't resend the delorphan req */
861         req->rq_no_resend = req->rq_no_delay = 1;
862
863         rc = ptlrpc_queue_wait(req);
864         if (rc) {
865                 update_status = 1;
866                 GOTO(out, rc);
867         }
868
869         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
870         if (body == NULL)
871                 GOTO(out, rc = -EPROTO);
872
873         /*
874          * OST provides us with the id the new pool starts from in body->oa.o_oi
875          */
876         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
877
878         spin_lock(&d->opd_pre_lock);
879         diff = osp_fid_diff(&fid, last_fid);
880         if (diff > 0) {
881                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
882                 d->opd_pre_last_created_fid = *last_fid;
883         } else {
884                 d->opd_pre_create_count = OST_MIN_PRECREATE;
885                 d->opd_pre_last_created_fid = *last_fid;
886         }
887         /*
888          * This empties the pre-creation pool and effectively blocks any new
889          * reservations.
890          */
891         LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
892                 LUSTRE_DATA_SEQ_MAX_WIDTH);
893         if (d->opd_pre->osp_pre_delorphan_sent == 0)
894                 d->opd_pre_used_fid = d->opd_pre_last_created_fid;
895         d->opd_pre_create_slow = 0;
896         spin_unlock(&d->opd_pre_lock);
897         d->opd_pre->osp_pre_delorphan_sent = 1;
898
899         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
900                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
901                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
902 out:
903         if (req)
904                 ptlrpc_req_finished(req);
905
906         /*
907          * If rc is zero, the pre-creation window should have been emptied.
908          * Since waking up the herd would be useless without pre-created
909          * objects, we defer the signal to osp_precreate_send() in that case.
910          */
911         if (rc != 0) {
912                 if (update_status) {
913                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
914                                d->opd_obd->obd_name, rc);
915                         /* we can't proceed from here, the OST seems to
916                          * be in bad shape, better to wait for
917                          * a new instance of the server and repeat
918                          * from the beginning. Notify possible waiters
919                          * that this OSP isn't quite functional yet */
920                         osp_pre_update_status(d, rc);
921                 } else {
922                         wake_up(&d->opd_pre_user_waitq);
923                 }
924         } else {
925                 spin_lock(&d->opd_pre_lock);
926                 d->opd_pre_recovering = 0;
927                 spin_unlock(&d->opd_pre_lock);
928         }
929
930         RETURN(rc);
931 }
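
/*
 * An illustration with assumed FIDs: if the MDT's last locally referenced
 * object is [seq:0x3e8] (oid 1000) while the OST reports its last precreated
 * object as [seq:0x4b0] (oid 1200), the first, DELORPHAN-flagged, request
 * lets the OST destroy the unreferenced objects 1001..1200; later requests
 * (osp_pre_delorphan_sent already set) only ask the OST to recreate anything
 * missing up to opd_pre_last_created_fid instead of destroying.
 */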
932
933 /**
934  * Update precreate status using statfs data
935  *
936  * The function decides whether this OSP should be used for new objects.
937  * IOW, whether this OST is used up or has some free space. Cached statfs
938  * data is used to make this decision. If the latest result of statfs
939  * request (rc argument) is not success, then just mark OSP unavailable
940  * right away.
941  *
942  * Add a bit of hysteresis so this flag isn't continually flapping,
943  * and ensure that new files don't get extremely fragmented due to
944  * only a small amount of available space in the filesystem.
945  * We want to set the ENOSPC when there is less than reserved size
946  * free and clear it when there is at least 2*reserved size free space.
947  * The function updates the current precreation status: functional or not.
948  *
949  * \param[in] d         OSP device
950  * \param[in] rc        new precreate status for device \a d
951  *
954  */
955 void osp_pre_update_status(struct osp_device *d, int rc)
956 {
957         struct obd_statfs       *msfs = &d->opd_statfs;
958         int                      old = d->opd_pre_status;
959         __u64                    available;
960
961         d->opd_pre_status = rc;
962         if (rc)
963                 goto out;
964
965         if (likely(msfs->os_type)) {
966                 if (unlikely(d->opd_reserved_mb_high == 0 &&
967                              d->opd_reserved_mb_low == 0)) {
968                         /* Use ~0.1% by default to disable object allocation,
969                          * and ~0.2% to re-enable; sizes in MB; set both watermarks
970                          */
971                         spin_lock(&d->opd_pre_lock);
972                         if (d->opd_reserved_mb_high == 0 &&
973                             d->opd_reserved_mb_low == 0) {
974                                 d->opd_reserved_mb_low =
975                                         ((msfs->os_bsize >> 10) *
976                                         msfs->os_blocks) >> 20;
977                                 if (d->opd_reserved_mb_low == 0)
978                                         d->opd_reserved_mb_low = 1;
979                                 d->opd_reserved_mb_high =
980                                         (d->opd_reserved_mb_low << 1) + 1;
981                         }
982                         spin_unlock(&d->opd_pre_lock);
983                 }
984                 /* in MB */
985                 available = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
986                 if (msfs->os_ffree < 32)
987                         msfs->os_state |= OS_STATE_ENOINO;
988                 else if (msfs->os_ffree > 64)
989                         msfs->os_state &= ~OS_STATE_ENOINO;
990
991                 if (available < d->opd_reserved_mb_low)
992                         msfs->os_state |= OS_STATE_ENOSPC;
993                 else if (available > d->opd_reserved_mb_high)
994                         msfs->os_state &= ~OS_STATE_ENOSPC;
995                 if (msfs->os_state & (OS_STATE_ENOINO | OS_STATE_ENOSPC)) {
996                         d->opd_pre_status = -ENOSPC;
997                         if (old != -ENOSPC)
998                                 CDEBUG(D_INFO, "%s: status: %llu blocks, %llu "
999                                        "free, %llu avail, %llu MB avail, %u "
1000                                        "hwm -> %d: rc = %d\n",
1001                                        d->opd_obd->obd_name, msfs->os_blocks,
1002                                        msfs->os_bfree, msfs->os_bavail,
1003                                        available, d->opd_reserved_mb_high,
1004                                        d->opd_pre_status, rc);
1005                         CDEBUG(D_INFO,
1006                                "non-committed changes: %u, in progress: %u\n",
1007                                atomic_read(&d->opd_syn_changes),
1008                                atomic_read(&d->opd_syn_rpc_in_progress));
1009                 } else if (unlikely(old == -ENOSPC)) {
1010                         d->opd_pre_status = 0;
1011                         spin_lock(&d->opd_pre_lock);
1012                         d->opd_pre_create_slow = 0;
1013                         d->opd_pre_create_count = OST_MIN_PRECREATE;
1014                         spin_unlock(&d->opd_pre_lock);
1015                         wake_up(&d->opd_pre_waitq);
1016
1017                         CDEBUG(D_INFO, "%s: space available: %llu blocks, %llu"
1018                                " free, %llu avail, %lluMB avail, %u lwm"
1019                                " -> %d: rc = %d\n", d->opd_obd->obd_name,
1020                                msfs->os_blocks, msfs->os_bfree, msfs->os_bavail,
1021                                available, d->opd_reserved_mb_low,
1022                                d->opd_pre_status, rc);
1023                 }
1024         }
1025 out:
1026         wake_up(&d->opd_pre_user_waitq);
1027 }
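
/*
 * A worked hysteresis example (the disk size is assumed): on a 1 TiB OST with
 * 4 KiB blocks the defaults come out to opd_reserved_mb_low = 1024 MB (~0.1%)
 * and opd_reserved_mb_high = 2049 MB; -ENOSPC is set once the available space
 * drops below 1024 MB and is cleared only after it climbs back above 2049 MB,
 * so the flag does not flap around a single threshold.
 */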
1028
1029 /**
1030  * Initialize FID for precreation
1031  *
1032  * For a newly created target, a new sequence should be taken.
1033  * The function checks that there is no IDIF in use (if the target was
1034  * added with an older version of Lustre), then requests a new
1035  * sequence from FLDB using the regular protocol. Then this new
1036  * sequence is stored on persistent storage synchronously to prevent
1037  * possible object leakage (for the details see the description of
1038  * osp_precreate_rollover_new_seq()).
1039  *
1040  * \param[in] osp       OSP device
1041  *
1042  * \retval 0            on success
1043  * \retval negative     negated errno on error
1044  */
1045 int osp_init_pre_fid(struct osp_device *osp)
1046 {
1047         struct lu_env           env;
1048         struct osp_thread_info  *osi;
1049         struct lu_client_seq    *cli_seq;
1050         struct lu_fid           *last_fid;
1051         int                     rc;
1052         ENTRY;
1053
1054         LASSERT(osp->opd_pre != NULL);
1055
1056         /* Let's check if the current last_seq/fid is valid,
1057          * otherwise request new sequence from the controller */
1058         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1059                 /* Non-MDT0 can only use normal sequence for
1060                  * OST objects */
1061                 if (fid_is_norm(&osp->opd_last_used_fid))
1062                         RETURN(0);
1063         } else {
1064                 /* Initially MDT0 will start with IDIF, after
1065                  * that it will request new sequence from the
1066                  * controller */
1067                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1068                     fid_is_norm(&osp->opd_last_used_fid))
1069                         RETURN(0);
1070         }
1071
1072         if (!fid_is_zero(&osp->opd_last_used_fid))
1073                 CWARN("%s: invalid last used fid "DFID
1074                       ", try to get new sequence.\n",
1075                       osp->opd_obd->obd_name,
1076                       PFID(&osp->opd_last_used_fid));
1077
1078         rc = lu_env_init(&env, osp->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1079         if (rc) {
1080                 CERROR("%s: init env error: rc = %d\n",
1081                        osp->opd_obd->obd_name, rc);
1082                 RETURN(rc);
1083         }
1084
1085         osi = osp_env_info(&env);
1086         last_fid = &osi->osi_fid;
1087         fid_zero(last_fid);
1088         /* For a fresh fs, it will allocate a new sequence first */
1089         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1090                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1091                 rc = seq_client_get_seq(&env, cli_seq, &last_fid->f_seq);
1092                 if (rc != 0) {
1093                         CERROR("%s: alloc fid error: rc = %d\n",
1094                                osp->opd_obd->obd_name, rc);
1095                         GOTO(out, rc);
1096                 }
1097         } else {
1098                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1099         }
1100         last_fid->f_oid = 1;
1101         last_fid->f_ver = 0;
1102
1103         spin_lock(&osp->opd_pre_lock);
1104         osp->opd_last_used_fid = *last_fid;
1105         osp->opd_pre_used_fid = *last_fid;
1106         osp->opd_pre_last_created_fid = *last_fid;
1107         spin_unlock(&osp->opd_pre_lock);
1108         rc = osp_write_last_oid_seq_files(&env, osp, last_fid, 1);
1109         if (rc != 0) {
1110                 CERROR("%s: write fid error: rc = %d\n",
1111                        osp->opd_obd->obd_name, rc);
1112                 GOTO(out, rc);
1113         }
1114 out:
1115         lu_env_fini(&env);
1116         RETURN(rc);
1117 }
1118
1119 /**
1120  * The core of precreate functionality
1121  *
1122  * The function implements the main precreation loop. Basically it
1123  * involves connecting to the target, precreate FID initialization,
1124  * identifying and removing orphans, then serving precreation. As
1125  * part of the latter, the thread is responsible for statfs data
1126  * updates. The precreation is mostly driven by other threads
1127  * asking for new OST objects - those askers wake the thread when
1128  * the number of precreated objects reaches the low watermark.
1129  * After a disconnect, the sequence above repeats. This keeps going
1130  * until the thread is requested to stop.
1131  *
1132  * \param[in] _arg      private data of the thread (the OSP device to handle)
1133  *
1134  * \retval 0            on success
1135  * \retval negative     negated errno on error
1136  */
1137 static int osp_precreate_thread(void *_arg)
1138 {
1139         struct osp_device       *d = _arg;
1140         struct ptlrpc_thread    *thread = &d->opd_pre_thread;
1141         struct l_wait_info       lwi = { 0 };
1142         struct l_wait_info       lwi2 = LWI_TIMEOUT(cfs_time_seconds(5),
1143                                                     back_to_sleep, NULL);
1144         struct lu_env            env;
1145         int                      rc;
1146
1147         ENTRY;
1148
1149         rc = lu_env_init(&env, d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1150         if (rc) {
1151                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1152                        rc);
1153                 RETURN(rc);
1154         }
1155
1156         spin_lock(&d->opd_pre_lock);
1157         thread->t_flags = SVC_RUNNING;
1158         spin_unlock(&d->opd_pre_lock);
1159         wake_up(&thread->t_ctl_waitq);
1160
1161         while (osp_precreate_running(d)) {
1162                 /*
1163                  * need to be connected to OST
1164                  */
1165                 while (osp_precreate_running(d)) {
1166                         if (d->opd_pre_recovering &&
1167                             d->opd_imp_connected &&
1168                             !d->opd_got_disconnected)
1169                                 break;
1170                         l_wait_event(d->opd_pre_waitq,
1171                                      !osp_precreate_running(d) ||
1172                                      d->opd_new_connection,
1173                                      &lwi);
1174
1175                         if (!d->opd_new_connection)
1176                                 continue;
1177
1178                         d->opd_new_connection = 0;
1179                         d->opd_got_disconnected = 0;
1180                         break;
1181                 }
1182
1183                 if (!osp_precreate_running(d))
1184                         break;
1185
1186                 LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1187                 /* Sigh, fid client is not ready yet */
1188                 if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
1189                         continue;
1190
1191                 /* Init fid for osp_precreate if necessary */
1192                 rc = osp_init_pre_fid(d);
1193                 if (rc != 0) {
1194                         class_export_put(d->opd_exp);
1195                         d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
1196                         CERROR("%s: init pre fid error: rc = %d\n",
1197                                d->opd_obd->obd_name, rc);
1198                         continue;
1199                 }
1200
1201                 if (osp_statfs_update(d)) {
1202                         l_wait_event(d->opd_pre_waitq,
1203                                      !osp_precreate_running(d), &lwi2);
1204                         continue;
1205                 }
1206
1207                 /*
1208                  * Clean up orphans or recreate missing objects.
1209                  */
1210                 rc = osp_precreate_cleanup_orphans(&env, d);
1211                 if (rc != 0) {
1212                         schedule_timeout_interruptible(cfs_time_seconds(1));
1213                         continue;
1214                 }
1215                 /*
1216                  * connected, can handle precreates now
1217                  */
1218                 while (osp_precreate_running(d)) {
1219                         l_wait_event(d->opd_pre_waitq,
1220                                      !osp_precreate_running(d) ||
1221                                      osp_precreate_near_empty(&env, d) ||
1222                                      osp_statfs_need_update(d) ||
1223                                      d->opd_got_disconnected, &lwi);
1224
1225                         if (!osp_precreate_running(d))
1226                                 break;
1227
1228                         /* something happened to the connection
1229                          * have to start from the beginning */
1230                         if (d->opd_got_disconnected)
1231                                 break;
1232
1233                         if (osp_statfs_need_update(d))
1234                                 if (osp_statfs_update(d))
1235                                         break;
1236
1237                         /* To avoid handling different seq in precreate/orphan
1238                          * cleanup, it will hold precreate until current seq is
1239                          * used up. */
1240                         if (unlikely(osp_precreate_end_seq(&env, d) &&
1241                             !osp_create_end_seq(&env, d)))
1242                                 continue;
1243
1244                         if (unlikely(osp_precreate_end_seq(&env, d) &&
1245                                      osp_create_end_seq(&env, d))) {
1246                                 LCONSOLE_INFO("%s:%#llx is used up."
1247                                               " Update to new seq\n",
1248                                               d->opd_obd->obd_name,
1249                                          fid_seq(&d->opd_pre_last_created_fid));
1250                                 rc = osp_precreate_rollover_new_seq(&env, d);
1251                                 if (rc)
1252                                         continue;
1253                         }
1254
1255                         if (osp_precreate_near_empty(&env, d)) {
1256                                 rc = osp_precreate_send(&env, d);
1257                                 /* osp_precreate_send() sets opd_pre_status
1258                          * in case of error, which prevents the use of
1259                          * the failed device. */
1260                                 if (rc < 0 && rc != -ENOSPC &&
1261                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1262                                         CERROR("%s: cannot precreate objects:"
1263                                                " rc = %d\n",
1264                                                d->opd_obd->obd_name, rc);
1265                         }
1266                 }
1267         }
1268
1269         thread->t_flags = SVC_STOPPED;
1270         lu_env_fini(&env);
1271         wake_up(&thread->t_ctl_waitq);
1272
1273         RETURN(0);
1274 }
1275
1276 /**
1277  * Check when to stop waiting for precreated objects.
1278  *
1279  * The caller wanting a new OST object can't wait indefinitely. The
1280  * function checks for a few conditions including available new OST
1281  * objects, disconnected OST, lack of space with no pending destroys,
1282  * etc. IOW, it checks whether the current OSP state is good to keep
1283  * waiting or it's better to give up.
1284  *
1285  * \param[in] env       LU environment provided by the caller
1286  * \param[in] d         OSP device
1287  *
1288  * \retval              0 - keep waiting, 1 - no luck
1289  */
1290 static int osp_precreate_ready_condition(const struct lu_env *env,
1291                                          struct osp_device *d)
1292 {
1293         if (d->opd_pre_recovering)
1294                 return 0;
1295
1296         /* ready if got enough precreated objects */
1297         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
1298         if (d->opd_pre_reserved + 1 < osp_objs_precreated(env, d))
1299                 return 1;
1300
1301         /* ready if OST reported no space and no destroys in progress */
1302         if (atomic_read(&d->opd_syn_changes) +
1303             atomic_read(&d->opd_syn_rpc_in_progress) == 0 &&
1304             d->opd_pre_status == -ENOSPC)
1305                 return 1;
1306
1307         /* Bail out if I/O to the OST fails */
1308         if (d->opd_pre_status != 0 &&
1309             d->opd_pre_status != -EAGAIN &&
1310             d->opd_pre_status != -ENODEV &&
1311             d->opd_pre_status != -ENOTCONN &&
1312             d->opd_pre_status != -ENOSPC) {
1313                 /* DEBUG LU-3230 */
1314                 if (d->opd_pre_status != -EIO)
1315                         CERROR("%s: precreate failed opd_pre_status %d\n",
1316                                d->opd_obd->obd_name, d->opd_pre_status);
1317                 return 1;
1318         }
1319
1320         return 0;
1321 }
1322
1323 static int osp_precreate_timeout_condition(void *data)
1324 {
1325         struct osp_device *d = data;
1326
1327         CDEBUG(D_HA, "%s: slow creates, last="DFID", next="DFID", "
1328               "reserved=%llu, syn_changes=%u, "
1329               "syn_rpc_in_progress=%d, status=%d\n",
1330               d->opd_obd->obd_name, PFID(&d->opd_pre_last_created_fid),
1331               PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1332               atomic_read(&d->opd_syn_changes),
1333               atomic_read(&d->opd_syn_rpc_in_progress),
1334               d->opd_pre_status);
1335
1336         return 1;
1337 }
1338
1339 /**
1340  * Reserve object in precreate pool
1341  *
1342  * When the caller wants to create a new object on this target (target
1343  * represented by the given OSP), it should declare this intention using
1344  * a regular ->dt_declare_create() OSD API method. Then OSP will try
1345  * to reserve an object in the existing precreated pool or wait up to
1346  * obd_timeout for an available object to appear in the pool (a dedicated
1347  * thread does the real precreation in the background). The object can be
1348  * consumed later with osp_precreate_get_fid() or released with a call to
1349  * lu_object_put(). Notice the function doesn't reserve a specific ID, just
1350  * some ID. The actual ID assignment happens in osp_precreate_get_fid().
1351  * If the space on the target is short and there is a pending object destroy,
1352  * then the function forces a local commit to speed up space release (see
1353  * osp_sync.c for the details).
1354  *
1355  * \param[in] env       LU environment provided by the caller
1356  * \param[in] d         OSP device
1357  *
1358  * \retval              0 on success
1359  * \retval              -ENOSPC when no space on OST
1360  * \retval              -EAGAIN try later, slow precreation in progress
1361  * \retval              -EIO when no access to OST
1362  */
1363 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
1364 {
1365         struct l_wait_info       lwi;
1366         cfs_time_t               expire = cfs_time_shift(obd_timeout);
1367         int                      precreated, rc;
1368
1369         ENTRY;
1370
1371         LASSERTF(osp_objs_precreated(env, d) >= 0, "Last created FID "DFID
1372                  "Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1373                  PFID(&d->opd_pre_used_fid));
1374
1375         /* opd_pre_max_create_count == 0 means this OST is not used for creates. */
1376         if (d->opd_pre_max_create_count == 0)
1377                 RETURN(-ENOBUFS);
1378
1379         if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_OSP_PRECREATE_WAIT)) {
1380                 if (d->opd_index == cfs_fail_val)
1381                         OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_OSP_PRECREATE_WAIT,
1382                                          obd_timeout);
1383         }
1384
1385         /*
1386          * wait till:
1387          *  - preallocation is done
1388          *  - no free space expected soon
1389          *  - can't connect to OST for too long (obd_timeout)
1390          *  - OST can allocate fid sequence.
1391          */
1392         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1393                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1394
1395                 /*
1396                  * increase number of precreations
1397                  */
1398                 precreated = osp_objs_precreated(env, d);
1399                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1400                     d->opd_pre_create_slow == 0 &&
1401                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1402                         spin_lock(&d->opd_pre_lock);
1403                         d->opd_pre_create_slow = 1;
1404                         d->opd_pre_create_count *= 2;
1405                         spin_unlock(&d->opd_pre_lock);
1406                 }
1407
1408                 spin_lock(&d->opd_pre_lock);
1409                 precreated = osp_objs_precreated(env, d);
1410                 if (precreated > d->opd_pre_reserved &&
1411                     !d->opd_pre_recovering) {
1412                         d->opd_pre_reserved++;
1413                         spin_unlock(&d->opd_pre_lock);
1414                         rc = 0;
1415
1416                         /* XXX: don't wake up if precreation is in progress */
1417                         if (osp_precreate_near_empty_nolock(env, d) &&
1418                            !osp_precreate_end_seq_nolock(env, d))
1419                                 wake_up(&d->opd_pre_waitq);
1420
1421                         break;
1422                 }
1423                 spin_unlock(&d->opd_pre_lock);
1424
1425                 /*
1426                  * all precreated objects have been used and the no-space
1427                  * status leaves us no chance to succeed very soon,
1428                  * but if there is a destroy in progress, then we should
1429                  * wait till that is done - some space might be released
1430                  */
1431                 if (unlikely(rc == -ENOSPC)) {
1432                         if (atomic_read(&d->opd_syn_changes)) {
1433                                 /* force local commit to release space */
1434                                 dt_commit_async(env, d->opd_storage);
1435                         }
1436                         if (atomic_read(&d->opd_syn_rpc_in_progress)) {
1437                                 /* just wait till destroys are done */
1438                         /* see l_wait_event() a few lines below */
1439                         }
1440                         if (atomic_read(&d->opd_syn_changes) +
1441                             atomic_read(&d->opd_syn_rpc_in_progress) == 0) {
1442                                 /* no hope for free space */
1443                                 break;
1444                         }
1445                 }
1446
1447                 /* XXX: don't wake up if precreation is in progress */
1448                 wake_up(&d->opd_pre_waitq);
1449
1450                 lwi = LWI_TIMEOUT(expire - cfs_time_current(),
1451                                 osp_precreate_timeout_condition, d);
1452                 if (cfs_time_aftereq(cfs_time_current(), expire)) {
1453                         rc = -ETIMEDOUT;
1454                         break;
1455                 }
1456
1457                 l_wait_event(d->opd_pre_user_waitq,
1458                              osp_precreate_ready_condition(env, d), &lwi);
1459         }
1460
1461         RETURN(rc);
1462 }
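
/*
 * Illustrative usage sketch (not part of the original file): how a caller
 * might consume osp_precreate_reserve() and map the return codes documented
 * above.  The helper name osp_example_reserve() is invented for the example;
 * only osp_precreate_reserve() comes from this file.
 */
#if 0
static int osp_example_reserve(const struct lu_env *env, struct osp_device *d)
{
        int rc;

        /* block (up to obd_timeout) until the precreated pool can satisfy
         * one more reservation, or until the OST reports a fatal status */
        rc = osp_precreate_reserve(env, d);
        if (rc == -ENOSPC || rc == -EAGAIN)
                /* no space / slow precreation: the caller may pick another
                 * target or retry later */
                return rc;
        if (rc != 0)
                /* -EIO and friends: the OST is not usable right now */
                return rc;

        /* reservation held; the actual FID is assigned later by
         * osp_precreate_get_fid() once the transaction is started */
        return 0;
}
#endif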
1463
1464 /**
1465  * Get a FID from precreation pool
1466  *
1467  * The function is a companion for osp_precreate_reserve() - it assigns
1468  * a specific FID from the precreated pool. The function should be called
1469  * only if the call to osp_precreate_reserve() was successful. The function
1470  * updates local storage to remember the highest object ID referenced
1471  * by the node in the given sequence.
1472  *
1473  * A very important detail: this is supposed to be called once the
1474  * transaction is started, so the on-disk update will be atomic with the
1475  * data (like LOVEA) referring to this object. Then the object won't be
1476  * leaked: either it's referenced by the committed transaction or it's
1477  * subject to the orphan cleanup procedure.
1478  *
1479  * \param[in] env       LU environment provided by the caller
1480  * \param[in] d         OSP device
1481  * \param[out] fid      generated FID
1482  *
1483  * \retval 0            on success
1484  * \retval negative     negated errno on error
1485  */
1486 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1487                           struct lu_fid *fid)
1488 {
1489         /* grab next id from the pool */
1490         spin_lock(&d->opd_pre_lock);
1491
1492         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1493                              &d->opd_pre_last_created_fid) < 0,
1494                  "next fid "DFID" last created fid "DFID"\n",
1495                  PFID(&d->opd_pre_used_fid),
1496                  PFID(&d->opd_pre_last_created_fid));
1497
1498         d->opd_pre_used_fid.f_oid++;
1499         memcpy(fid, &d->opd_pre_used_fid, sizeof(*fid));
1500         d->opd_pre_reserved--;
1501         /*
1502          * last_used_id must be changed along with getting the new id,
1503          * otherwise we might miscalculate the gap and lose or leak objects
1504          */
1505         osp_update_last_fid(d, fid);
1506         spin_unlock(&d->opd_pre_lock);
1507
1508         /*
1509          * the main thread may have suspended orphan cleanup till
1510          * all reservations are released, see the comment in
1511          * osp_precreate_thread() just before orphan cleanup
1512          */
1513         if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
1514                 wake_up(&d->opd_pre_waitq);
1515
1516         return 0;
1517 }
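
/*
 * Illustrative sketch (not part of the original file): pairing the
 * reservation with the FID assignment.  The transaction handling is only
 * indicated by a comment and the helper name osp_example_create_object()
 * is invented; only osp_precreate_reserve() and osp_precreate_get_fid()
 * come from this file.
 */
#if 0
static int osp_example_create_object(const struct lu_env *env,
                                     struct osp_device *d,
                                     struct lu_fid *fid)
{
        int rc;

        /* declaration phase: reserve one object in the precreated pool */
        rc = osp_precreate_reserve(env, d);
        if (rc != 0)
                return rc;

        /* ... the caller starts the transaction here ... */

        /* execution phase: consume the reservation and get a concrete FID;
         * doing this after the transaction has started keeps the object
         * either referenced by the committed update or subject to orphan
         * cleanup, as described above */
        return osp_precreate_get_fid(env, d, fid);
}
#endif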
1518
1519 /**
1520  * Set the regular size attribute on an object
1521  *
1522  * When a striping is created late, it's possible that the size is already
1523  * initialized on the file. Then the new striping should inherit the size
1524  * from the file. The function sets the size on the object using the regular
1525  * protocol (OST_PUNCH).
1526  * XXX: should be re-implemented using OUT ?
1527  *
1528  * \param[in] env       LU environment provided by the caller
1529  * \param[in] dt        object
1530  * \param[in] size      size to set.
1531  *
1532  * \retval 0            on success
1533  * \retval negative     negated errno on error
1534  */
1535 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1536                         __u64 size)
1537 {
1538         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1539         struct ptlrpc_request   *req = NULL;
1540         struct obd_import       *imp;
1541         struct ost_body         *body;
1542         struct obdo             *oa = NULL;
1543         int                      rc;
1544
1545         ENTRY;
1546
1547         imp = d->opd_obd->u.cli.cl_import;
1548         LASSERT(imp);
1549
1550         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1551         if (req == NULL)
1552                 RETURN(-ENOMEM);
1553
1554         /* XXX: capa support? */
1555         /* osc_set_capa_size(req, &RMF_CAPA1, capa); */
1556         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1557         if (rc) {
1558                 ptlrpc_request_free(req);
1559                 RETURN(rc);
1560         }
1561
1562         /*
1563          * XXX: decide how we handle resend here
1564          * if we don't resend, then the client may see a wrong file size
1565          * and will also get -EWOULDBLOCK !!
1566          * if we do resend, then the MDS thread can get stuck for quite long
1567          * (see LU-7975 and sanity/test_27F use cases)
1568          * so let's decide not to resend/delay this truncate request to the OST
1569          * and allow the client to decide to resend, in a less aggressive way,
1570          * from after_reply(), by returning -EINPROGRESS instead of
1571          * -EAGAIN/-EWOULDBLOCK upon return from ptlrpc_queue_wait() at the
1572          * end of this routine
1573          */
1574         req->rq_no_resend = req->rq_no_delay = 1;
1575
1576         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1577         ptlrpc_at_set_req_timeout(req);
1578
1579         OBD_ALLOC_PTR(oa);
1580         if (oa == NULL)
1581                 GOTO(out, rc = -ENOMEM);
1582
1583         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1584         LASSERT(rc == 0);
1585         oa->o_size = size;
1586         oa->o_blocks = OBD_OBJECT_EOF;
1587         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1588                       OBD_MD_FLID | OBD_MD_FLGROUP;
1589
1590         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1591         LASSERT(body);
1592         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1593
1594         /* XXX: capa support? */
1595         /* osc_pack_capa(req, body, capa); */
1596
1597         ptlrpc_request_set_replen(req);
1598
1599         rc = ptlrpc_queue_wait(req);
1600         if (rc) {
1601                 /* -EWOULDBLOCK/-EAGAIN means the OST is unreachable at the moment
1602                  * since we decided not to resend/delay, but this could lead to a
1603                  * wrong size being seen on the client side, and even make a process
1604                  * trying to open the file exit/fail if it does not itself handle
1605                  * -EAGAIN. So it is better to return -EINPROGRESS instead and
1606                  * leave the decision to resend to the client side in after_reply()
1607                  */
1608                 if (rc == -EWOULDBLOCK) {
1609                         rc = -EINPROGRESS;
1610                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1611                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1612                                "resend\n");
1613                 } else {
1614                         CERROR("can't punch object: %d\n", rc);
1615                 }
1616         }
1617 out:
1618         ptlrpc_req_finished(req);
1619         if (oa)
1620                 OBD_FREE_PTR(oa);
1621         RETURN(rc);
1622 }
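
/*
 * Illustrative sketch (not part of the original file): propagating an
 * already-known file size to a late-created stripe.  The helper name and
 * the way the size is obtained are invented; only osp_object_truncate()
 * comes from this file.
 */
#if 0
static int osp_example_inherit_size(const struct lu_env *env,
                                    struct dt_object *stripe,
                                    __u64 file_size)
{
        /* issue OST_PUNCH so the new stripe reflects the file size;
         * -EINPROGRESS means the OST was unreachable and the client is
         * expected to resend, see the comments above */
        return osp_object_truncate(env, stripe, file_size);
}
#endif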
1623
1624 /**
1625  * Initialize precreation functionality of OSP
1626  *
1627  * Prepares all the internal structures and starts the precreate thread
1628  *
1629  * \param[in] d         OSP device
1630  *
1631  * \retval 0            on success
1632  * \retval negative     negated errno on error
1633  */
1634 int osp_init_precreate(struct osp_device *d)
1635 {
1636         struct l_wait_info       lwi = { 0 };
1637         struct task_struct              *task;
1638
1639         ENTRY;
1640
1641         OBD_ALLOC_PTR(d->opd_pre);
1642         if (d->opd_pre == NULL)
1643                 RETURN(-ENOMEM);
1644
1645         /* initially precreation isn't ready */
1646         d->opd_pre_status = -EAGAIN;
1647         fid_zero(&d->opd_pre_used_fid);
1648         d->opd_pre_used_fid.f_oid = 1;
1649         fid_zero(&d->opd_pre_last_created_fid);
1650         d->opd_pre_last_created_fid.f_oid = 1;
1651         d->opd_pre_reserved = 0;
1652         d->opd_got_disconnected = 1;
1653         d->opd_pre_create_slow = 0;
1654         d->opd_pre_create_count = OST_MIN_PRECREATE;
1655         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1656         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1657         d->opd_reserved_mb_high = 0;
1658         d->opd_reserved_mb_low = 0;
1659
1660         spin_lock_init(&d->opd_pre_lock);
1661         init_waitqueue_head(&d->opd_pre_waitq);
1662         init_waitqueue_head(&d->opd_pre_user_waitq);
1663         init_waitqueue_head(&d->opd_pre_thread.t_ctl_waitq);
1664
1665         /*
1666          * Initialize statfs-related things
1667          */
1668         d->opd_statfs_maxage = 5; /* default update interval */
1669         d->opd_statfs_fresh_till = cfs_time_shift(-1000);
1670         CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
1671                (unsigned long long)cfs_time_current(),
1672                (unsigned long long)d->opd_statfs_fresh_till);
1673         setup_timer(&d->opd_statfs_timer, osp_statfs_timer_cb,
1674                     (unsigned long)d);
1675
1676         /*
1677          * start thread handling precreation and statfs updates
1678          */
1679         task = kthread_run(osp_precreate_thread, d,
1680                            "osp-pre-%u-%u", d->opd_index, d->opd_group);
1681         if (IS_ERR(task)) {
1682                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1683                 RETURN(PTR_ERR(task));
1684         }
1685
1686         l_wait_event(d->opd_pre_thread.t_ctl_waitq,
1687                      osp_precreate_running(d) || osp_precreate_stopped(d),
1688                      &lwi);
1689
1690         RETURN(0);
1691 }
1692
1693 /**
1694  * Finish precreate functionality of OSP
1695  *
1696  *
1697  * Asks all the activity (the thread, the update timer) to stop, then
1698  * waits till that is done.
1699  *
1700  * \param[in] d         OSP device
1701  */
1702 void osp_precreate_fini(struct osp_device *d)
1703 {
1704         struct ptlrpc_thread *thread;
1705
1706         ENTRY;
1707
1708         del_timer(&d->opd_statfs_timer);
1709
1710         if (d->opd_pre == NULL)
1711                 RETURN_EXIT;
1712
1713         thread = &d->opd_pre_thread;
1714
1715         thread->t_flags = SVC_STOPPING;
1716         wake_up(&d->opd_pre_waitq);
1717
1718         wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
1719
1720         OBD_FREE_PTR(d->opd_pre);
1721         d->opd_pre = NULL;
1722
1723         EXIT;
1724 }
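
/*
 * Illustrative sketch (not part of the original file): the expected pairing
 * of precreate setup and teardown over an OSP device's lifetime.  The helper
 * name osp_example_lifetime() is invented; only osp_init_precreate() and
 * osp_precreate_fini() come from this file.
 */
#if 0
static int osp_example_lifetime(struct osp_device *d)
{
        int rc;

        /* allocate opd_pre, arm the statfs timer and start the
         * "osp-pre-<index>-<group>" thread */
        rc = osp_init_precreate(d);
        if (rc != 0)
                return rc;

        /* ... the device serves create requests ... */

        /* stop the thread and the timer, then free opd_pre */
        osp_precreate_fini(d);
        return 0;
}
#endif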
1725