LU-14047 lustre: change EWOULDBLOCK to EAGAIN
[fs/lustre-release.git] / lustre / osp / osp_precreate.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2012, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  *
32  * lustre/osp/osp_precreate.c
33  *
34  * Lustre OST Proxy Device
35  *
36  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
37  * Author: Mikhail Pershin <mike.pershin@intel.com>
38  * Author: Di Wang <di.wang@intel.com>
39  */
40
41 #define DEBUG_SUBSYSTEM S_MDS
42
43 #include <linux/kthread.h>
44
45 #include <lustre_obdo.h>
46
47 #include "osp_internal.h"
48
49 /*
50  * there are two specific states to take care of:
51  *
52  * = import is disconnected =
53  *
54  * = import is inactive =
55  *   in this case osp_declare_create() returns an error
56  *
57  */
58
59 /**
60  * Check whether statfs data is expired
61  *
62  * The OSP device caches statfs data for the target; the function checks
63  * whether the data is expired or not.
64  *
65  * \param[in] d         OSP device
66  *
67  * \retval              0 - not expired, 1 - expired
68  */
69 static inline int osp_statfs_need_update(struct osp_device *d)
70 {
71         return !ktime_before(ktime_get(), d->opd_statfs_fresh_till);
72 }
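/*
 * Illustrative sketch (not part of the original file): the check above is
 * the consumer side of a simple ktime-based cache.  The producer side,
 * osp_statfs_interpret() below, pushes opd_statfs_fresh_till into the
 * future after every successful reply, roughly:
 *
 *	maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
 *	d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
 *
 * so osp_statfs_need_update() starts returning 1 only once that deadline
 * has passed.
 */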
73
74 /*
75  * OSP tries to maintain a pool of available objects so that calls to create
76  * objects don't block most of the time.
77  *
78  * Each time OSP gets connected to the OST, we should start from precreation cleanup.
79  */
80 static void osp_statfs_timer_cb(cfs_timer_cb_arg_t data)
81 {
82         struct osp_device *d = cfs_from_timer(d, data, opd_statfs_timer);
83
84         LASSERT(d);
85         if (d->opd_pre_task)
86                 wake_up(&d->opd_pre_waitq);
87 }
88
89 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs);
90
91 /*
92  * The function updates the current precreation status if broken, and
93  * updates the cached statfs state if functional, then wakes up waiters.
94  * We don't clear opd_pre_status directly here, but rather leave this
95  * to osp_pre_update_msfs() to do if everything is OK so that we don't
96  * have a race to clear opd_pre_status and then set it to -ENOSPC again.
97  *
98  * \param[in] d         OSP device
99  * \param[in] msfs      statfs data
100  * \param[in] rc        new precreate status for device \a d
101  */
102 static void osp_pre_update_status_msfs(struct osp_device *d,
103                                        struct obd_statfs *msfs, int rc)
104 {
105         if (rc)
106                 d->opd_pre_status = rc;
107         else
108                 osp_pre_update_msfs(d, msfs);
109
110         wake_up(&d->opd_pre_user_waitq);
111 }
112
113 /* Pass in the old statfs data in case the limits have changed */
114 void osp_pre_update_status(struct osp_device *d, int rc)
115 {
116         osp_pre_update_status_msfs(d, &d->opd_statfs, rc);
117 }
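/*
 * Illustrative sketch (not part of the original file): a typical caller,
 * e.g. the precreate path below, feeds the RPC result straight into the
 * helper above so that threads blocked on opd_pre_user_waitq re-evaluate
 * their wait condition:
 *
 *	rc = ptlrpc_queue_wait(req);
 *	osp_pre_update_status(d, rc);
 *	wake_up(&d->opd_pre_user_waitq);
 */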
118
119
120 /**
121  * RPC interpret callback for OST_STATFS RPC
122  *
123  * An interpretation callback called by ptlrpc for OST_STATFS RPC when it is
124  * replied by the target. It's used to maintain statfs cache for the target.
125  * The function fills data from the reply if successful and schedules another
126  * update.
127  *
128  * \param[in] env       LU environment provided by the caller
129  * \param[in] req       RPC replied
130  * \param[in] aa        callback data
131  * \param[in] rc        RPC result
132  *
133  * \retval 0            on success
134  * \retval negative     negated errno on error
135  */
136 static int osp_statfs_interpret(const struct lu_env *env,
137                                 struct ptlrpc_request *req, void *args, int rc)
138 {
139         union ptlrpc_async_args *aa = args;
140         struct obd_import *imp = req->rq_import;
141         struct obd_statfs *msfs;
142         struct obd_statfs *sfs;
143         struct osp_device *d;
144         u64 maxage_ns;
145
146         ENTRY;
147
148         aa = ptlrpc_req_async_args(aa, req);
149         d = aa->pointer_arg[0];
150         LASSERT(d);
151
152         if (rc != 0)
153                 GOTO(out, rc);
154
155         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
156         if (msfs == NULL)
157                 GOTO(out, rc = -EPROTO);
158
159         if (d->opd_pre)
160                 osp_pre_update_status_msfs(d, msfs, 0);
161         else
162                 d->opd_statfs = *msfs;
163
164         /* schedule next update */
165         maxage_ns = d->opd_statfs_maxage * NSEC_PER_SEC;
166         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), maxage_ns);
167         mod_timer(&d->opd_statfs_timer,
168                   jiffies + cfs_time_seconds(d->opd_statfs_maxage));
169         d->opd_statfs_update_in_progress = 0;
170
171         sfs = &d->opd_statfs;
172         CDEBUG(D_CACHE, "%s (%p): %llu blocks, %llu free, %llu avail, "
173                "%u bsize, %u reserved mb low, %u reserved mb high,"
174                "%llu files, %llu free files\n", d->opd_obd->obd_name, d,
175                sfs->os_blocks, sfs->os_bfree, sfs->os_bavail, sfs->os_bsize,
176                d->opd_reserved_mb_low, d->opd_reserved_mb_high,
177                sfs->os_files, sfs->os_ffree);
178
179         RETURN(0);
180 out:
181         /* couldn't update statfs, try again with a small delay */
182         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), 10 * NSEC_PER_SEC);
183         d->opd_statfs_update_in_progress = 0;
184         if (d->opd_pre && d->opd_pre_task)
185                 wake_up(&d->opd_pre_waitq);
186
187         if (req->rq_import_generation == imp->imp_generation)
188                 CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
189                        d->opd_obd->obd_name, rc);
190         RETURN(rc);
191 }
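/*
 * Illustrative sketch (not part of the original file): the callback above
 * is attached through the generic ptlrpc async-args mechanism in
 * osp_statfs_update() below, roughly:
 *
 *	req->rq_interpret_reply = osp_statfs_interpret;
 *	aa = ptlrpc_req_async_args(aa, req);
 *	aa->pointer_arg[0] = d;
 *	ptlrpcd_add_req(req);
 *
 * ptlrpcd then calls osp_statfs_interpret() once the reply (or an error)
 * arrives, so the precreate thread never blocks on statfs.
 */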
192
193 /**
194  * Send OST_STATFS RPC
195  *
196  * Sends OST_STATFS RPC to refresh cached statfs data for the target.
197  * It also disables scheduled updates since at times OSP may need to refresh
198  * statfs data before expiration. The function doesn't block; instead
199  * the interpretation callback osp_statfs_interpret() is used.
200  *
201  * \param[in] d         OSP device
202  */
203 static int osp_statfs_update(const struct lu_env *env, struct osp_device *d)
204 {
205         u64 expire = obd_timeout * 1000 * NSEC_PER_SEC;
206         struct ptlrpc_request   *req;
207         struct obd_import       *imp;
208         union ptlrpc_async_args *aa;
209         int rc;
210
211         ENTRY;
212
213         CDEBUG(D_CACHE, "going to update statfs\n");
214
215         imp = d->opd_obd->u.cli.cl_import;
216         LASSERT(imp);
217
218         req = ptlrpc_request_alloc(imp,
219                            d->opd_pre ? &RQF_OST_STATFS : &RQF_MDS_STATFS);
220         if (req == NULL)
221                 RETURN(-ENOMEM);
222
223         rc = ptlrpc_request_pack(req,
224                          d->opd_pre ? LUSTRE_OST_VERSION : LUSTRE_MDS_VERSION,
225                          d->opd_pre ? OST_STATFS : MDS_STATFS);
226         if (rc) {
227                 ptlrpc_request_free(req);
228                 RETURN(rc);
229         }
230         ptlrpc_request_set_replen(req);
231         if (d->opd_pre)
232                 req->rq_request_portal = OST_CREATE_PORTAL;
233         ptlrpc_at_set_req_timeout(req);
234
235         req->rq_interpret_reply = osp_statfs_interpret;
236         aa = ptlrpc_req_async_args(aa, req);
237         aa->pointer_arg[0] = d;
238
239         /*
240          * no updates till reply
241          */
242         del_timer(&d->opd_statfs_timer);
243         d->opd_statfs_fresh_till = ktime_add_ns(ktime_get(), expire);
244         d->opd_statfs_update_in_progress = 1;
245
246         ptlrpcd_add_req(req);
247
248         /* we still want to sync changes if no new changes are coming */
249         if (ktime_before(ktime_get(), d->opd_sync_next_commit_cb))
250                 GOTO(out, rc);
251
252         if (atomic_read(&d->opd_sync_changes)) {
253                 struct thandle *th;
254
255                 th = dt_trans_create(env, d->opd_storage);
256                 if (IS_ERR(th)) {
257                         CERROR("%s: can't sync\n", d->opd_obd->obd_name);
258                         GOTO(out, rc);
259                 }
260                 rc = dt_trans_start_local(env, d->opd_storage, th);
261                 if (rc == 0) {
262                         CDEBUG(D_OTHER, "%s: sync forced, %d changes\n",
263                                d->opd_obd->obd_name,
264                                atomic_read(&d->opd_sync_changes));
265                         osp_sync_add_commit_cb_1s(env, d, th);
266                         dt_trans_stop(env, d->opd_storage, th);
267                 }
268         }
269
270 out:
271         RETURN(0);
272 }
273
274 /**
275  * Schedule an immediate update for statfs data
276  *
277  * If cached statfs data claim no free space, but OSP has got a request to
278  * destroy an object (so release some space probably), then we may need to
279  * refresh cached statfs data sooner than planned. The function checks that no
280  * statfs update is already in progress and, if so, schedules an immediate update.
281  * XXX: there might be a case where removed object(s) do not add free space (empty
282  * object). If the number of such deletions is high, then we can start to update
283  * statfs too often, causing an RPC storm. Some throttling is needed...
284  *
285  * \param[in] d         OSP device where statfs data needs to be refreshed
286  */
287 void osp_statfs_need_now(struct osp_device *d)
288 {
289         if (!d->opd_statfs_update_in_progress) {
290                 /*
291                  * if current status is -ENOSPC (lack of free space on OST)
292                  * then we should poll OST immediately once object destroy
293                  * is replied
294                  */
295                 d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(), NSEC_PER_SEC);
296                 del_timer(&d->opd_statfs_timer);
297                 wake_up(&d->opd_pre_waitq);
298         }
299 }
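/*
 * Illustrative sketch (not part of the original file; the caller shown is
 * hypothetical): a destroy completion path that notices the target is
 * flagged as full could force an early statfs refresh like this:
 *
 *	if (d->opd_pre_status == -ENOSPC)
 *		osp_statfs_need_now(d);
 */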
300
301 /**
302  * Return number of precreated objects
303  *
304  * A simple helper to calculate the number of precreated objects on the device.
305  *
306  * \param[in] env       LU environment provided by the caller
307  * \param[in] osp       OSP device
308  *
309  * \retval              the number of the precreated objects
310  */
311 static inline int osp_objs_precreated(const struct lu_env *env,
312                                       struct osp_device *osp)
313 {
314         return osp_fid_diff(&osp->opd_pre_last_created_fid,
315                             &osp->opd_pre_used_fid);
316 }
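/*
 * Worked example (not part of the original file): with both FIDs in the
 * same sequence, opd_pre_last_created_fid.f_oid == 1000 and
 * opd_pre_used_fid.f_oid == 900, osp_fid_diff() yields 100, i.e. 100
 * precreated objects are still unconsumed.  The near-empty check below
 * additionally subtracts opd_pre_reserved before comparing against half
 * of opd_pre_create_count.
 */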
317
318 /**
319  * Check whether the pool of precreated objects is nearly empty
320  *
321  * We should not wait till the pool of precreated objects is exhausted,
322  * because then there will be a long period of OSP being unavailable for
323  * new creations due to the lengthy precreate RPC. Instead we ask for another
324  * precreation ahead and hopefully have it ready before the current pool is
325  * empty. Notice this function relies on external locking.
326  *
327  * \param[in] env       LU environment provided by the caller
328  * \param[in] d         OSP device
329  *
330  * \retval              0 - current pool is good enough, 1 - time to precreate
331  */
332 static inline int osp_precreate_near_empty_nolock(const struct lu_env *env,
333                                                   struct osp_device *d)
334 {
335         int window = osp_objs_precreated(env, d);
336
337         /* don't consider new precreation till OST is healthy and
338          * has free space */
339         return ((window - d->opd_pre_reserved < d->opd_pre_create_count / 2) &&
340                 (d->opd_pre_status == 0));
341 }
342
343 /**
344  * Check pool of precreated objects
345  *
346  * This is the lock-protected version of osp_precreate_near_empty_nolock(); check that
347  * for the details.
348  *
349  * \param[in] env       LU environment provided by the caller
350  * \param[in] d         OSP device
351  *
352  * \retval              0 - current pool is good enough, 1 - time to precreate
353  */
354 static inline int osp_precreate_near_empty(const struct lu_env *env,
355                                            struct osp_device *d)
356 {
357         int rc;
358
359         if (d->opd_pre == NULL)
360                 return 0;
361
362         /* XXX: do we really need locking here? */
363         spin_lock(&d->opd_pre_lock);
364         rc = osp_precreate_near_empty_nolock(env, d);
365         spin_unlock(&d->opd_pre_lock);
366         return rc;
367 }
368
369 /**
370  * Check whether the given sequence is empty
371  *
372  * Returns a binary result indicating whether the given sequence has some IDs
373  * left or not. See the details in osp_fid_end_seq(). This is a lock-protected
374  * version of that function.
375  *
376  * \param[in] env       LU environment provided by the caller
377  * \param[in] osp       OSP device
378  *
379  * \retval              0 - current sequence has no IDs, 1 - otherwise
380  */
381 static inline int osp_create_end_seq(const struct lu_env *env,
382                                      struct osp_device *osp)
383 {
384         struct lu_fid *fid = &osp->opd_pre_used_fid;
385         int rc;
386
387         spin_lock(&osp->opd_pre_lock);
388         rc = osp_fid_end_seq(env, fid);
389         spin_unlock(&osp->opd_pre_lock);
390         return rc;
391 }
392
393 /**
394  * Write FID into last_oid/last_seq files
395  *
396  * The function stores the sequence and the in-sequence id into two dedicated
397  * files. The sync argument can be used to request synchronous commit, so the
398  * function won't return until the updates are committed.
399  *
400  * \param[in] env       LU environment provided by the caller
401  * \param[in] osp       OSP device
402  * \param[in] fid       fid where sequence/id is taken
403  * \param[in] sync      update mode: 0 - asynchronously, 1 - synchronously
404  *
405  * \retval 0            on success
406  * \retval negative     negated errno on error
407  **/
408 int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
409                                  struct lu_fid *fid, int sync)
410 {
411         struct osp_thread_info  *oti = osp_env_info(env);
412         struct lu_buf      *lb_oid = &oti->osi_lb;
413         struct lu_buf      *lb_oseq = &oti->osi_lb2;
414         loff_t             oid_off;
415         u64                oid;
416         loff_t             oseq_off;
417         struct thandle    *th;
418         int                   rc;
419         ENTRY;
420
421         if (osp->opd_storage->dd_rdonly)
422                 RETURN(0);
423
424         /* Note: though f_oid is only 32 bits, it will also write 64 bits
425          * for oid to keep compatibility with the previous version. */
426         oid = fid->f_oid;
427         osp_objid_buf_prep(lb_oid, &oid_off,
428                            &oid, osp->opd_index);
429
430         osp_objseq_buf_prep(lb_oseq, &oseq_off,
431                             &fid->f_seq, osp->opd_index);
432
433         th = dt_trans_create(env, osp->opd_storage);
434         if (IS_ERR(th))
435                 RETURN(PTR_ERR(th));
436
437         th->th_sync |= sync;
438         rc = dt_declare_record_write(env, osp->opd_last_used_oid_file,
439                                      lb_oid, oid_off, th);
440         if (rc != 0)
441                 GOTO(out, rc);
442
443         rc = dt_declare_record_write(env, osp->opd_last_used_seq_file,
444                                      lb_oseq, oseq_off, th);
445         if (rc != 0)
446                 GOTO(out, rc);
447
448         rc = dt_trans_start_local(env, osp->opd_storage, th);
449         if (rc != 0)
450                 GOTO(out, rc);
451
452         rc = dt_record_write(env, osp->opd_last_used_oid_file, lb_oid,
453                              &oid_off, th);
454         if (rc != 0) {
455                 CERROR("%s: can not write to last seq file: rc = %d\n",
456                         osp->opd_obd->obd_name, rc);
457                 GOTO(out, rc);
458         }
459         rc = dt_record_write(env, osp->opd_last_used_seq_file, lb_oseq,
460                              &oseq_off, th);
461         if (rc) {
462                 CERROR("%s: can not write to last seq file: rc = %d\n",
463                         osp->opd_obd->obd_name, rc);
464                 GOTO(out, rc);
465         }
466 out:
467         dt_trans_stop(env, osp->opd_storage, th);
468         RETURN(rc);
469 }
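/*
 * Illustrative sketch (not part of the original file): sequence rollover
 * below uses the synchronous mode so that the newly allocated sequence is
 * on disk before any object from it can be handed out:
 *
 *	rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
 *	if (rc != 0)
 *		RETURN(rc);
 */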
470
471 /**
472  * Switch to another sequence
473  *
474  * When the current sequence has no available IDs left, OSP has to switch to
475  * a new sequence. OSP requests it using the regular FLDB protocol and stores
476  * it synchronously before it is used for precreation. This is needed
477  * to keep the sequences referenced (not orphaned); otherwise it's
478  * possible that the OST has some objects precreated and the clients have data
479  * written to them, but after an MDT failover nobody refers to those objects
480  * and OSP has no idea that the sequence needs to be cleaned up.
481  * While this is a very expensive operation, it's supposed to happen very
482  * infrequently because a sequence has 2^32 or 2^48 objects (depending on type).
483  *
484  * \param[in] env       LU environment provided by the caller
485  * \param[in] osp       OSP device
486  *
487  * \retval 0            on success
488  * \retval negative     negated errno on error
489  */
490 static int osp_precreate_rollover_new_seq(struct lu_env *env,
491                                           struct osp_device *osp)
492 {
493         struct lu_fid   *fid = &osp_env_info(env)->osi_fid;
494         struct lu_fid   *last_fid = &osp->opd_last_used_fid;
495         int             rc;
496         ENTRY;
497
498         rc = seq_client_get_seq(env, osp->opd_obd->u.cli.cl_seq, &fid->f_seq);
499         if (rc != 0) {
500                 CERROR("%s: alloc fid error: rc = %d\n",
501                        osp->opd_obd->obd_name, rc);
502                 RETURN(rc);
503         }
504
505         fid->f_oid = 1;
506         fid->f_ver = 0;
507         LASSERTF(fid_seq(fid) != fid_seq(last_fid),
508                  "fid "DFID", last_fid "DFID"\n", PFID(fid),
509                  PFID(last_fid));
510
511         rc = osp_write_last_oid_seq_files(env, osp, fid, 1);
512         if (rc != 0) {
513                 CERROR("%s: Can not update oid/seq file: rc = %d\n",
514                        osp->opd_obd->obd_name, rc);
515                 RETURN(rc);
516         }
517
518         LCONSOLE_INFO("%s: update sequence from %#llx to %#llx\n",
519                       osp->opd_obd->obd_name, fid_seq(last_fid),
520                       fid_seq(fid));
521         /* Update last_xxx to the new seq */
522         spin_lock(&osp->opd_pre_lock);
523         osp->opd_last_used_fid = *fid;
524         osp_fid_to_obdid(fid, &osp->opd_last_id);
525         osp->opd_gap_start_fid = *fid;
526         osp->opd_pre_used_fid = *fid;
527         osp->opd_pre_last_created_fid = *fid;
528         spin_unlock(&osp->opd_pre_lock);
529
530         RETURN(rc);
531 }
532
533 /**
534  * Find IDs available in current sequence
535  *
536  * The function calculates the highest possible ID and the number of IDs
537  * available in the current sequence OSP is using. The number is limited
538  * artificially by the caller (grow param) and by the number of IDs available
539  * in the sequence by nature. The function doesn't require external
540  * locking.
541  *
542  * \param[in] env       LU environment provided by the caller
543  * \param[in] osp       OSP device
544  * \param[in] fid       FID the caller wants to start with
545  * \param[in] grow      how many the caller wants
546  * \param[out] fid      the highest calculated FID
547  * \param[out] grow     the number of available IDs calculated
548  *
549  * \retval              0 on success, 1 - the sequence is empty
550  */
551 static int osp_precreate_fids(const struct lu_env *env, struct osp_device *osp,
552                               struct lu_fid *fid, int *grow)
553 {
554         struct osp_thread_info  *osi = osp_env_info(env);
555         __u64                   end;
556         int                     i = 0;
557
558         if (fid_is_idif(fid)) {
559                 struct lu_fid   *last_fid;
560                 struct ost_id   *oi = &osi->osi_oi;
561                 int rc;
562
563                 spin_lock(&osp->opd_pre_lock);
564                 last_fid = &osp->opd_pre_last_created_fid;
565                 fid_to_ostid(last_fid, oi);
566                 end = min(ostid_id(oi) + *grow, IDIF_MAX_OID);
567                 *grow = end - ostid_id(oi);
568                 rc = ostid_set_id(oi, ostid_id(oi) + *grow);
569                 spin_unlock(&osp->opd_pre_lock);
570
571                 if (*grow == 0 || rc)
572                         return 1;
573
574                 ostid_to_fid(fid, oi, osp->opd_index);
575                 return 0;
576         }
577
578         spin_lock(&osp->opd_pre_lock);
579         *fid = osp->opd_pre_last_created_fid;
580         end = fid->f_oid;
581         end = min((end + *grow), (__u64)LUSTRE_DATA_SEQ_MAX_WIDTH);
582         *grow = end - fid->f_oid;
583         fid->f_oid += end - fid->f_oid;
584         spin_unlock(&osp->opd_pre_lock);
585
586         CDEBUG(D_INFO, "Expect %d, actual %d ["DFID" -- "DFID"]\n",
587                *grow, i, PFID(fid), PFID(&osp->opd_pre_last_created_fid));
588
589         return *grow > 0 ? 0 : 1;
590 }
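/*
 * Worked example (not part of the original file): for a normal (non-IDIF)
 * FID with opd_pre_last_created_fid.f_oid == LUSTRE_DATA_SEQ_MAX_WIDTH - 10
 * and *grow == 64, "end" is clamped to LUSTRE_DATA_SEQ_MAX_WIDTH, so the
 * function returns 0 with *grow == 10.  Once opd_pre_last_created_fid
 * reaches the maximum, *grow becomes 0 and the function returns 1,
 * signalling the caller to roll over to a new sequence.
 */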
591
592 /**
593  * Prepare and send precreate RPC
594  *
595  * The function finds how many objects should be precreated.  Then allocates,
596  * prepares and schedules precreate RPC synchronously. Upon reply the function
597  * wakes up the threads waiting for the new objects on this target. If the
598  * target wasn't able to create all the objects requested, then the next
599  * precreate will be asking for fewer objects (i.e. slow precreate down).
600  *
601  * \param[in] env       LU environment provided by the caller
602  * \param[in] d         OSP device
603  *
604  * \retval 0            on success
605  * \retval negative     negated errno on error
606  **/
607 static int osp_precreate_send(const struct lu_env *env, struct osp_device *d)
608 {
609         struct osp_thread_info  *oti = osp_env_info(env);
610         struct ptlrpc_request   *req;
611         struct obd_import       *imp;
612         struct ost_body         *body;
613         int                      rc, grow, diff;
614         struct lu_fid           *fid = &oti->osi_fid;
615         ENTRY;
616
617         /* don't precreate new objects till OST is healthy and has free space */
618         if (unlikely(d->opd_pre_status)) {
619                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
620                        d->opd_obd->obd_name, d->opd_pre_status);
621                 RETURN(0);
622         }
623
624         /*
625          * if connection/initialization is not completed, ignore
626          */
627         imp = d->opd_obd->u.cli.cl_import;
628         LASSERT(imp);
629
630         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
631         if (req == NULL)
632                 RETURN(-ENOMEM);
633         req->rq_request_portal = OST_CREATE_PORTAL;
634         /* we should not resend create request - anyway we will have delorphan
635          * and kill these objects */
636         req->rq_no_delay = req->rq_no_resend = 1;
637
638         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
639         if (rc) {
640                 ptlrpc_request_free(req);
641                 RETURN(rc);
642         }
643
644         spin_lock(&d->opd_pre_lock);
645         if (d->opd_pre_create_count > d->opd_pre_max_create_count / 2)
646                 d->opd_pre_create_count = d->opd_pre_max_create_count / 2;
647         grow = d->opd_pre_create_count;
648         spin_unlock(&d->opd_pre_lock);
649
650         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
651         LASSERT(body);
652
653         *fid = d->opd_pre_last_created_fid;
654         rc = osp_precreate_fids(env, d, fid, &grow);
655         if (rc == 1)
656                 /* Current seq has been used up */
657                 GOTO(out_req, rc = -ENOSPC);
658
659         if (!osp_is_fid_client(d)) {
660                 /* Non-FID client will always send seq 0 because of
661                  * compatibility */
662                 LASSERTF(fid_is_idif(fid), "Invalid fid "DFID"\n", PFID(fid));
663                 fid->f_seq = 0;
664         }
665
666         fid_to_ostid(fid, &body->oa.o_oi);
667         body->oa.o_valid = OBD_MD_FLGROUP;
668
669         ptlrpc_request_set_replen(req);
670
671         if (OBD_FAIL_CHECK(OBD_FAIL_OSP_FAKE_PRECREATE))
672                 GOTO(ready, rc = 0);
673
674         rc = ptlrpc_queue_wait(req);
675         if (rc) {
676                 CERROR("%s: can't precreate: rc = %d\n", d->opd_obd->obd_name,
677                        rc);
678                 if (req->rq_net_err)
679                         /* make osp_precreate_reserve() wait for a retry */
680                         rc = -ENOTCONN;
681                 GOTO(out_req, rc);
682         }
683         LASSERT(req->rq_transno == 0);
684
685         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
686         if (body == NULL)
687                 GOTO(out_req, rc = -EPROTO);
688
689         ostid_to_fid(fid, &body->oa.o_oi, d->opd_index);
690
691 ready:
692         if (osp_fid_diff(fid, &d->opd_pre_used_fid) <= 0) {
693                 CERROR("%s: precreate fid "DFID" <= local used fid "DFID
694                        ": rc = %d\n", d->opd_obd->obd_name,
695                        PFID(fid), PFID(&d->opd_pre_used_fid), -ESTALE);
696                 GOTO(out_req, rc = -ESTALE);
697         }
698
699         diff = osp_fid_diff(fid, &d->opd_pre_last_created_fid);
700
701         spin_lock(&d->opd_pre_lock);
702         if (diff < grow) {
703                 /* the OST has not managed to create all the
704                  * objects we asked for */
705                 d->opd_pre_create_count = max(diff, OST_MIN_PRECREATE);
706                 d->opd_pre_create_slow = 1;
707         } else {
708                 /* the OST is able to keep up with the work,
709                  * we could consider increasing create_count
710                  * next time if needed */
711                 d->opd_pre_create_slow = 0;
712         }
713
714         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
715         fid_to_ostid(fid, &body->oa.o_oi);
716
717         d->opd_pre_last_created_fid = *fid;
718         spin_unlock(&d->opd_pre_lock);
719
720         CDEBUG(D_HA, "%s: current precreated pool: "DFID"-"DFID"\n",
721                d->opd_obd->obd_name, PFID(&d->opd_pre_used_fid),
722                PFID(&d->opd_pre_last_created_fid));
723 out_req:
724         /* now we can wake up all users awaiting objects */
725         osp_pre_update_status(d, rc);
726         wake_up(&d->opd_pre_user_waitq);
727
728         /* pause to let osp_precreate_reserve() go first */
729         CFS_FAIL_TIMEOUT(OBD_FAIL_OSP_PRECREATE_PAUSE, 2);
730
731         ptlrpc_req_finished(req);
732         RETURN(rc);
733 }
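/*
 * Worked example (not part of the original file) of the slow-down logic
 * above: if grow == 128 objects were requested but the OST reply shows
 * only diff == 32 were created, opd_pre_create_count is reduced to
 * max(32, OST_MIN_PRECREATE) and opd_pre_create_slow is set, so the next
 * precreate asks for fewer objects until the OST catches up.
 */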
734
735 /**
736  * Get last precreated object from target (OST)
737  *
738  * Sends a synchronous RPC to the target (OST) to learn the last precreated
739  * object. This is later used to remove all unused objects (orphan cleanup
740  * procedure). Also, the next object after the one we got will be used as a
741  * starting point for the new precreates.
742  *
743  * \param[in] env       LU environment provided by the caller
744  * \param[in] d         OSP device
745  *
746  * \retval 0            on success
747  * \retval negative     negated errno on error
748  **/
749 static int osp_get_lastfid_from_ost(const struct lu_env *env,
750                                     struct osp_device *d)
751 {
752         struct ptlrpc_request   *req = NULL;
753         struct obd_import       *imp;
754         struct lu_fid           *last_fid;
755         char                    *tmp;
756         int                     rc;
757         ENTRY;
758
759         imp = d->opd_obd->u.cli.cl_import;
760         LASSERT(imp);
761
762         req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_FID);
763         if (req == NULL)
764                 RETURN(-ENOMEM);
765
766         req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, RCL_CLIENT,
767                              sizeof(KEY_LAST_FID));
768
769         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
770         if (rc) {
771                 ptlrpc_request_free(req);
772                 RETURN(rc);
773         }
774
775         tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
776         memcpy(tmp, KEY_LAST_FID, sizeof(KEY_LAST_FID));
777
778         req->rq_no_delay = req->rq_no_resend = 1;
779         last_fid = req_capsule_client_get(&req->rq_pill, &RMF_FID);
780         fid_cpu_to_le(last_fid, &d->opd_last_used_fid);
781
782         ptlrpc_request_set_replen(req);
783
784         rc = ptlrpc_queue_wait(req);
785         if (rc) {
786                 /* bad-bad OST.. let sysadm sort this out */
787                 if (rc == -ENOTSUPP) {
788                         CERROR("%s: server does not support FID: rc = %d\n",
789                                d->opd_obd->obd_name, -ENOTSUPP);
790                 }
791                 ptlrpc_set_import_active(imp, 0);
792                 GOTO(out, rc);
793         }
794
795         last_fid = req_capsule_server_get(&req->rq_pill, &RMF_FID);
796         if (last_fid == NULL) {
797                 CERROR("%s: Got last_fid failed.\n", d->opd_obd->obd_name);
798                 GOTO(out, rc = -EPROTO);
799         }
800
801         if (!fid_is_sane(last_fid)) {
802                 CERROR("%s: Got insane last_fid "DFID"\n",
803                        d->opd_obd->obd_name, PFID(last_fid));
804                 GOTO(out, rc = -EPROTO);
805         }
806
807         /* Only update the last used fid, if the OST has objects for
808          * this sequence, i.e. fid_oid > 0 */
809         if (fid_oid(last_fid) > 0)
810                 d->opd_last_used_fid = *last_fid;
811
812         CDEBUG(D_HA, "%s: Got last_fid "DFID"\n", d->opd_obd->obd_name,
813                PFID(last_fid));
814
815 out:
816         ptlrpc_req_finished(req);
817         RETURN(rc);
818 }
819
820 /**
821  * Cleanup orphans on OST
822  *
823  * This function is called in the context of a dedicated thread handling
824  * all the precreation stuff. The function waits till local recovery
825  * is complete, then identifies all the unreferenced objects (orphans)
826  * using the highest ID referenced locally and the highest object
827  * precreated by the target. The found range is subject to removal
828  * using a specially flagged RPC. During this process OSP is marked
829  * unavailable for new objects.
830  *
831  * \param[in] env       LU environment provided by the caller
832  * \param[in] d         OSP device
833  *
834  * \retval 0            on success
835  * \retval negative     negated errno on error
836  */
837 static int osp_precreate_cleanup_orphans(struct lu_env *env,
838                                          struct osp_device *d)
839 {
840         struct osp_thread_info  *osi = osp_env_info(env);
841         struct lu_fid           *last_fid = &osi->osi_fid;
842         struct ptlrpc_request   *req = NULL;
843         struct obd_import       *imp;
844         struct ost_body         *body;
845         int                      update_status = 0;
846         int                      rc;
847         int                      diff;
848
849         ENTRY;
850
851         /*
852          * wait for local recovery to finish, so we can cleanup orphans.
853          * orphans are all objects since "last used" (assigned), but
854          * there might be objects reserved and in some cases they won't
855          * be used. we can't clean them up till we're sure they won't be
856          * used. also we can't allow new reservations because they may
857          * end up getting orphans that are being cleaned up below. so we block
858          * new reservations and wait till all reserved objects are either
859          * used or released.
860          */
861         spin_lock(&d->opd_pre_lock);
862         d->opd_pre_recovering = 1;
863         spin_unlock(&d->opd_pre_lock);
864         /*
865          * The locking above makes sure the opd_pre_reserved check below will
866          * catch all osp_precreate_reserve() calls who find
867          * "!opd_pre_recovering".
868          */
869         wait_event_idle(d->opd_pre_waitq,
870                         (!d->opd_pre_reserved && d->opd_recovery_completed) ||
871                         !d->opd_pre_task || d->opd_got_disconnected);
872         if (!d->opd_pre_task || d->opd_got_disconnected)
873                 GOTO(out, rc = -EAGAIN);
874
875         CDEBUG(D_HA, "%s: going to cleanup orphans since "DFID"\n",
876                d->opd_obd->obd_name, PFID(&d->opd_last_used_fid));
877
878         *last_fid = d->opd_last_used_fid;
879         /* The OSP should already have a valid seq now */
880         LASSERT(!fid_is_zero(last_fid));
881         if (fid_oid(&d->opd_last_used_fid) < 2) {
882                 /* lastfid looks strange... ask OST */
883                 rc = osp_get_lastfid_from_ost(env, d);
884                 if (rc)
885                         GOTO(out, rc);
886         }
887
888         imp = d->opd_obd->u.cli.cl_import;
889         LASSERT(imp);
890
891         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
892         if (req == NULL)
893                 GOTO(out, rc = -ENOMEM);
894
895         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
896         if (rc) {
897                 ptlrpc_request_free(req);
898                 req = NULL;
899                 GOTO(out, rc);
900         }
901
902         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
903         if (body == NULL)
904                 GOTO(out, rc = -EPROTO);
905
906         body->oa.o_flags = OBD_FL_DELORPHAN;
907         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
908
909         fid_to_ostid(&d->opd_last_used_fid, &body->oa.o_oi);
910
911         ptlrpc_request_set_replen(req);
912
913         /* Don't resend the delorphan req */
914         req->rq_no_resend = req->rq_no_delay = 1;
915
916         rc = ptlrpc_queue_wait(req);
917         if (rc) {
918                 update_status = 1;
919                 GOTO(out, rc);
920         }
921
922         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
923         if (body == NULL)
924                 GOTO(out, rc = -EPROTO);
925
926         /*
927          * OST provides us with the id the new pool starts from in body->oa.o_id
928          */
929         ostid_to_fid(last_fid, &body->oa.o_oi, d->opd_index);
930
931         spin_lock(&d->opd_pre_lock);
932         diff = osp_fid_diff(&d->opd_last_used_fid, last_fid);
933         if (diff > 0) {
934                 d->opd_pre_create_count = OST_MIN_PRECREATE + diff;
935                 d->opd_pre_last_created_fid = d->opd_last_used_fid;
936         } else {
937                 d->opd_pre_create_count = OST_MIN_PRECREATE;
938                 d->opd_pre_last_created_fid = *last_fid;
939         }
940         /*
941          * This empties the pre-creation pool and effectively blocks any new
942          * reservations.
943          */
944         LASSERT(fid_oid(&d->opd_pre_last_created_fid) <=
945                 LUSTRE_DATA_SEQ_MAX_WIDTH);
946         d->opd_pre_used_fid = d->opd_pre_last_created_fid;
947         d->opd_pre_create_slow = 0;
948         spin_unlock(&d->opd_pre_lock);
949
950         CDEBUG(D_HA, "%s: Got last_id "DFID" from OST, last_created "DFID
951                "last_used is "DFID"\n", d->opd_obd->obd_name, PFID(last_fid),
952                PFID(&d->opd_pre_last_created_fid), PFID(&d->opd_last_used_fid));
953 out:
954         if (req)
955                 ptlrpc_req_finished(req);
956
957         /*
958          * If rc is zero, the pre-creation window should have been emptied.
959          * Since waking up the herd would be useless without pre-created
960          * objects, we defer the signal to osp_precreate_send() in that case.
961          */
962         if (rc != 0) {
963                 if (update_status) {
964                         CERROR("%s: cannot cleanup orphans: rc = %d\n",
965                                d->opd_obd->obd_name, rc);
966                         /* we can't proceed from here, the OST seems to
967                          * be in bad shape; better to wait for
968                          * a new instance of the server and repeat
969                          * from the beginning. notify possible waiters
970                          * this OSP isn't quite functional yet */
971                         osp_pre_update_status(d, rc);
972                 } else {
973                         wake_up(&d->opd_pre_user_waitq);
974                 }
975         } else {
976                 spin_lock(&d->opd_pre_lock);
977                 d->opd_pre_recovering = 0;
978                 spin_unlock(&d->opd_pre_lock);
979         }
980
981         RETURN(rc);
982 }
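/*
 * Worked example (not part of the original file) of the window reset
 * above: if the local last_used oid is 500 while the OST reports its last
 * precreated oid as 450 (diff == 50 > 0), the next precreate window is
 * sized OST_MIN_PRECREATE + 50 and restarts from the local last_used FID;
 * otherwise the window restarts from the FID reported by the OST with the
 * minimal size.
 */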
983
984 /**
985  * Update precreate status using statfs data
986  *
987  * The function decides whether this OSP should be used for new objects.
988  * IOW, whether this OST is used up or has some free space. Cached statfs
989  * data is used to make this decision. If the latest result of the statfs
990  * request (rc argument) is not successful, then just mark the OSP unavailable
991  * right away.
992  *
993  * The new statfs data is passed in \a msfs and needs to be stored into
994  * opd_statfs, but only after the various flags in os_state are set, so
995  * that the new statfs data is not visible without appropriate flags set.
996  * As such, there is no need to clear the flags here, since this is called
997  * with new statfs data, and they should not be cleared if sent from OST.
998  *
999  * Add a bit of hysteresis so this flag isn't continually flapping, and
1000  * ensure that new files don't get extremely fragmented due to only a
1001  * small amount of available space in the filesystem.  We want to set
1002  * the ENOSPC/ENOINO flags unconditionally when there is less than the
1003  * reserved size free, and still copy them from the old state when there
1004  * is less than 2*reserved size free space or inodes.
1005  *
1006  * \param[in] d         OSP device
1007  * \param[in] msfs      statfs data
1008  */
1009 static void osp_pre_update_msfs(struct osp_device *d, struct obd_statfs *msfs)
1010 {
1011         u32 old_state = d->opd_statfs.os_state;
1012         u32 reserved_ino_low = 32;      /* could be tunable in the future */
1013         u32 reserved_ino_high = reserved_ino_low * 2;
1014         u64 available_mb;
1015
1016         /* statfs structure not initialized yet */
1017         if (unlikely(!msfs->os_type))
1018                 return;
1019
1020         /* if the low and high watermarks have not been initialized yet */
1021         if (unlikely(d->opd_reserved_mb_high == 0 &&
1022                      d->opd_reserved_mb_low == 0)) {
1023                 /* Use ~0.1% by default to disable object allocation,
1024                  * and ~0.2% to enable, size in MB; set both watermarks
1025                  */
1026                 spin_lock(&d->opd_pre_lock);
1027                 if (d->opd_reserved_mb_high == 0 &&
1028                     d->opd_reserved_mb_low == 0) {
1029                         d->opd_reserved_mb_low = ((msfs->os_bsize >> 10) *
1030                                                   msfs->os_blocks) >> 20;
1031                         if (d->opd_reserved_mb_low == 0)
1032                                 d->opd_reserved_mb_low = 1;
1033                         d->opd_reserved_mb_high =
1034                                 (d->opd_reserved_mb_low << 1) + 1;
1035                 }
1036                 spin_unlock(&d->opd_pre_lock);
1037         }
1038
1039         available_mb = (msfs->os_bavail * (msfs->os_bsize >> 10)) >> 10;
1040         if (msfs->os_ffree < reserved_ino_low)
1041                 msfs->os_state |= OS_STATFS_ENOINO;
1042         else if (msfs->os_ffree <= reserved_ino_high)
1043                 msfs->os_state |= old_state & OS_STATFS_ENOINO;
1044         /* else don't clear flags in new msfs->os_state sent from OST */
1045
1046         CDEBUG(D_INFO,
1047                "%s: blocks=%llu free=%llu avail=%llu avail_mb=%llu hwm_mb=%u files=%llu ffree=%llu state=%x: rc = %d\n",
1048                d->opd_obd->obd_name, msfs->os_blocks, msfs->os_bfree,
1049                msfs->os_bavail, available_mb, d->opd_reserved_mb_high,
1050                msfs->os_files, msfs->os_ffree, msfs->os_state,
1051                d->opd_pre_status);
1052         if (available_mb < d->opd_reserved_mb_low)
1053                 msfs->os_state |= OS_STATFS_ENOSPC;
1054         else if (available_mb <= d->opd_reserved_mb_high)
1055                 msfs->os_state |= old_state & OS_STATFS_ENOSPC;
1056         /* else don't clear flags in new msfs->os_state sent from OST */
1057
1058         if (msfs->os_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1059                 d->opd_pre_status = -ENOSPC;
1060                 if (!(old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)))
1061                         CDEBUG(D_INFO, "%s: full: state=%x: rc = %x\n",
1062                                d->opd_obd->obd_name, msfs->os_state,
1063                                d->opd_pre_status);
1064                 CDEBUG(D_INFO, "uncommitted changes=%u in_progress=%u\n",
1065                        atomic_read(&d->opd_sync_changes),
1066                        atomic_read(&d->opd_sync_rpcs_in_progress));
1067         } else if (old_state & (OS_STATFS_ENOINO | OS_STATFS_ENOSPC)) {
1068                 d->opd_pre_status = 0;
1069                 spin_lock(&d->opd_pre_lock);
1070                 d->opd_pre_create_slow = 0;
1071                 d->opd_pre_create_count = OST_MIN_PRECREATE;
1072                 spin_unlock(&d->opd_pre_lock);
1073                 wake_up(&d->opd_pre_waitq);
1074
1075                 CDEBUG(D_INFO,
1076                        "%s: available: state=%x: rc = %d\n",
1077                        d->opd_obd->obd_name, msfs->os_state,
1078                        d->opd_pre_status);
1079         } else {
1080                 /* we only get here if rc == 0 in the caller */
1081                 d->opd_pre_status = 0;
1082         }
1083
1084         /* Object precreation skipped on OST if manually disabled */
1085         if (d->opd_pre_max_create_count == 0)
1086                 msfs->os_state |= OS_STATFS_NOPRECREATE;
1087         /* else don't clear flags in new msfs->os_state sent from OST */
1088
1089         /* copy only new statfs state to make it visible to MDS threads */
1090         if (&d->opd_statfs != msfs)
1091                 d->opd_statfs = *msfs;
1092 }
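/*
 * Worked example (not part of the original file) of the default watermarks
 * above: for a 1 TiB target with 4 KiB blocks, os_blocks == 2^28, so
 * opd_reserved_mb_low = ((4096 >> 10) * 2^28) >> 20 = 1024 MB (~0.1% of
 * the device) and opd_reserved_mb_high = 2 * 1024 + 1 = 2049 MB (~0.2%).
 * OS_STATFS_ENOSPC is then set unconditionally below 1024 MB available,
 * and only carried over from the previous state between 1024 MB and
 * 2049 MB, which gives the hysteresis described above.
 */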
1093
1094 /**
1095  * Initialize FID for precreation
1096  *
1097  * For a just-created new target, a new sequence should be taken.
1098  * The function checks there is no IDIF in use (if the target was
1099  * added with an older version of Lustre), then requests a new
1100  * sequence from FLDB using the regular protocol. Then this new
1101  * sequence is stored on persistent storage synchronously to prevent
1102  * possible object leakage (for the details see the description of
1103  * osp_precreate_rollover_new_seq()).
1104  *
1105  * \param[in] osp       OSP device
1106  *
1107  * \retval 0            on success
1108  * \retval negative     negated errno on error
1109  */
1110 int osp_init_pre_fid(struct osp_device *osp)
1111 {
1112         struct lu_env           env;
1113         struct osp_thread_info  *osi;
1114         struct lu_client_seq    *cli_seq;
1115         struct lu_fid           *last_fid;
1116         int                     rc;
1117         ENTRY;
1118
1119         LASSERT(osp->opd_pre != NULL);
1120
1121         /* Let's check if the current last_seq/fid is valid,
1122          * otherwise request new sequence from the controller */
1123         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1124                 /* Non-MDT0 can only use normal sequence for
1125                  * OST objects */
1126                 if (fid_is_norm(&osp->opd_last_used_fid))
1127                         RETURN(0);
1128         } else {
1129                 /* Initially MDT0 will start with IDIF, after
1130                  * that it will request new sequence from the
1131                  * controller */
1132                 if (fid_is_idif(&osp->opd_last_used_fid) ||
1133                     fid_is_norm(&osp->opd_last_used_fid))
1134                         RETURN(0);
1135         }
1136
1137         if (!fid_is_zero(&osp->opd_last_used_fid))
1138                 CWARN("%s: invalid last used fid "DFID
1139                       ", try to get new sequence.\n",
1140                       osp->opd_obd->obd_name,
1141                       PFID(&osp->opd_last_used_fid));
1142
1143         rc = lu_env_init(&env, osp->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1144         if (rc) {
1145                 CERROR("%s: init env error: rc = %d\n",
1146                        osp->opd_obd->obd_name, rc);
1147                 RETURN(rc);
1148         }
1149
1150         osi = osp_env_info(&env);
1151         last_fid = &osi->osi_fid;
1152         fid_zero(last_fid);
1153         /* For a fresh fs, it will allocate a new sequence first */
1154         if (osp_is_fid_client(osp) && osp->opd_group != 0) {
1155                 cli_seq = osp->opd_obd->u.cli.cl_seq;
1156                 rc = seq_client_get_seq(&env, cli_seq, &last_fid->f_seq);
1157                 if (rc != 0) {
1158                         CERROR("%s: alloc fid error: rc = %d\n",
1159                                osp->opd_obd->obd_name, rc);
1160                         GOTO(out, rc);
1161                 }
1162         } else {
1163                 last_fid->f_seq = fid_idif_seq(0, osp->opd_index);
1164         }
1165         last_fid->f_oid = 1;
1166         last_fid->f_ver = 0;
1167
1168         spin_lock(&osp->opd_pre_lock);
1169         osp->opd_last_used_fid = *last_fid;
1170         osp->opd_pre_used_fid = *last_fid;
1171         osp->opd_pre_last_created_fid = *last_fid;
1172         spin_unlock(&osp->opd_pre_lock);
1173         rc = osp_write_last_oid_seq_files(&env, osp, last_fid, 1);
1174         if (rc != 0) {
1175                 CERROR("%s: write fid error: rc = %d\n",
1176                        osp->opd_obd->obd_name, rc);
1177                 GOTO(out, rc);
1178         }
1179 out:
1180         lu_env_fini(&env);
1181         RETURN(rc);
1182 }
1183
1184 struct opt_args {
1185         struct osp_device       *opta_dev;
1186         struct lu_env           opta_env;
1187         struct completion       *opta_started;
1188 };
1189 /**
1190  * The core of precreate functionality
1191  *
1192  * The function implements the main precreation loop. Basically it
1193  * involves connecting to the target, precreate FID initialization,
1194  * identifying and removing orphans, then serving precreation. As
1195  * part of the latter, the thread is responsible for statfs data
1196  * updates. The precreation is mostly driven by other threads
1197  * asking for new OST objects - those askers wake the thread when
1198  * the number of precreated objects reaches the low watermark.
1199  * After a disconnect, the sequence above repeats. This keeps going
1200  * until the thread is requested to stop.
1201  *
1202  * \param[in] _args     private data of the thread (OSP device to handle)
1203  *
1204  * \retval 0            on success
1205  * \retval negative     negated errno on error
1206  */
1207 static int osp_precreate_thread(void *_args)
1208 {
1209         struct opt_args         *args = _args;
1210         struct osp_device       *d = args->opta_dev;
1211         struct lu_env           *env = &args->opta_env;
1212         int                      rc;
1213
1214         ENTRY;
1215
1216         complete(args->opta_started);
1217         while (!kthread_should_stop()) {
1218                 /*
1219                  * need to be connected to OST
1220                  */
1221                 while (!kthread_should_stop()) {
1222                         if ((d->opd_pre == NULL || d->opd_pre_recovering) &&
1223                             d->opd_imp_connected &&
1224                             !d->opd_got_disconnected)
1225                                 break;
1226                         wait_event_idle(d->opd_pre_waitq,
1227                                         kthread_should_stop() ||
1228                                         d->opd_new_connection);
1229
1230                         if (!d->opd_new_connection)
1231                                 continue;
1232
1233                         OBD_FAIL_TIMEOUT(OBD_FAIL_OSP_CON_EVENT_DELAY,
1234                                          cfs_fail_val);
1235                         d->opd_new_connection = 0;
1236                         d->opd_got_disconnected = 0;
1237                         break;
1238                 }
1239
1240                 if (kthread_should_stop())
1241                         break;
1242
1243                 if (d->opd_pre) {
1244                         LASSERT(d->opd_obd->u.cli.cl_seq != NULL);
1245                         /* Sigh, fid client is not ready yet */
1246                         if (d->opd_obd->u.cli.cl_seq->lcs_exp == NULL)
1247                                 continue;
1248
1249                         /* Init fid for osp_precreate if necessary */
1250                         rc = osp_init_pre_fid(d);
1251                         if (rc != 0) {
1252                                 class_export_put(d->opd_exp);
1253                                 d->opd_obd->u.cli.cl_seq->lcs_exp = NULL;
1254                                 CERROR("%s: init pre fid error: rc = %d\n",
1255                                                 d->opd_obd->obd_name, rc);
1256                                 continue;
1257                         }
1258                 }
1259
1260                 if (osp_statfs_update(env, d)) {
1261                         if (wait_event_idle_timeout(d->opd_pre_waitq,
1262                                                     kthread_should_stop(),
1263                                                     cfs_time_seconds(5)) == 0)
1264                                 l_wait_event_abortable(
1265                                         d->opd_pre_waitq,
1266                                         kthread_should_stop());
1267                         continue;
1268                 }
1269
1270                 if (d->opd_pre) {
1271                         /*
1272                          * Clean up orphans or recreate missing objects.
1273                          */
1274                         rc = osp_precreate_cleanup_orphans(env, d);
1275                         if (rc != 0) {
1276                                 schedule_timeout_interruptible(cfs_time_seconds(1));
1277                                 continue;
1278                         }
1279                 }
1280
1281                 /*
1282                  * connected, can handle precreates now
1283                  */
1284                 while (!kthread_should_stop()) {
1285                         wait_event_idle(d->opd_pre_waitq,
1286                                         kthread_should_stop() ||
1287                                         osp_precreate_near_empty(env, d) ||
1288                                         osp_statfs_need_update(d) ||
1289                                         d->opd_got_disconnected);
1290
1291                         if (kthread_should_stop())
1292                                 break;
1293
1294                         /* something happened to the connection,
1295                          * have to start from the beginning */
1296                         if (d->opd_got_disconnected)
1297                                 break;
1298
1299                         if (osp_statfs_need_update(d))
1300                                 if (osp_statfs_update(env, d))
1301                                         break;
1302
1303                         if (d->opd_pre == NULL)
1304                                 continue;
1305
1306                         /* To avoid handling different seq in precreate/orphan
1307                          * cleanup, it will hold precreate until current seq is
1308                          * used up. */
1309                         if (unlikely(osp_precreate_end_seq(env, d) &&
1310                             !osp_create_end_seq(env, d)))
1311                                 continue;
1312
1313                         if (unlikely(osp_precreate_end_seq(env, d) &&
1314                                      osp_create_end_seq(env, d))) {
1315                                 LCONSOLE_INFO("%s:%#llx is used up."
1316                                               " Update to new seq\n",
1317                                               d->opd_obd->obd_name,
1318                                          fid_seq(&d->opd_pre_last_created_fid));
1319                                 rc = osp_precreate_rollover_new_seq(env, d);
1320                                 if (rc)
1321                                         continue;
1322                         }
1323
1324                         if (osp_precreate_near_empty(env, d)) {
1325                                 rc = osp_precreate_send(env, d);
1326                                 /* osp_precreate_send() sets opd_pre_status
1327                                  * in case of error, which prevents the use of
1328                                  * the failed device. */
1329                                 if (rc < 0 && rc != -ENOSPC &&
1330                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
1331                                         CERROR("%s: cannot precreate objects:"
1332                                                " rc = %d\n",
1333                                                d->opd_obd->obd_name, rc);
1334                         }
1335                 }
1336         }
1337
1338         lu_env_fini(env);
1339         OBD_FREE_PTR(args);
1340
1341         RETURN(0);
1342 }
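/*
 * Illustrative sketch (not part of the original file; the setup code is
 * assumed, not copied from the OSP setup path): the loop above is meant
 * to run in a dedicated kthread started with an opt_args bundle, roughly:
 *
 *	DECLARE_COMPLETION_ONSTACK(started);
 *	struct opt_args *args;
 *	struct task_struct *task;
 *
 *	OBD_ALLOC_PTR(args);
 *	args->opta_dev = d;
 *	args->opta_started = &started;
 *	rc = lu_env_init(&args->opta_env,
 *			 d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
 *	task = kthread_create(osp_precreate_thread, args, "osp-pre-%u-%u",
 *			      d->opd_index, d->opd_group);
 *	if (!IS_ERR(task)) {
 *		d->opd_pre_task = task;
 *		wake_up_process(task);
 *		wait_for_completion(&started);
 *	}
 *
 * The thread frees args and finalizes the env itself on exit (see the end
 * of osp_precreate_thread() above).
 */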
1343
1344 /**
1345  * Check when to stop waiting for precreated objects.
1346  *
1347  * The caller wanting a new OST object can't wait indefinitely. The
1348  * function checks a few conditions including available new OST
1349  * objects, disconnected OST, lack of space with no pending destroys,
1350  * etc. IOW, it checks whether the current OSP state is good to keep
1351  * waiting or it's better to give up.
1352  *
1353  * \param[in] env       LU environment provided by the caller
1354  * \param[in] d         OSP device
1355  *
1356  * \retval              0 - keep waiting, 1 - no luck
1357  */
1358 static int osp_precreate_ready_condition(const struct lu_env *env,
1359                                          struct osp_device *d)
1360 {
1361         if (d->opd_pre_recovering)
1362                 return 0;
1363
1364         /* ready if got enough precreated objects */
1365         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
1366         if (d->opd_pre_reserved + 1 < osp_objs_precreated(env, d))
1367                 return 1;
1368
1369         /* ready if OST reported no space and no destroys in progress */
1370         if (atomic_read(&d->opd_sync_changes) +
1371             atomic_read(&d->opd_sync_rpcs_in_progress) == 0 &&
1372             d->opd_pre_status == -ENOSPC)
1373                 return 1;
1374
1375         /* Bail out if I/O to the OST fails */
1376         if (d->opd_pre_status != 0 &&
1377             d->opd_pre_status != -EAGAIN &&
1378             d->opd_pre_status != -ENODEV &&
1379             d->opd_pre_status != -ENOTCONN &&
1380             d->opd_pre_status != -ENOSPC) {
1381                 /* DEBUG LU-3230 */
1382                 if (d->opd_pre_status != -EIO)
1383                         CERROR("%s: precreate failed opd_pre_status %d\n",
1384                                d->opd_obd->obd_name, d->opd_pre_status);
1385                 return 1;
1386         }
1387
1388         return 0;
1389 }
1390
1391 /**
1392  * Reserve object in precreate pool
1393  *
1394  * When the caller wants to create a new object on this target (target
1395  * represented by the given OSP), it should declare this intention using
1396  * a regular ->dt_declare_create() OSD API method. Then OSP will try to
1397  * reserve an object from the existing precreated pool or wait up to
1398  * obd_timeout for an available object to appear in the pool (a dedicated
1399  * thread will be doing real precreation in background). The object can be
1400  * consumed later with osp_precreate_get_fid() or be released with call to
1401  * lu_object_put(). Notice the function doesn't reserve a specific ID, just
1402  * some ID. The actual ID assignment happens in osp_precreate_get_fid().
1403  * If the space on the target is short and there is a pending object destroy,
1404  * then the function forces a local commit to speed up space release (see
1405  * osp_sync.c for the details).
1406  *
1407  * \param[in] env       LU environment provided by the caller
1408  * \param[in] d         OSP device
1409  *
1410  * \retval              0 on success
1411  * \retval              -ENOSPC when no space on OST
1412  * \retval              -EAGAIN try later, slow precreation in progress
1413  * \retval              -EIO when no access to OST
1414  */
1415 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
1416 {
1417         time64_t expire = ktime_get_seconds() + obd_timeout;
1418         int precreated, rc, synced = 0;
1419
1420         ENTRY;
1421
1422         LASSERTF(osp_objs_precreated(env, d) >= 0, "Last created FID "DFID
1423                  "Next FID "DFID"\n", PFID(&d->opd_pre_last_created_fid),
1424                  PFID(&d->opd_pre_used_fid));
1425
1426         /* opd_pre_max_create_count == 0 means this OST is not to be used. */
1427         if (d->opd_pre_max_create_count == 0)
1428                 RETURN(-ENOBUFS);
1429
1430         /*
1431          * wait till:
1432          *  - preallocation is done
1433          *  - no free space expected soon
1434          *  - can't connect to OST for too long (obd_timeout)
1435          *  - OST can allocate fid sequence.
1436          */
1437         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
1438                 rc == -ENODEV || rc == -EAGAIN || rc == -ENOTCONN) {
1439
1440                 /*
1441                  * increase number of precreations
1442                  */
1443                 precreated = osp_objs_precreated(env, d);
1444                 if (d->opd_pre_create_count < d->opd_pre_max_create_count &&
1445                     d->opd_pre_create_slow == 0 &&
1446                     precreated <= (d->opd_pre_create_count / 4 + 1)) {
1447                         spin_lock(&d->opd_pre_lock);
1448                         d->opd_pre_create_slow = 1;
1449                         d->opd_pre_create_count *= 2;
1450                         spin_unlock(&d->opd_pre_lock);
1451                 }
1452
1453                 spin_lock(&d->opd_pre_lock);
1454                 precreated = osp_objs_precreated(env, d);
1455                 if (precreated > d->opd_pre_reserved &&
1456                     !d->opd_pre_recovering) {
1457                         d->opd_pre_reserved++;
1458                         spin_unlock(&d->opd_pre_lock);
1459                         rc = 0;
1460
1461                         /* XXX: don't wake up if precreation is in progress */
1462                         if (osp_precreate_near_empty_nolock(env, d) &&
1463                            !osp_precreate_end_seq_nolock(env, d))
1464                                 wake_up(&d->opd_pre_waitq);
1465
1466                         break;
1467                 }
1468                 spin_unlock(&d->opd_pre_lock);
1469
1470                 /*
1471                  * all precreated objects have been used and the no-space
1472                  * status leaves us no chance to succeed very soon, but if
1473                  * there are destroys in progress, then we should wait till
1474                  * they are done - some space might be released
1475                  */
1476                 if (unlikely(rc == -ENOSPC)) {
1477                         if (atomic_read(&d->opd_sync_changes) && synced == 0) {
1478                                 /* force local commit to release space */
1479                                 dt_commit_async(env, d->opd_storage);
1480                                 osp_sync_check_for_work(d);
1481                                 synced = 1;
1482                         }
1483                         if (atomic_read(&d->opd_sync_rpcs_in_progress)) {
1484                                 /* just wait till destroys are done
1485                                  * see wait_event_idle_timeout() below
1486                                  */
1487                         }
1488                         if (atomic_read(&d->opd_sync_changes) +
1489                             atomic_read(&d->opd_sync_rpcs_in_progress) == 0) {
1490                                 /* no hope for free space */
1491                                 break;
1492                         }
1493                 }
1494
1495                 /* XXX: don't wake up if precreation is in progress */
1496                 wake_up(&d->opd_pre_waitq);
1497
1498                 if (ktime_get_seconds() >= expire) {
1499                         rc = -ETIMEDOUT;
1500                         break;
1501                 }
1502
1503                 if (wait_event_idle_timeout(
1504                             d->opd_pre_user_waitq,
1505                             osp_precreate_ready_condition(env, d),
1506                             cfs_time_seconds(obd_timeout)) == 0) {
1507                         CDEBUG(D_HA,
1508                                "%s: slow creates, last="DFID", next="DFID", "
1509                                "reserved=%llu, sync_changes=%u, "
1510                                "sync_rpcs_in_progress=%d, status=%d\n",
1511                                d->opd_obd->obd_name,
1512                                PFID(&d->opd_pre_last_created_fid),
1513                                PFID(&d->opd_pre_used_fid), d->opd_pre_reserved,
1514                                atomic_read(&d->opd_sync_changes),
1515                                atomic_read(&d->opd_sync_rpcs_in_progress),
1516                                d->opd_pre_status);
1517                 }
1518         }
1519
1520         RETURN(rc);
1521 }
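
/*
 * A minimal usage sketch, assuming a hypothetical caller named
 * osp_example_reserve(); it only illustrates the return codes documented
 * above and the block is not built.
 */
#if 0
static int osp_example_reserve(const struct lu_env *env, struct osp_device *d)
{
        int rc = osp_precreate_reserve(env, d);

        if (rc == 0)
                /* an object is reserved; assign the actual FID later with
                 * osp_precreate_get_fid() */
                return 0;

        if (rc == -ENOSPC || rc == -ETIMEDOUT)
                /* OST is out of space, or we waited obd_timeout without
                 * progress - the caller may try another target */
                return rc;

        /* e.g. -ENOBUFS (max_create_count is 0, OST not used) or -EIO */
        return rc;
}
#endif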
1522
1523 /**
1524  * Get a FID from precreation pool
1525  *
1526  * The function is a companion to osp_precreate_reserve() - it assigns
1527  * a specific FID from the precreate pool. It should be called only
1528  * if the call to osp_precreate_reserve() was successful. The function
1529  * updates a local storage to remember the highest object ID referenced
1530  * by the node in the given sequence.
1531  *
1532  * A very important detail: this is supposed to be called once the
1533  * transaction is started, so the on-disk update will be atomic with the
1534  * data (like LOVEA) referring to this object. Then the object won't be
1535  * leaked: either it's referenced by the committed transaction or it's
1536  * subject to the orphan cleanup procedure.
1537  *
1538  * \param[in] env       LU environment provided by the caller
1539  * \param[in] d         OSP device
1540  * \param[out] fid      generated FID
1541  *
1542  * \retval 0            on success
1543  * \retval negative     negated errno on error
1544  */
1545 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
1546                           struct lu_fid *fid)
1547 {
1548         struct lu_fid *pre_used_fid = &d->opd_pre_used_fid;
1549         /* grab next id from the pool */
1550         spin_lock(&d->opd_pre_lock);
1551
1552         LASSERTF(osp_fid_diff(&d->opd_pre_used_fid,
1553                              &d->opd_pre_last_created_fid) < 0,
1554                  "next fid "DFID" last created fid "DFID"\n",
1555                  PFID(&d->opd_pre_used_fid),
1556                  PFID(&d->opd_pre_last_created_fid));
1557
1558         /*
1559          * When the sequence is used up, a new one should be allocated in
1560          * osp_precreate_rollover_new_seq(). So ASSERT here to avoid
1561          * objid overflow.
1562          */
1563         LASSERTF(osp_fid_end_seq(env, pre_used_fid) == 0,
1564                  "next fid "DFID" last created fid "DFID"\n",
1565                  PFID(&d->opd_pre_used_fid),
1566                  PFID(&d->opd_pre_last_created_fid));
1567         /* Non-IDIF FIDs shouldn't get here with oid == 0xFFFFFFFF. */
1568         if (fid_is_idif(pre_used_fid) &&
1569             unlikely(fid_oid(pre_used_fid) == LUSTRE_DATA_SEQ_MAX_WIDTH))
1570                 pre_used_fid->f_seq++;
1571
1572         d->opd_pre_used_fid.f_oid++;
1573         memcpy(fid, &d->opd_pre_used_fid, sizeof(*fid));
1574         d->opd_pre_reserved--;
1575         /*
1576          * last_used_id must be changed along with getting the new id,
1577          * otherwise we might miscalculate the gap, causing object loss or leak
1578          */
1579         osp_update_last_fid(d, fid);
1580         spin_unlock(&d->opd_pre_lock);
1581
1582         /*
1583          * probably main thread suspended orphan cleanup till
1584          * all reservations are released, see comment in
1585          * osp_precreate_thread() just before orphan cleanup
1586          */
1587         if (unlikely(d->opd_pre_reserved == 0 &&
1588                      (d->opd_pre_recovering || d->opd_pre_status)))
1589                 wake_up(&d->opd_pre_waitq);
1590
1591         return 0;
1592 }
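
/*
 * A minimal sketch of the ordering described above, compressed into one
 * hypothetical helper osp_example_alloc_fid() for brevity: the reservation
 * belongs to the declare phase and the FID assignment to the execution
 * phase, after the transaction has been started, so the new FID is
 * referenced atomically with the striping (LOVEA) update.  The block is
 * not built.
 */
#if 0
static int osp_example_alloc_fid(const struct lu_env *env,
                                 struct osp_device *d, struct lu_fid *fid)
{
        int rc;

        /* declare phase: make sure an object will be available */
        rc = osp_precreate_reserve(env, d);
        if (rc)
                return rc;

        /* ... the caller starts the transaction (dt_trans_start) here ... */

        /* execution phase: pick the concrete FID inside the transaction */
        return osp_precreate_get_fid(env, d, fid);
}
#endif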
1593
1594 /*
1595  * Set size regular attribute on an object
1596  *
1597  * When a striping is created late, it's possible that the size is already
1598  * initialized on the file. Then the new striping should inherit the size
1599  * from the file. The function sets the size on the object using the
1600  * regular protocol (OST_PUNCH).
1601  * XXX: should be re-implemented using OUT ?
1602  *
1603  * \param[in] env       LU environment provided by the caller
1604  * \param[in] dt        object
1605  * \param[in] size      size to set.
1606  *
1607  * \retval 0            on success
1608  * \retval negative     negated errno on error
1609  */
1610 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
1611                         __u64 size)
1612 {
1613         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
1614         struct ptlrpc_request   *req = NULL;
1615         struct obd_import       *imp;
1616         struct ost_body         *body;
1617         struct obdo             *oa = NULL;
1618         int                      rc;
1619
1620         ENTRY;
1621
1622         imp = d->opd_obd->u.cli.cl_import;
1623         LASSERT(imp);
1624
1625         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
1626         if (req == NULL)
1627                 RETURN(-ENOMEM);
1628
1629         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
1630         if (rc) {
1631                 ptlrpc_request_free(req);
1632                 RETURN(rc);
1633         }
1634
1635         /*
1636          * XXX: decide how to handle resend here.
1637          * If we don't resend, then the client may see a wrong file size;
1638          * if we do resend, then the MDS thread can get stuck for quite a
1639          * long time; and if we don't resend, then the client will also
1640          * get -EAGAIN !! (see LU-7975 and sanity/test_27F use cases)
1641          * But let's decide not to resend/delay this truncate request to
1642          * the OST and allow the client to decide to resend, in a less
1643          * aggressive way from after_reply(), by returning -EINPROGRESS
1644          * instead of -EAGAIN upon return from ptlrpc_queue_wait() at the
1645          * end of this routine.
1646          */
1647         req->rq_no_resend = req->rq_no_delay = 1;
1648
1649         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
1650         ptlrpc_at_set_req_timeout(req);
1651
1652         OBD_ALLOC_PTR(oa);
1653         if (oa == NULL)
1654                 GOTO(out, rc = -ENOMEM);
1655
1656         rc = fid_to_ostid(lu_object_fid(&dt->do_lu), &oa->o_oi);
1657         LASSERT(rc == 0);
1658         oa->o_size = size;
1659         oa->o_blocks = OBD_OBJECT_EOF;
1660         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1661                       OBD_MD_FLID | OBD_MD_FLGROUP;
1662
1663         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1664         LASSERT(body);
1665         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1666
1667         /* XXX: capa support? */
1668         /* osc_pack_capa(req, body, capa); */
1669
1670         ptlrpc_request_set_replen(req);
1671
1672         rc = ptlrpc_queue_wait(req);
1673         if (rc) {
1674                 /* -EAGAIN/-EWOULDBLOCK means the OST is unreachable right now
1675                  * since we decided not to resend/delay; this could lead to a
1676                  * wrong size being seen on the client and even make a process
1677                  * opening the file fail if it doesn't handle -EAGAIN itself.
1678                  * It is better to return -EINPROGRESS and leave the resend
1679                  * decision to the client side in after_reply()
1680                  */
1681                 if (rc == -EAGAIN) {
1682                         rc = -EINPROGRESS;
1683                         CDEBUG(D_HA, "returning -EINPROGRESS instead of "
1684                                "-EWOULDBLOCK/-EAGAIN to allow Client to "
1685                                "resend\n");
1686                 } else {
1687                         CERROR("can't punch object: %d\n", rc);
1688                 }
1689         }
1690 out:
1691         ptlrpc_req_finished(req);
1692         if (oa)
1693                 OBD_FREE_PTR(oa);
1694         RETURN(rc);
1695 }
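
/*
 * A minimal usage sketch, assuming a hypothetical caller that has just
 * created a late striping for a file whose size is already known; the
 * helper name osp_example_set_stripe_size() is made up for this example
 * and the block is not built.
 */
#if 0
static int osp_example_set_stripe_size(const struct lu_env *env,
                                       struct dt_object *stripe, __u64 size)
{
        int rc = osp_object_truncate(env, stripe, size);

        /* -EINPROGRESS means the OST was unreachable and the client side
         * will resend from after_reply(), see the comment above */
        return rc;
}
#endif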
1696
1697 /**
1698  * Initialize precreation functionality of OSP
1699  *
1700  * Prepares the internal structures (the thread starts in osp_init_statfs())
1701  *
1702  * \param[in] d         OSP device
1703  *
1704  * \retval 0            on success
1705  * \retval negative     negated errno on error
1706  */
1707 int osp_init_precreate(struct osp_device *d)
1708 {
1709         ENTRY;
1710
1711         OBD_ALLOC_PTR(d->opd_pre);
1712         if (d->opd_pre == NULL)
1713                 RETURN(-ENOMEM);
1714
1715         /* initially precreation isn't ready */
1716         init_waitqueue_head(&d->opd_pre_user_waitq);
1717         d->opd_pre_status = -EAGAIN;
1718         fid_zero(&d->opd_pre_used_fid);
1719         d->opd_pre_used_fid.f_oid = 1;
1720         fid_zero(&d->opd_pre_last_created_fid);
1721         d->opd_pre_last_created_fid.f_oid = 1;
1722         d->opd_last_id = 0;
1723         d->opd_pre_reserved = 0;
1724         d->opd_got_disconnected = 1;
1725         d->opd_pre_create_slow = 0;
1726         d->opd_pre_create_count = OST_MIN_PRECREATE;
1727         d->opd_pre_min_create_count = OST_MIN_PRECREATE;
1728         d->opd_pre_max_create_count = OST_MAX_PRECREATE;
1729         d->opd_reserved_mb_high = 0;
1730         d->opd_reserved_mb_low = 0;
1731
1732         RETURN(0);
1733 }
1734
1735 /**
1736  * Finish precreate functionality of OSP
1737  *
1738  * Releases the precreate state of the OSP device. The precreate thread
1739  * and the statfs update timer are stopped separately in
1740  * osp_statfs_fini().
1741  *
1742  * \param[in] d         OSP device
1743  */
1744 void osp_precreate_fini(struct osp_device *d)
1745 {
1746         ENTRY;
1747
1748         if (d->opd_pre == NULL)
1749                 RETURN_EXIT;
1750
1751         OBD_FREE_PTR(d->opd_pre);
1752         d->opd_pre = NULL;
1753
1754         EXIT;
1755 }
1756
1757 int osp_init_statfs(struct osp_device *d)
1758 {
1759         struct task_struct      *task;
1760         struct opt_args         *args;
1761         DECLARE_COMPLETION_ONSTACK(started);
1762         int                     rc;
1763
1764         ENTRY;
1765
1766         spin_lock_init(&d->opd_pre_lock);
1767         init_waitqueue_head(&d->opd_pre_waitq);
1768
1769         /*
1770          * Initialize statfs-related things
1771          */
1772         d->opd_statfs_maxage = 5; /* default update interval */
1773         d->opd_statfs_fresh_till = ktime_sub_ns(ktime_get(),
1774                                                 1000 * NSEC_PER_SEC);
1775         CDEBUG(D_OTHER, "current %lldns, fresh till %lldns\n",
1776                ktime_get_ns(),
1777                ktime_to_ns(d->opd_statfs_fresh_till));
1778         cfs_timer_setup(&d->opd_statfs_timer, osp_statfs_timer_cb,
1779                         (unsigned long)d, 0);
1780
1781         if (d->opd_storage->dd_rdonly)
1782                 RETURN(0);
1783
1784         OBD_ALLOC_PTR(args);
1785         if (!args)
1786                 RETURN(0);
1787         args->opta_dev = d;
1788         args->opta_started = &started;
1789         rc = lu_env_init(&args->opta_env,
1790                          d->opd_dt_dev.dd_lu_dev.ld_type->ldt_ctx_tags);
1791         if (rc) {
1792                 CERROR("%s: init env error: rc = %d\n", d->opd_obd->obd_name,
1793                        rc);
1794                 OBD_FREE_PTR(args);
1795                 RETURN(0);
1796         }
1797
1798         /*
1799          * start thread handling precreation and statfs updates
1800          */
1801         task = kthread_create(osp_precreate_thread, args,
1802                               "osp-pre-%u-%u", d->opd_index, d->opd_group);
1803         if (IS_ERR(task)) {
1804                 CERROR("can't start precreate thread %ld\n", PTR_ERR(task));
1805                 lu_env_fini(&args->opta_env);
1806                 OBD_FREE_PTR(args);
1807                 RETURN(PTR_ERR(task));
1808         }
1809         d->opd_pre_task = task;
1810         wake_up_process(task);
1811         wait_for_completion(&started);
1812
1813         RETURN(0);
1814 }
1815
1816 void osp_statfs_fini(struct osp_device *d)
1817 {
1818         struct task_struct *task = d->opd_pre_task;
1819         ENTRY;
1820
1821         del_timer(&d->opd_statfs_timer);
1822
1823         d->opd_pre_task = NULL;
1824         if (task)
1825                 kthread_stop(task);
1826
1827         EXIT;
1828 }
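
/*
 * A minimal lifecycle sketch, assuming a hypothetical wrapper
 * osp_example_precreate_lifecycle(); it only strings together the
 * init/fini entry points defined in this file in the order their
 * implementations suggest.  The block is not built.
 */
#if 0
static int osp_example_precreate_lifecycle(struct osp_device *d)
{
        int rc;

        rc = osp_init_precreate(d);     /* allocate d->opd_pre state */
        if (rc)
                return rc;

        rc = osp_init_statfs(d);        /* start statfs timer + osp-pre thread */
        if (rc) {
                osp_precreate_fini(d);
                return rc;
        }

        /* ... device in use ... */

        osp_statfs_fini(d);             /* stop the timer and the thread */
        osp_precreate_fini(d);          /* release d->opd_pre */
        return 0;
}
#endif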