Whamcloud - gitweb
LU-2237 tests: new test for re-recreating last_rcvd
[fs/lustre-release.git] / lustre / osp / osp_precreate.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osp/osp_precreate.c
37  *
38  * Lustre OST Proxy Device
39  *
40  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
41  * Author: Mikhail Pershin <mike.pershin@intel.com>
42  */
43
44 #ifndef EXPORT_SYMTAB
45 # define EXPORT_SYMTAB
46 #endif
47 #define DEBUG_SUBSYSTEM S_MDS
48
49 #include "osp_internal.h"
50
51 /*
52  * there are two specific states to take care about:
53  *
54  * = import is disconnected =
55  *
56  * = import is inactive =
57  *   in this case osp_declare_object_create() returns an error
58  *
59  */
60
61 /*
62  * statfs
63  */
64 static inline int osp_statfs_need_update(struct osp_device *d)
65 {
66         return !cfs_time_before(cfs_time_current(),
67                                 d->opd_statfs_fresh_till);
68 }
69
/*
 * Timer callback armed by osp_statfs_interpret() when fresh statfs data
 * was cached.  It fires once the data is considered stale.  Runs in
 * timer (interrupt) context, so it does no work itself: it only wakes
 * the precreate thread, which will notice osp_statfs_need_update() and
 * re-issue OST_STATFS.
 *
 * \param _d  osp_device pointer, cast to unsigned long by the timer API
 */
static void osp_statfs_timer_cb(unsigned long _d)
{
	struct osp_device *d = (struct osp_device *) _d;

	LASSERT(d);
	cfs_waitq_signal(&d->opd_pre_waitq);
}
77
/*
 * Reply callback for the asynchronous OST_STATFS request queued by
 * osp_statfs_update().  On success it caches the OST's statfs data in
 * opd_statfs, refreshes the precreate status and re-arms the staleness
 * timer.  On any failure it wakes the precreate thread so a new statfs
 * request is sent as soon as possible.
 *
 * \retval 0    statfs data cached successfully
 * \retval -ve  transport error (rc) or -EPROTO when the reply lacks
 *              the statfs buffer
 */
static int osp_statfs_interpret(const struct lu_env *env,
				struct ptlrpc_request *req,
				union ptlrpc_async_args *aa, int rc)
{
	struct obd_import	*imp = req->rq_import;
	struct obd_statfs	*msfs;
	struct osp_device	*d;

	ENTRY;

	/* osp_statfs_update() stored the device in the async args */
	aa = ptlrpc_req_async_args(req);
	d = aa->pointer_arg[0];
	LASSERT(d);

	if (rc != 0)
		GOTO(out, rc);

	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (msfs == NULL)
		GOTO(out, rc = -EPROTO);

	d->opd_statfs = *msfs;

	/* rc == 0 here: may flip opd_pre_status to/from -ENOSPC */
	osp_pre_update_status(d, rc);

	/* schedule next update */
	d->opd_statfs_fresh_till = cfs_time_shift(d->opd_statfs_maxage);
	cfs_timer_arm(&d->opd_statfs_timer, d->opd_statfs_fresh_till);
	d->opd_statfs_update_in_progress = 0;

	CDEBUG(D_CACHE, "updated statfs %p\n", d);

	RETURN(0);
out:
	/* couldn't update statfs, try again as soon as possible */
	cfs_waitq_signal(&d->opd_pre_waitq);
	/* only log for the current import generation to avoid noise
	 * from replies that raced with a reconnect */
	if (req->rq_import_generation == imp->imp_generation)
		CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
		       d->opd_obd->obd_name, rc);
	RETURN(rc);
}
119
/*
 * Sends an asynchronous OST_STATFS request via ptlrpcd; the reply is
 * handled by osp_statfs_interpret(), which caches the result and
 * re-arms the staleness timer.
 *
 * \retval 0        the request was queued
 * \retval -ENOMEM  request allocation failed
 * \retval -ve      ptlrpc_request_pack() error
 */
static int osp_statfs_update(struct osp_device *d)
{
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	union ptlrpc_async_args	*aa;
	int			 rc;

	ENTRY;

	CDEBUG(D_CACHE, "going to update statfs\n");

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	ptlrpc_request_set_replen(req);
	req->rq_request_portal = OST_CREATE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
	aa = ptlrpc_req_async_args(req);
	aa->pointer_arg[0] = d;

	/*
	 * no updates till reply: push the deadline far into the future
	 * and disarm the timer so nobody re-sends while this request is
	 * in flight; the reply handler restores the normal schedule
	 */
	cfs_timer_disarm(&d->opd_statfs_timer);
	d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
	d->opd_statfs_update_in_progress = 1;

	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);

	RETURN(0);
}
162
/*
 * Forces a statfs refresh at the next opportunity, typically called
 * after an object destroy has been replied so that freed space becomes
 * visible quickly.
 *
 * XXX: there might be a case where removed object(s) do not add free
 * space (empty objects).  If the number of such deletions is high, then
 * we can start to update statfs too often - an RPC storm.
 * TODO: some throttling is needed.
 */
void osp_statfs_need_now(struct osp_device *d)
{
	if (!d->opd_statfs_update_in_progress) {
		/*
		 * if current status is -ENOSPC (lack of free space on OST)
		 * then we should poll OST immediately once object destroy
		 * is replied
		 */
		d->opd_statfs_fresh_till = cfs_time_shift(-1);
		cfs_timer_disarm(&d->opd_statfs_timer);
		cfs_waitq_signal(&d->opd_pre_waitq);
	}
}
182
183
184 /*
185  * OSP tries to maintain pool of available objects so that calls to create
186  * objects don't block most of time
187  *
188  * each time OSP gets connected to OST, we should start from precreation cleanup
189  */
190 static inline int osp_precreate_running(struct osp_device *d)
191 {
192         return !!(d->opd_pre_thread.t_flags & SVC_RUNNING);
193 }
194
195 static inline int osp_precreate_stopped(struct osp_device *d)
196 {
197         return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
198 }
199
/*
 * Returns non-zero when the pool of precreated objects is running low:
 * fewer than half of opd_pre_grow_count objects remain unreserved AND
 * the OST is currently usable (opd_pre_status == 0).
 * Caller must hold opd_pre_lock.
 */
static inline int osp_precreate_near_empty_nolock(struct osp_device *d)
{
	int window = d->opd_pre_last_created - d->opd_pre_used_id;

	/* don't consider new precreation till OST is healthy and
	 * has free space */
	return ((window - d->opd_pre_reserved < d->opd_pre_grow_count / 2) &&
		(d->opd_pre_status == 0));
}
209
210 static inline int osp_precreate_near_empty(struct osp_device *d)
211 {
212         int rc;
213
214         /* XXX: do we really need locking here? */
215         spin_lock(&d->opd_pre_lock);
216         rc = osp_precreate_near_empty_nolock(d);
217         spin_unlock(&d->opd_pre_lock);
218         return rc;
219 }
220
/*
 * Sends a synchronous OST_CREATE request asking the OST to precreate
 * the next batch of opd_pre_grow_count objects.  On success the reply
 * carries the new last-created id; the grow count is adapted depending
 * on whether the OST kept up with the requested batch.
 *
 * Called only from the precreate thread.
 *
 * \retval 0    success, or skipped because opd_pre_status is non-zero
 * \retval -ve  allocation/pack/transport error
 */
static int osp_precreate_send(struct osp_device *d)
{
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	struct ost_body		*body;
	int			 rc, grow, diff;

	ENTRY;

	/* don't precreate new objects till OST healthy and has free space */
	if (unlikely(d->opd_pre_status)) {
		CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
		       d->opd_obd->obd_name, d->opd_pre_status);
		RETURN(0);
	}

	/*
	 * if connection/initialization is not completed, ignore
	 */
	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		RETURN(-ENOMEM);
	req->rq_request_portal = OST_CREATE_PORTAL;
	/* we should not resend create request - anyway we will have delorphan
	 * and kill these objects */
	req->rq_no_delay = req->rq_no_resend = 1;

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	/* cap the batch at half the maximum so growth stays gradual */
	spin_lock(&d->opd_pre_lock);
	if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
		d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
	grow = d->opd_pre_grow_count;
	spin_unlock(&d->opd_pre_lock);

	/* ask the OST to precreate up to last_created + grow */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	body->oa.o_id = d->opd_pre_last_created + grow;
	body->oa.o_seq = FID_SEQ_OST_MDT0; /* XXX: support for CMD? */
	body->oa.o_valid = OBD_MD_FLGROUP;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		CERROR("%s: can't precreate: rc = %d\n",
		       d->opd_obd->obd_name, rc);
		GOTO(out_req, rc);
	}
	/* precreate must not be a transactional (replayable) operation */
	LASSERT(req->rq_transno == 0);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out_req, rc = -EPROTO);

	CDEBUG(D_HA, "%s: new last_created "LPU64"\n", d->opd_obd->obd_name,
	       body->oa.o_id);
	LASSERT(body->oa.o_id > d->opd_pre_used_id);

	diff = body->oa.o_id - d->opd_pre_last_created;

	spin_lock(&d->opd_pre_lock);
	if (diff < grow) {
		/* the OST has not managed to create all the
		 * objects we asked for */
		d->opd_pre_grow_count = max(diff, OST_MIN_PRECREATE);
		d->opd_pre_grow_slow = 1;
	} else {
		/* the OST is able to keep up with the work,
		 * we could consider increasing grow_count
		 * next time if needed */
		d->opd_pre_grow_slow = 0;
	}
	d->opd_pre_last_created = body->oa.o_id;
	spin_unlock(&d->opd_pre_lock);
	CDEBUG(D_OTHER, "current precreated pool: %llu-%llu\n",
	       d->opd_pre_used_id, d->opd_pre_last_created);

out_req:
	/* now we can wakeup all users awaiting for objects */
	osp_pre_update_status(d, rc);
	cfs_waitq_signal(&d->opd_pre_user_waitq);

	ptlrpc_req_finished(req);
	RETURN(rc);
}
314
315
/*
 * Fetches the last used object id from the OST with a synchronous
 * OST_GET_INFO(KEY_LAST_ID) request and stores it in opd_last_used_id.
 * Used when the locally stored last id looks invalid (e.g. after
 * last_rcvd recreation).  On transport failure the import is
 * deactivated so an administrator can investigate.
 *
 * \retval 0    opd_last_used_id updated from the OST reply
 * \retval -ve  allocation/pack/transport error or -EPROTO on bad reply
 */
static int osp_get_lastid_from_ost(struct osp_device *d)
{
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	obd_id			*reply;
	char			*tmp;
	int			 rc;

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_ID);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
			     RCL_CLIENT, sizeof(KEY_LAST_ID));
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
	memcpy(tmp, KEY_LAST_ID, sizeof(KEY_LAST_ID));

	req->rq_no_delay = req->rq_no_resend = 1;
	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	if (rc) {
		/* bad-bad OST.. let sysadm sort this out */
		ptlrpc_set_import_active(imp, 0);
		GOTO(out, rc);
	}

	reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
	if (reply == NULL)
		GOTO(out, rc = -EPROTO);

	d->opd_last_used_id = *reply;
	CDEBUG(D_HA, "%s: got last_id "LPU64" from OST\n",
	       d->opd_obd->obd_name, d->opd_last_used_id);

out:
	ptlrpc_req_finished(req);
	RETURN(rc);

}
364
/**
 * Asks the OST to clean up precreated orphans (objects created but
 * never used before the last failure) via OST_CREATE with
 * OBD_FL_DELORPHAN, and learns the id new precreations start from.
 *
 * Runs in the precreate thread after each (re)connection, before any
 * normal precreation is allowed.
 *
 * \retval 0        orphans cleaned, precreate window reset (empty)
 * \retval -EAGAIN  thread stopping or import disconnected while waiting
 * \retval -ve      allocation/pack/transport/protocol error
 */
static int osp_precreate_cleanup_orphans(struct osp_device *d)
{
	struct ptlrpc_request	*req = NULL;
	struct obd_import	*imp;
	struct ost_body		*body;
	struct l_wait_info	 lwi = { 0 };
	int			 update_status = 0;
	int			 rc;

	ENTRY;

	/*
	 * wait for local recovery to finish, so we can cleanup orphans.
	 * orphans are all objects since "last used" (assigned), but
	 * there might be objects reserved and in some cases they won't
	 * be used. we can't cleanup them till we're sure they won't be
	 * used. also we can't allow new reservations because they may
	 * end up getting orphans being cleaned up below. so we block
	 * new reservations and wait till all reserved objects are
	 * either used or released.
	 */
	spin_lock(&d->opd_pre_lock);
	d->opd_pre_recovering = 1;
	spin_unlock(&d->opd_pre_lock);
	/*
	 * The locking above makes sure the opd_pre_reserved check below will
	 * catch all osp_precreate_reserve() calls who find
	 * "!opd_pre_recovering".
	 */
	l_wait_event(d->opd_pre_waitq,
		     (!d->opd_pre_reserved && d->opd_recovery_completed) ||
		     !osp_precreate_running(d) || d->opd_got_disconnected,
		     &lwi);
	if (!osp_precreate_running(d) || d->opd_got_disconnected)
		GOTO(out, rc = -EAGAIN);

	CDEBUG(D_HA, "%s: going to cleanup orphans since "LPU64"\n",
		d->opd_obd->obd_name, d->opd_last_used_id);

	if (d->opd_last_used_id < 2) {
		/* lastid looks strange... ask OST */
		rc = osp_get_lastid_from_ost(d);
		if (rc)
			GOTO(out, rc);
	}

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	/* DELORPHAN: destroy everything the OST created past o_id */
	body->oa.o_flags = OBD_FL_DELORPHAN;
	body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
	body->oa.o_seq = FID_SEQ_OST_MDT0;

	body->oa.o_id = d->opd_last_used_id;

	ptlrpc_request_set_replen(req);

	/* Don't resend the delorphan req */
	req->rq_no_resend = req->rq_no_delay = 1;

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		update_status = 1;
		GOTO(out, rc);
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	/*
	 * OST provides us with id new pool starts from in body->oa.o_id
	 */
	spin_lock(&d->opd_pre_lock);
	if (le64_to_cpu(d->opd_last_used_id) > body->oa.o_id) {
		d->opd_pre_grow_count = OST_MIN_PRECREATE +
					le64_to_cpu(d->opd_last_used_id) -
					body->oa.o_id;
		d->opd_pre_last_created = le64_to_cpu(d->opd_last_used_id);
	} else {
		d->opd_pre_grow_count = OST_MIN_PRECREATE;
		d->opd_pre_last_created = body->oa.o_id;
	}
	/*
	 * This empties the pre-creation pool and effectively blocks any new
	 * reservations.
	 */
	d->opd_pre_used_id = d->opd_pre_last_created;
	d->opd_pre_grow_slow = 0;
	spin_unlock(&d->opd_pre_lock);

	CDEBUG(D_HA, "%s: Got last_id "LPU64" from OST, last_used is "LPU64
	       ", pre_used "LPU64"\n", d->opd_obd->obd_name, body->oa.o_id,
	       le64_to_cpu(d->opd_last_used_id), d->opd_pre_used_id);

out:
	if (req)
		ptlrpc_req_finished(req);

	d->opd_pre_recovering = 0;

	/*
	 * If rc is zero, the pre-creation window should have been emptied.
	 * Since waking up the herd would be useless without pre-created
	 * objects, we defer the signal to osp_precreate_send() in that case.
	 */
	if (rc != 0) {
		if (update_status) {
			CERROR("%s: cannot cleanup orphans: rc = %d\n",
			       d->opd_obd->obd_name, rc);
			/* we can't proceed from here, OST seem to
			 * be in a bad shape, better to wait for
			 * a new instance of the server and repeat
			 * from the beginning. notify possible waiters
			 * this OSP isn't quite functional yet */
			osp_pre_update_status(d, rc);
		} else {
			cfs_waitq_signal(&d->opd_pre_user_waitq);
		}
	}

	RETURN(rc);
}
506
/*
 * The function updates current precreation status used: functional or not.
 *
 * rc is the last code from the transport, rc == 0 meaning the transport
 * works well and users of lod can use objects from this OSP.
 *
 * The resulting status also depends on current space usage of the OST:
 * with rc == 0 it may still be set to -ENOSPC when the OST is nearly
 * full.  Always wakes waiters on opd_pre_user_waitq so they can
 * re-evaluate osp_precreate_ready_condition().
 */
void osp_pre_update_status(struct osp_device *d, int rc)
{
	struct obd_statfs	*msfs = &d->opd_statfs;
	int			 old = d->opd_pre_status;
	__u64			 used;

	d->opd_pre_status = rc;
	if (rc)
		goto out;

	/* Add a bit of hysteresis so this flag isn't continually flapping,
	 * and ensure that new files don't get extremely fragmented due to
	 * only a small amount of available space in the filesystem.
	 * We want to set the NOSPC flag when there is less than ~0.1% free
	 * and clear it when there is at least ~0.2% free space, so:
	 *                   avail < ~0.1% max          max = avail + used
	 *            1025 * avail < avail + used       used = blocks - free
	 *            1024 * avail < used
	 *            1024 * avail < blocks - free
	 *                   avail < ((blocks - free) >> 10)
	 *
	 * On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want
	 * to lose that amount of space so in those cases we report no space
	 * left if there is less than 1 GB left.                        */
	if (likely(msfs->os_type)) {
		used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10,
				    1 << 30);
		if ((msfs->os_ffree < 32) || (msfs->os_bavail < used)) {
			d->opd_pre_status = -ENOSPC;
			if (old != -ENOSPC)
				CDEBUG(D_INFO, "%s: status: "LPU64" blocks, "
				       LPU64" free, "LPU64" used, "LPU64" "
				       "avail -> %d: rc = %d\n",
				       d->opd_obd->obd_name, msfs->os_blocks,
				       msfs->os_bfree, used, msfs->os_bavail,
				       d->opd_pre_status, rc);
			CDEBUG(D_INFO,
			       "non-commited changes: %lu, in progress: %u\n",
			       d->opd_syn_changes, d->opd_syn_rpc_in_progress);
		} else if (old == -ENOSPC) {
			/* space became available again: reset the grow
			 * window and restart precreation */
			d->opd_pre_status = 0;
			d->opd_pre_grow_slow = 0;
			d->opd_pre_grow_count = OST_MIN_PRECREATE;
			cfs_waitq_signal(&d->opd_pre_waitq);
			CDEBUG(D_INFO, "%s: no space: "LPU64" blocks, "LPU64
			       " free, "LPU64" used, "LPU64" avail -> %d: "
			       "rc = %d\n", d->opd_obd->obd_name,
			       msfs->os_blocks, msfs->os_bfree, used,
			       msfs->os_bavail, d->opd_pre_status, rc);
		}
	}

out:
	cfs_waitq_signal(&d->opd_pre_user_waitq);
}
570
571 static int osp_precreate_thread(void *_arg)
572 {
573         struct osp_device       *d = _arg;
574         struct ptlrpc_thread    *thread = &d->opd_pre_thread;
575         struct l_wait_info       lwi = { 0 };
576         char                     pname[16];
577         int                      rc;
578
579         ENTRY;
580
581         sprintf(pname, "osp-pre-%u\n", d->opd_index);
582         cfs_daemonize(pname);
583
584         spin_lock(&d->opd_pre_lock);
585         thread->t_flags = SVC_RUNNING;
586         spin_unlock(&d->opd_pre_lock);
587         cfs_waitq_signal(&thread->t_ctl_waitq);
588
589         while (osp_precreate_running(d)) {
590                 /*
591                  * need to be connected to OST
592                  */
593                 while (osp_precreate_running(d)) {
594                         l_wait_event(d->opd_pre_waitq,
595                                      !osp_precreate_running(d) ||
596                                      d->opd_new_connection, &lwi);
597
598                         if (!osp_precreate_running(d))
599                                 break;
600
601                         if (!d->opd_new_connection)
602                                 continue;
603
604                         /* got connected */
605                         d->opd_new_connection = 0;
606                         d->opd_got_disconnected = 0;
607                         break;
608                 }
609
610                 osp_statfs_update(d);
611
612                 /*
613                  * Clean up orphans or recreate missing objects.
614                  */
615                 rc = osp_precreate_cleanup_orphans(d);
616                 if (rc != 0)
617                         continue;
618
619                 /*
620                  * connected, can handle precreates now
621                  */
622                 while (osp_precreate_running(d)) {
623                         l_wait_event(d->opd_pre_waitq,
624                                      !osp_precreate_running(d) ||
625                                      osp_precreate_near_empty(d) ||
626                                      osp_statfs_need_update(d) ||
627                                      d->opd_got_disconnected, &lwi);
628
629                         if (!osp_precreate_running(d))
630                                 break;
631
632                         /* something happened to the connection
633                          * have to start from the beginning */
634                         if (d->opd_got_disconnected)
635                                 break;
636
637                         if (osp_statfs_need_update(d))
638                                 osp_statfs_update(d);
639
640                         if (osp_precreate_near_empty(d)) {
641                                 rc = osp_precreate_send(d);
642                                 /* osp_precreate_send() sets opd_pre_status
643                                  * in case of error, that prevent the using of
644                                  * failed device. */
645                                 if (rc != 0 && rc != -ENOSPC &&
646                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
647                                         CERROR("%s: cannot precreate objects:"
648                                                " rc = %d\n",
649                                                d->opd_obd->obd_name, rc);
650                         }
651                 }
652         }
653
654         thread->t_flags = SVC_STOPPED;
655         cfs_waitq_signal(&thread->t_ctl_waitq);
656
657         RETURN(0);
658 }
659
660 static int osp_precreate_ready_condition(struct osp_device *d)
661 {
662         __u64 next;
663
664         if (d->opd_pre_recovering)
665                 return 0;
666
667         /* ready if got enough precreated objects */
668         /* we need to wait for others (opd_pre_reserved) and our object (+1) */
669         next = d->opd_pre_used_id + d->opd_pre_reserved + 1;
670         if (next <= d->opd_pre_last_created)
671                 return 1;
672
673         /* ready if OST reported no space and no destoys in progress */
674         if (d->opd_syn_changes + d->opd_syn_rpc_in_progress == 0 &&
675             d->opd_pre_status != 0)
676                 return 1;
677
678         return 0;
679 }
680
/*
 * Timeout handler for the l_wait_event() in osp_precreate_reserve():
 * logs the current precreate state for diagnosis and returns 1 so the
 * wait is aborted (the reserve loop then decides whether to retry).
 */
static int osp_precreate_timeout_condition(void *data)
{
	struct osp_device *d = data;

	LCONSOLE_WARN("%s: slow creates, last="LPU64", next="LPU64", "
		      "reserved="LPU64", syn_changes=%lu, "
		      "syn_rpc_in_progress=%d, status=%d\n",
		      d->opd_obd->obd_name, d->opd_pre_last_created,
		      d->opd_pre_used_id, d->opd_pre_reserved,
		      d->opd_syn_changes, d->opd_syn_rpc_in_progress,
		      d->opd_pre_status);

	return 1;
}
695
/*
 * Called to reserve an object in the precreated pool; the id itself is
 * taken later via osp_precreate_get_id().  Blocks until an object is
 * available, or no object can be expected soon.
 *
 * return codes:
 *  0       - reservation taken (opd_pre_reserved incremented)
 *  -ENOSPC - no space on corresponding OST
 *  -EAGAIN - precreation is in progress, try later
 *  -EIO    - no access to OST
 *  -ENODEV - couldn't connect to OST within obd_timeout
 */
int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
{
	struct l_wait_info	 lwi;
	cfs_time_t		 expire = cfs_time_shift(obd_timeout);
	int			 precreated, rc;
	int			 count = 0;

	ENTRY;

	LASSERT(d->opd_pre_last_created >= d->opd_pre_used_id);

	lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
			  osp_precreate_timeout_condition, d);

	/*
	 * wait till:
	 *  - preallocation is done
	 *  - no free space expected soon
	 *  - can't connect to OST for too long (obd_timeout)
	 */
	while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
		rc == -ENODEV) {
		/* -ENODEV (not connected) is retried only till 'expire' */
		if (unlikely(rc == -ENODEV)) {
			if (cfs_time_aftereq(cfs_time_current(), expire))
				break;
		}

#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 3, 90, 0)
		/*
		 * to address Andreas's concern on possible busy-loop
		 * between this thread and osp_precreate_send()
		 */
		if (unlikely(count++ == 1000)) {
			osp_precreate_timeout_condition(d);
			LBUG();
		}
#endif

		/*
		 * increase number of precreations: double grow_count when
		 * the remaining window has shrunk to ~a quarter of it
		 */
		if (d->opd_pre_grow_count < d->opd_pre_max_grow_count &&
		    d->opd_pre_grow_slow == 0 &&
		    (d->opd_pre_last_created - d->opd_pre_used_id <=
		     d->opd_pre_grow_count / 4 + 1)) {
			spin_lock(&d->opd_pre_lock);
			d->opd_pre_grow_slow = 1;
			d->opd_pre_grow_count *= 2;
			spin_unlock(&d->opd_pre_lock);
		}

		spin_lock(&d->opd_pre_lock);
		precreated = d->opd_pre_last_created - d->opd_pre_used_id;
		if (precreated > d->opd_pre_reserved &&
		    !d->opd_pre_recovering) {
			d->opd_pre_reserved++;
			spin_unlock(&d->opd_pre_lock);
			rc = 0;

			/* XXX: don't wake up if precreation is in progress */
			if (osp_precreate_near_empty_nolock(d))
				cfs_waitq_signal(&d->opd_pre_waitq);

			break;
		}
		spin_unlock(&d->opd_pre_lock);

		/*
		 * all precreated objects have been used and no-space
		 * status leaves us no chance to succeed very soon
		 * but if there is destroy in progress, then we should
		 * wait till that is done - some space might be released
		 */
		if (unlikely(rc == -ENOSPC)) {
			if (d->opd_syn_changes) {
				/* force local commit to release space */
				dt_commit_async(env, d->opd_storage);
			}
			if (d->opd_syn_rpc_in_progress) {
				/* just wait till destroys are done */
				/* see l_wait_even() few lines below */
			}
			if (d->opd_syn_changes +
			    d->opd_syn_rpc_in_progress == 0) {
				/* no hope for free space */
				break;
			}
		}

		/* XXX: don't wake up if precreation is in progress */
		cfs_waitq_signal(&d->opd_pre_waitq);

		l_wait_event(d->opd_pre_user_waitq,
			     osp_precreate_ready_condition(d), &lwi);
	}

	RETURN(rc);
}
801
/*
 * Consumes one object id from the precreated pool.  This function
 * relies on a reservation made before via osp_precreate_reserve();
 * it must not be called without one.
 *
 * \retval the object id assigned to the caller
 */
__u64 osp_precreate_get_id(struct osp_device *d)
{
	obd_id objid;

	/* grab next id from the pool */
	spin_lock(&d->opd_pre_lock);
	LASSERT(d->opd_pre_used_id < d->opd_pre_last_created);
	objid = ++d->opd_pre_used_id;
	d->opd_pre_reserved--;
	/*
	 * last_used_id must be changed along with getting new id otherwise
	 * we might miscalculate gap causing object loss or leak
	 */
	osp_update_last_id(d, objid);
	spin_unlock(&d->opd_pre_lock);

	/*
	 * probably main thread suspended orphan cleanup till
	 * all reservations are released, see comment in
	 * osp_precreate_thread() just before orphan cleanup
	 */
	if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
		cfs_waitq_signal(&d->opd_pre_waitq);

	return objid;
}
831
832 /*
833  *
834  */
835 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
836                         __u64 size)
837 {
838         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
839         struct ptlrpc_request   *req = NULL;
840         struct obd_import       *imp;
841         struct ost_body         *body;
842         struct obdo             *oa = NULL;
843         int                      rc;
844
845         ENTRY;
846
847         imp = d->opd_obd->u.cli.cl_import;
848         LASSERT(imp);
849
850         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
851         if (req == NULL)
852                 RETURN(-ENOMEM);
853
854         /* XXX: capa support? */
855         /* osc_set_capa_size(req, &RMF_CAPA1, capa); */
856         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
857         if (rc) {
858                 ptlrpc_request_free(req);
859                 RETURN(rc);
860         }
861
862         /*
863          * XXX: decide how do we do here with resend
864          * if we don't resend, then client may see wrong file size
865          * if we do resend, then MDS thread can get stuck for quite long
866          */
867         req->rq_no_resend = req->rq_no_delay = 1;
868
869         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
870         ptlrpc_at_set_req_timeout(req);
871
872         OBD_ALLOC_PTR(oa);
873         if (oa == NULL)
874                 GOTO(out, rc = -ENOMEM);
875
876         rc = fid_ostid_pack(lu_object_fid(&dt->do_lu), &oa->o_oi);
877         LASSERT(rc == 0);
878         oa->o_size = size;
879         oa->o_blocks = OBD_OBJECT_EOF;
880         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
881                       OBD_MD_FLID | OBD_MD_FLGROUP;
882
883         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
884         LASSERT(body);
885         lustre_set_wire_obdo(&body->oa, oa);
886
887         /* XXX: capa support? */
888         /* osc_pack_capa(req, body, capa); */
889
890         ptlrpc_request_set_replen(req);
891
892         rc = ptlrpc_queue_wait(req);
893         if (rc)
894                 CERROR("can't punch object: %d\n", rc);
895 out:
896         ptlrpc_req_finished(req);
897         if (oa)
898                 OBD_FREE_PTR(oa);
899         RETURN(rc);
900 }
901
902 int osp_init_precreate(struct osp_device *d)
903 {
904         struct l_wait_info       lwi = { 0 };
905         int                      rc;
906
907         ENTRY;
908
909         /* initially precreation isn't ready */
910         d->opd_pre_status = -EAGAIN;
911         d->opd_pre_used_id = 0;
912         d->opd_pre_last_created = 0;
913         d->opd_pre_reserved = 0;
914         d->opd_got_disconnected = 1;
915         d->opd_pre_grow_slow = 0;
916         d->opd_pre_grow_count = OST_MIN_PRECREATE;
917         d->opd_pre_min_grow_count = OST_MIN_PRECREATE;
918         d->opd_pre_max_grow_count = OST_MAX_PRECREATE;
919
920         spin_lock_init(&d->opd_pre_lock);
921         cfs_waitq_init(&d->opd_pre_waitq);
922         cfs_waitq_init(&d->opd_pre_user_waitq);
923         cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);
924
925         /*
926          * Initialize statfs-related things
927          */
928         d->opd_statfs_maxage = 5; /* default update interval */
929         d->opd_statfs_fresh_till = cfs_time_shift(-1000);
930         CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
931                (unsigned long long)cfs_time_current(),
932                (unsigned long long)d->opd_statfs_fresh_till);
933         cfs_timer_init(&d->opd_statfs_timer, osp_statfs_timer_cb, d);
934
935         /*
936          * start thread handling precreation and statfs updates
937          */
938         rc = cfs_create_thread(osp_precreate_thread, d, 0);
939         if (rc < 0) {
940                 CERROR("can't start precreate thread %d\n", rc);
941                 RETURN(rc);
942         }
943
944         l_wait_event(d->opd_pre_thread.t_ctl_waitq,
945                      osp_precreate_running(d) || osp_precreate_stopped(d),
946                      &lwi);
947
948         RETURN(0);
949 }
950
951 void osp_precreate_fini(struct osp_device *d)
952 {
953         struct ptlrpc_thread *thread = &d->opd_pre_thread;
954
955         ENTRY;
956
957         cfs_timer_disarm(&d->opd_statfs_timer);
958
959         thread->t_flags = SVC_STOPPING;
960         cfs_waitq_signal(&d->opd_pre_waitq);
961
962         cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
963
964         EXIT;
965 }
966