/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osp/osp_precreate.c
 *
 * Lustre OST Proxy Device
 *
 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_MDS

#include "osp_internal.h"

/*
 * there are two specific states to take care of:
 *
 * = import is disconnected =
 *
 * = import is inactive =
 *   in this case osp_declare_object_create() returns an error
 *
 */

/*
 * statfs
 */
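/*
 * Returns true once the cached statfs data has reached its freshness
 * deadline (opd_statfs_fresh_till) and should be refreshed from the OST.
 */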
static inline int osp_statfs_need_update(struct osp_device *d)
{
        return !cfs_time_before(cfs_time_current(),
                                d->opd_statfs_fresh_till);
}

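/*
 * Timer callback: fired when the statfs data is due for a refresh; it just
 * wakes up the precreate thread, which issues the actual OST_STATFS request.
 */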
static void osp_statfs_timer_cb(unsigned long _d)
{
        struct osp_device *d = (struct osp_device *) _d;

        LASSERT(d);
        cfs_waitq_signal(&d->opd_pre_waitq);
}

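/*
 * Reply callback for the asynchronous OST_STATFS request: caches the returned
 * statistics, updates the precreate status and re-arms the timer for the next
 * update; on failure it wakes the precreate thread so the update is retried.
 */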
static int osp_statfs_interpret(const struct lu_env *env,
                                struct ptlrpc_request *req,
                                union ptlrpc_async_args *aa, int rc)
{
        struct obd_import       *imp = req->rq_import;
        struct obd_statfs       *msfs;
        struct osp_device       *d;

        ENTRY;

        aa = ptlrpc_req_async_args(req);
        d = aa->pointer_arg[0];
        LASSERT(d);

        if (rc != 0)
                GOTO(out, rc);

        msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
        if (msfs == NULL)
                GOTO(out, rc = -EPROTO);

        d->opd_statfs = *msfs;

        osp_pre_update_status(d, rc);

        /* schedule next update */
        d->opd_statfs_fresh_till = cfs_time_shift(d->opd_statfs_maxage);
        cfs_timer_arm(&d->opd_statfs_timer, d->opd_statfs_fresh_till);
        d->opd_statfs_update_in_progress = 0;

        CDEBUG(D_CACHE, "updated statfs %p\n", d);

        RETURN(0);
out:
        /* couldn't update statfs, try again as soon as possible */
        cfs_waitq_signal(&d->opd_pre_waitq);
        if (req->rq_import_generation == imp->imp_generation)
                CDEBUG(D_CACHE, "%s: couldn't update statfs: rc = %d\n",
                       d->opd_obd->obd_name, rc);
        RETURN(rc);
}

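/*
 * Sends an asynchronous OST_STATFS request via ptlrpcd.  Until the reply
 * arrives no further updates are scheduled: the timer is disarmed and the
 * freshness deadline is pushed far into the future.
 */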
static int osp_statfs_update(struct osp_device *d)
{
        struct ptlrpc_request   *req;
        struct obd_import       *imp;
        union ptlrpc_async_args *aa;
        int                      rc;

        ENTRY;

        CDEBUG(D_CACHE, "going to update statfs\n");

        imp = d->opd_obd->u.cli.cl_import;
        LASSERT(imp);

        req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        ptlrpc_request_set_replen(req);
        req->rq_request_portal = OST_CREATE_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
        aa = ptlrpc_req_async_args(req);
        aa->pointer_arg[0] = d;

        /*
         * no updates till reply
         */
        cfs_timer_disarm(&d->opd_statfs_timer);
        d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
        d->opd_statfs_update_in_progress = 1;

        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);

        RETURN(0);
}

/*
 * XXX: there might be a case where removed object(s) do not add free
 * space (empty objects). if the number of such deletions is high, then
 * we can start to update statfs too often - an RPC storm
 * TODO: some throttling is needed
 */
void osp_statfs_need_now(struct osp_device *d)
{
        if (!d->opd_statfs_update_in_progress) {
                /*
                 * if the current status is -ENOSPC (lack of free space on
                 * the OST) then we should poll the OST immediately once the
                 * object destroy is replied to
                 */
                d->opd_statfs_fresh_till = cfs_time_shift(-1);
                cfs_timer_disarm(&d->opd_statfs_timer);
                cfs_waitq_signal(&d->opd_pre_waitq);
        }
}

/*
 * OSP tries to maintain a pool of available objects so that calls to create
 * objects don't block most of the time
 *
 * each time OSP gets connected to the OST, we should start with precreation
 * cleanup
 */
static inline int osp_precreate_running(struct osp_device *d)
{
        return !!(d->opd_pre_thread.t_flags & SVC_RUNNING);
}

static inline int osp_precreate_stopped(struct osp_device *d)
{
        return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
}

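/*
 * Returns true when the window of precreated but not yet used objects
 * (opd_pre_last_created - opd_pre_used_id), less current reservations, has
 * dropped below half of the grow count and the OST is healthy
 * (opd_pre_status == 0), i.e. it is time to precreate another batch.
 */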
static inline int osp_precreate_near_empty_nolock(struct osp_device *d)
{
        int window = d->opd_pre_last_created - d->opd_pre_used_id;

        /* don't consider new precreation till OST is healthy and
         * has free space */
        return ((window - d->opd_pre_reserved < d->opd_pre_grow_count / 2) &&
                (d->opd_pre_status == 0));
}

static inline int osp_precreate_near_empty(struct osp_device *d)
{
        int rc;

        /* XXX: do we really need locking here? */
        spin_lock(&d->opd_pre_lock);
        rc = osp_precreate_near_empty_nolock(d);
        spin_unlock(&d->opd_pre_lock);
        return rc;
}

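/*
 * Sends a synchronous OST_CREATE request asking the OST to precreate the next
 * batch (opd_pre_grow_count) of objects, then updates the local precreated
 * window and adjusts the grow count depending on how many objects the OST
 * actually managed to create.
 */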
static int osp_precreate_send(struct osp_device *d)
{
        struct ptlrpc_request   *req;
        struct obd_import       *imp;
        struct ost_body         *body;
        int                      rc, grow, diff;

        ENTRY;

        /* don't precreate new objects till the OST is healthy and
         * has free space */
        if (unlikely(d->opd_pre_status)) {
                CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
                       d->opd_obd->obd_name, d->opd_pre_status);
                RETURN(0);
        }

        /*
         * if connection/initialization is not completed, ignore
         */
        imp = d->opd_obd->u.cli.cl_import;
        LASSERT(imp);

        req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
        if (req == NULL)
                RETURN(-ENOMEM);
        req->rq_request_portal = OST_CREATE_PORTAL;
        /* we should not resend the create request - we will run delorphan
         * anyway and kill these objects */
        req->rq_no_delay = req->rq_no_resend = 1;

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        spin_lock(&d->opd_pre_lock);
        if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
                d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
        grow = d->opd_pre_grow_count;
        spin_unlock(&d->opd_pre_lock);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        body->oa.o_id = d->opd_pre_last_created + grow;
        body->oa.o_seq = FID_SEQ_OST_MDT0; /* XXX: support for CMD? */
        body->oa.o_valid = OBD_MD_FLGROUP;

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc) {
                CERROR("%s: can't precreate: rc = %d\n",
                       d->opd_obd->obd_name, rc);
                GOTO(out_req, rc);
        }
        LASSERT(req->rq_transno == 0);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_HA, "%s: new last_created "LPU64"\n", d->opd_obd->obd_name,
               body->oa.o_id);
        LASSERT(body->oa.o_id > d->opd_pre_used_id);

        diff = body->oa.o_id - d->opd_pre_last_created;

        spin_lock(&d->opd_pre_lock);
        if (diff < grow) {
                /* the OST has not managed to create all the
                 * objects we asked for */
                d->opd_pre_grow_count = max(diff, OST_MIN_PRECREATE);
                d->opd_pre_grow_slow = 1;
        } else {
                /* the OST is able to keep up with the work,
                 * we could consider increasing grow_count
                 * next time if needed */
                d->opd_pre_grow_slow = 0;
        }
        d->opd_pre_last_created = body->oa.o_id;
        spin_unlock(&d->opd_pre_lock);
        CDEBUG(D_OTHER, "current precreated pool: %llu-%llu\n",
               d->opd_pre_used_id, d->opd_pre_last_created);

out_req:
        /* now we can wake up all users waiting for objects */
        osp_pre_update_status(d, rc);
        cfs_waitq_signal(&d->opd_pre_user_waitq);

        ptlrpc_req_finished(req);
        RETURN(rc);
}


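/*
 * Fetches the last committed object id from the OST via OST_GET_INFO
 * (KEY_LAST_ID); used when the locally stored last used id looks suspicious.
 * On failure the import is deactivated so the administrator can investigate.
 */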
static int osp_get_lastid_from_ost(struct osp_device *d)
{
        struct ptlrpc_request   *req;
        struct obd_import       *imp;
        obd_id                  *reply;
        char                    *tmp;
        int                      rc;

        imp = d->opd_obd->u.cli.cl_import;
        LASSERT(imp);

        req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_ID);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
                             RCL_CLIENT, sizeof(KEY_LAST_ID));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
        memcpy(tmp, KEY_LAST_ID, sizeof(KEY_LAST_ID));

        req->rq_no_delay = req->rq_no_resend = 1;
        ptlrpc_request_set_replen(req);
        rc = ptlrpc_queue_wait(req);
        if (rc) {
                /* a misbehaving OST.. let the sysadmin sort this out */
                ptlrpc_set_import_active(imp, 0);
                GOTO(out, rc);
        }

        reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
        if (reply == NULL)
                GOTO(out, rc = -EPROTO);

        d->opd_last_used_id = *reply;
        CDEBUG(D_HA, "%s: got last_id "LPU64" from OST\n",
               d->opd_obd->obd_name, d->opd_last_used_id);

out:
        ptlrpc_req_finished(req);
        RETURN(rc);

}

/**
 * asks the OST to clean up precreated orphans
 * and gets the next id for new objects
 */
static int osp_precreate_cleanup_orphans(struct osp_device *d)
{
        struct ptlrpc_request   *req = NULL;
        struct obd_import       *imp;
        struct ost_body         *body;
        int                      rc;

        ENTRY;

        LASSERT(d->opd_recovery_completed);
        LASSERT(d->opd_pre_reserved == 0);

        CDEBUG(D_HA, "%s: going to cleanup orphans since "LPU64"\n",
                d->opd_obd->obd_name, d->opd_last_used_id);

        if (d->opd_last_used_id < 2) {
                /* lastid looks strange... ask OST */
                rc = osp_get_lastid_from_ost(d);
                if (rc)
                        GOTO(out, rc);
        }

        imp = d->opd_obd->u.cli.cl_import;
        LASSERT(imp);

        req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        body->oa.o_flags = OBD_FL_DELORPHAN;
        body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
        body->oa.o_seq = FID_SEQ_OST_MDT0;

        body->oa.o_id = d->opd_last_used_id;

        ptlrpc_request_set_replen(req);

        /* Don't resend the delorphan req */
        req->rq_no_resend = req->rq_no_delay = 1;

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        /*
         * the OST provides us with the id the new pool starts from
         * in body->oa.o_id
         */
        spin_lock(&d->opd_pre_lock);
        if (le64_to_cpu(d->opd_last_used_id) > body->oa.o_id) {
                d->opd_pre_grow_count = OST_MIN_PRECREATE +
                                        le64_to_cpu(d->opd_last_used_id) -
                                        body->oa.o_id;
                d->opd_pre_last_created = le64_to_cpu(d->opd_last_used_id);
        } else {
                d->opd_pre_grow_count = OST_MIN_PRECREATE;
                d->opd_pre_last_created = body->oa.o_id;
        }
        d->opd_pre_used_id = d->opd_pre_last_created;
        d->opd_pre_grow_slow = 0;
        spin_unlock(&d->opd_pre_lock);

        CDEBUG(D_HA, "%s: Got last_id "LPU64" from OST, last_used is "LPU64
               ", pre_used "LPU64"\n", d->opd_obd->obd_name, body->oa.o_id,
               le64_to_cpu(d->opd_last_used_id), d->opd_pre_used_id);

out:
        if (req)
                ptlrpc_req_finished(req);

        RETURN(rc);
}

/*
 * the function updates the current precreation status: functional or not
 *
 * rc is the last code from the transport; rc == 0 means the transport works
 * well and users of LOD can use objects from this OSP
 *
 * the status also depends on the current usage of the OST
 */
void osp_pre_update_status(struct osp_device *d, int rc)
{
        struct obd_statfs       *msfs = &d->opd_statfs;
        int                      old = d->opd_pre_status;
        __u64                    used;

        d->opd_pre_status = rc;
        if (rc)
                goto out;

        /* Add a bit of hysteresis so this flag isn't continually flapping,
         * and ensure that new files don't get extremely fragmented due to
         * only a small amount of available space in the filesystem.
         * We want to set the NOSPC flag when there is less than ~0.1% free
         * and clear it when there is at least ~0.2% free space, so:
         *                   avail < ~0.1% max          max = avail + used
         *            1025 * avail < avail + used       used = blocks - free
         *            1024 * avail < used
         *            1024 * avail < blocks - free
         *                   avail < ((blocks - free) >> 10)
         *
         * On a very large disk, say 16TB, 0.1% is 16 GB. We don't want to
         * lose that much space, so in those cases we report no space left
         * if there is less than 1 GB left.                             */
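        /* Illustrative numbers only (not taken from the code): with
         * 1,000,000 used blocks the threshold is 1,000,000 >> 10 = 976
         * blocks of available space, i.e. roughly 0.1% of the used space. */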
        if (likely(msfs->os_type)) {
                used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10,
                                    1 << 30);
                if ((msfs->os_ffree < 32) || (msfs->os_bavail < used)) {
                        d->opd_pre_status = -ENOSPC;
                        if (old != -ENOSPC)
                                CDEBUG(D_INFO, "%s: status: "LPU64" blocks, "
                                       LPU64" free, "LPU64" used, "LPU64" "
                                       "avail -> %d: rc = %d\n",
                                       d->opd_obd->obd_name, msfs->os_blocks,
                                       msfs->os_bfree, used, msfs->os_bavail,
                                       d->opd_pre_status, rc);
                        CDEBUG(D_INFO,
                               "non-committed changes: %lu, in progress: %u\n",
                               d->opd_syn_changes, d->opd_syn_rpc_in_progress);
                } else if (old == -ENOSPC) {
                        d->opd_pre_status = 0;
                        d->opd_pre_grow_slow = 0;
                        d->opd_pre_grow_count = OST_MIN_PRECREATE;
                        cfs_waitq_signal(&d->opd_pre_waitq);
                        CDEBUG(D_INFO, "%s: space available: "LPU64" blocks, "
                               LPU64" free, "LPU64" used, "LPU64" avail -> %d: "
                               "rc = %d\n", d->opd_obd->obd_name,
                               msfs->os_blocks, msfs->os_bfree, used,
                               msfs->os_bavail, d->opd_pre_status, rc);
                }
        }

out:
        cfs_waitq_signal(&d->opd_pre_user_waitq);
}

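/*
 * Main precreation thread: waits for a connection to the OST, refreshes
 * statfs data, performs orphan cleanup once local recovery is done, and then
 * keeps the pool of precreated objects topped up until the device is stopped.
 */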
static int osp_precreate_thread(void *_arg)
{
        struct osp_device       *d = _arg;
        struct ptlrpc_thread    *thread = &d->opd_pre_thread;
        struct l_wait_info       lwi = { 0 };
        char                     pname[16];
        int                      rc;

        ENTRY;

        sprintf(pname, "osp-pre-%u", d->opd_index);
        cfs_daemonize(pname);

        spin_lock(&d->opd_pre_lock);
        thread->t_flags = SVC_RUNNING;
        spin_unlock(&d->opd_pre_lock);
        cfs_waitq_signal(&thread->t_ctl_waitq);

        while (osp_precreate_running(d)) {
                /*
                 * need to be connected to OST
                 */
                while (osp_precreate_running(d)) {
                        l_wait_event(d->opd_pre_waitq,
                                     !osp_precreate_running(d) ||
                                     d->opd_new_connection, &lwi);

                        if (!osp_precreate_running(d))
                                break;

                        if (!d->opd_new_connection)
                                continue;

                        /* got connected */
                        d->opd_new_connection = 0;
                        d->opd_got_disconnected = 0;
                        break;
                }

                osp_statfs_update(d);

                /*
                 * wait for local recovery to finish, so we can clean up
                 * orphans. orphans are all objects since "last used"
                 * (assigned), but there might be objects reserved and in
                 * some cases they won't be used. we can't clean them up
                 * till we're sure they won't be used. so we block new
                 * reservations and wait till all reserved objects are
                 * either used or released.
                 */
                l_wait_event(d->opd_pre_waitq, (!d->opd_pre_reserved &&
                                                d->opd_recovery_completed) ||
                             !osp_precreate_running(d) ||
                             d->opd_got_disconnected, &lwi);

                if (osp_precreate_running(d) && !d->opd_got_disconnected) {
                        rc = osp_precreate_cleanup_orphans(d);
                        if (rc) {
                                CERROR("%s: cannot cleanup orphans: rc = %d\n",
                                       d->opd_obd->obd_name, rc);
                                /* we can't proceed from here, the OST seems
                                 * to be in bad shape; better to wait for
                                 * a new instance of the server and repeat
                                 * from the beginning. notify possible waiters
                                 * that this OSP isn't quite functional yet */
                                osp_pre_update_status(d, rc);
                                cfs_waitq_signal(&d->opd_pre_user_waitq);
                                l_wait_event(d->opd_pre_waitq,
                                             !osp_precreate_running(d) ||
                                             d->opd_new_connection, &lwi);
                                continue;
                        }
                }

                /*
                 * connected, can handle precreates now
                 */
                while (osp_precreate_running(d)) {
                        l_wait_event(d->opd_pre_waitq,
                                     !osp_precreate_running(d) ||
                                     osp_precreate_near_empty(d) ||
                                     osp_statfs_need_update(d) ||
                                     d->opd_got_disconnected, &lwi);

                        if (!osp_precreate_running(d))
                                break;

                        /* something happened to the connection,
                         * have to start from the beginning */
                        if (d->opd_got_disconnected)
                                break;

                        if (osp_statfs_need_update(d))
                                osp_statfs_update(d);

                        if (osp_precreate_near_empty(d)) {
                                rc = osp_precreate_send(d);
                                /* osp_precreate_send() sets opd_pre_status
                                 * in case of error, which prevents further
                                 * use of the failed device */
                                if (rc != 0 && rc != -ENOSPC &&
                                    rc != -ETIMEDOUT && rc != -ENOTCONN)
                                        CERROR("%s: cannot precreate objects:"
                                               " rc = %d\n",
                                               d->opd_obd->obd_name, rc);
                        }
                }
        }

        thread->t_flags = SVC_STOPPED;
        cfs_waitq_signal(&thread->t_ctl_waitq);

        RETURN(0);
}

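/*
 * Wait condition for osp_precreate_reserve(): true when either enough
 * precreated objects are available for this reservation, or the OST has
 * reported an error/no-space and there are no pending destroys that could
 * release more space.
 */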
static int osp_precreate_ready_condition(struct osp_device *d)
{
        __u64 next;

        /* ready if got enough precreated objects */
        /* we need to wait for others (opd_pre_reserved) and our object (+1) */
        next = d->opd_pre_used_id + d->opd_pre_reserved + 1;
        if (next <= d->opd_pre_last_created)
                return 1;

        /* ready if OST reported no space and no destroys in progress */
        if (d->opd_syn_changes + d->opd_syn_rpc_in_progress == 0 &&
            d->opd_pre_status != 0)
                return 1;

        return 0;
}

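/*
 * Called when osp_precreate_reserve() has waited for obd_timeout seconds
 * without getting an object: dump the current precreation state to the
 * console so slow or stuck creates can be diagnosed.
 */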
static int osp_precreate_timeout_condition(void *data)
{
        struct osp_device *d = data;

        LCONSOLE_WARN("%s: slow creates, last="LPU64", next="LPU64", "
                      "reserved="LPU64", syn_changes=%lu, "
                      "syn_rpc_in_progress=%d, status=%d\n",
                      d->opd_obd->obd_name, d->opd_pre_last_created,
                      d->opd_pre_used_id, d->opd_pre_reserved,
                      d->opd_syn_changes, d->opd_syn_rpc_in_progress,
                      d->opd_pre_status);

        return 0;
}

/*
 * called to reserve an object in the pool
 * return codes:
 *  ENOSPC - no space on the corresponding OST
 *  EAGAIN - precreation is in progress, try later
 *  EIO    - no access to the OST
 */
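/*
 * Illustrative caller pattern (a sketch only, not the actual LOD/OSP call
 * path): a successful reservation is expected to be paired with
 * osp_precreate_get_id(), e.g.
 *
 *      rc = osp_precreate_reserve(env, d);
 *      if (rc == 0)
 *              objid = osp_precreate_get_id(d);
 */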
int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
{
        struct l_wait_info       lwi;
        cfs_time_t               expire = cfs_time_shift(obd_timeout);
        int                      precreated, rc;
        int                      count = 0;

        ENTRY;

        LASSERT(d->opd_pre_last_created >= d->opd_pre_used_id);

        lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
                          osp_precreate_timeout_condition, d);

        /*
         * wait till:
         *  - preallocation is done
         *  - no free space expected soon
         *  - can't connect to OST for too long (obd_timeout)
         */
        while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
                rc == -ENODEV) {
                if (unlikely(rc == -ENODEV)) {
                        if (cfs_time_aftereq(cfs_time_current(), expire))
                                break;
                }

#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 3, 90, 0)
                /*
                 * to address Andreas's concern on possible busy-loop
                 * between this thread and osp_precreate_send()
                 */
                if (unlikely(count++ == 1000)) {
                        osp_precreate_timeout_condition(d);
                        LBUG();
                }
#endif

                /*
                 * increase number of precreations
                 */
                if (d->opd_pre_grow_count < d->opd_pre_max_grow_count &&
                    d->opd_pre_grow_slow == 0 &&
                    (d->opd_pre_last_created - d->opd_pre_used_id <=
                     d->opd_pre_grow_count / 4 + 1)) {
                        spin_lock(&d->opd_pre_lock);
                        d->opd_pre_grow_slow = 1;
                        d->opd_pre_grow_count *= 2;
                        spin_unlock(&d->opd_pre_lock);
                }

                spin_lock(&d->opd_pre_lock);
                precreated = d->opd_pre_last_created - d->opd_pre_used_id;
                if (precreated > d->opd_pre_reserved) {
                        d->opd_pre_reserved++;
                        spin_unlock(&d->opd_pre_lock);
                        rc = 0;

                        /* XXX: don't wake up if precreation is in progress */
                        if (osp_precreate_near_empty_nolock(d))
                                cfs_waitq_signal(&d->opd_pre_waitq);

                        break;
                }
                spin_unlock(&d->opd_pre_lock);

                /*
                 * all precreated objects have been used and the no-space
                 * status leaves us no chance to succeed very soon; but if
                 * there are destroys in progress, then we should wait till
                 * they are done - some space might be released
                 */
                if (unlikely(rc == -ENOSPC)) {
                        if (d->opd_syn_changes) {
                                /* force local commit to release space */
                                dt_commit_async(env, d->opd_storage);
                        }
                        if (d->opd_syn_rpc_in_progress) {
                                /* just wait till destroys are done */
                                /* see l_wait_event() a few lines below */
                        }
                        if (d->opd_syn_changes +
                            d->opd_syn_rpc_in_progress == 0) {
                                /* no hope for free space */
                                break;
                        }
                }

                /* XXX: don't wake up if precreation is in progress */
                cfs_waitq_signal(&d->opd_pre_waitq);

                l_wait_event(d->opd_pre_user_waitq,
                             osp_precreate_ready_condition(d), &lwi);
        }

        RETURN(rc);
}

/*
 * this function relies on a reservation made beforehand
 */
__u64 osp_precreate_get_id(struct osp_device *d)
{
        obd_id objid;

        /* grab next id from the pool */
        spin_lock(&d->opd_pre_lock);
        LASSERT(d->opd_pre_used_id < d->opd_pre_last_created);
        objid = ++d->opd_pre_used_id;
        d->opd_pre_reserved--;
        /*
         * last_used_id must be changed along with getting the new id,
         * otherwise we might miscalculate the gap causing object loss or leak
         */
        osp_update_last_id(d, objid);
        spin_unlock(&d->opd_pre_lock);

        /*
         * probably the main thread suspended orphan cleanup till
         * all reservations are released, see the comment in
         * osp_precreate_thread() just before orphan cleanup
         */
        if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
                cfs_waitq_signal(&d->opd_pre_waitq);

        return objid;
}

/*
 * truncate an OST object to the given size using a synchronous
 * OST_PUNCH request
 */
int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
                        __u64 size)
{
        struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
        struct ptlrpc_request   *req = NULL;
        struct obd_import       *imp;
        struct ost_body         *body;
        struct obdo             *oa = NULL;
        int                      rc;

        ENTRY;

        imp = d->opd_obd->u.cli.cl_import;
        LASSERT(imp);

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        /* XXX: capa support? */
        /* osc_set_capa_size(req, &RMF_CAPA1, capa); */
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /*
         * XXX: decide how to handle resend here:
         * if we don't resend, then the client may see a wrong file size
         * if we do resend, then the MDS thread can get stuck for quite long
         */
        req->rq_no_resend = req->rq_no_delay = 1;

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        OBD_ALLOC_PTR(oa);
        if (oa == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = fid_ostid_pack(lu_object_fid(&dt->do_lu), &oa->o_oi);
        LASSERT(rc == 0);
        oa->o_size = size;
        oa->o_blocks = OBD_OBJECT_EOF;
        oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
                      OBD_MD_FLID | OBD_MD_FLGROUP;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        /* XXX: capa support? */
        /* osc_pack_capa(req, body, capa); */

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                CERROR("can't punch object: %d\n", rc);
out:
        ptlrpc_req_finished(req);
        if (oa)
                OBD_FREE_PTR(oa);
        RETURN(rc);
}

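/*
 * Initializes the precreation machinery: resets the pool state, sets up
 * locks, wait queues and the statfs timer, and starts the precreate thread,
 * waiting until it is running (or has stopped on error).
 */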
int osp_init_precreate(struct osp_device *d)
{
        struct l_wait_info       lwi = { 0 };
        int                      rc;

        ENTRY;

        /* initially precreation isn't ready */
        d->opd_pre_status = -EAGAIN;
        d->opd_pre_used_id = 0;
        d->opd_pre_last_created = 0;
        d->opd_pre_reserved = 0;
        d->opd_got_disconnected = 1;
        d->opd_pre_grow_slow = 0;
        d->opd_pre_grow_count = OST_MIN_PRECREATE;
        d->opd_pre_min_grow_count = OST_MIN_PRECREATE;
        d->opd_pre_max_grow_count = OST_MAX_PRECREATE;

        spin_lock_init(&d->opd_pre_lock);
        cfs_waitq_init(&d->opd_pre_waitq);
        cfs_waitq_init(&d->opd_pre_user_waitq);
        cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);

        /*
         * Initialize statfs-related things
         */
        d->opd_statfs_maxage = 5; /* default update interval */
        d->opd_statfs_fresh_till = cfs_time_shift(-1000);
        CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
               (unsigned long long)cfs_time_current(),
               (unsigned long long)d->opd_statfs_fresh_till);
        cfs_timer_init(&d->opd_statfs_timer, osp_statfs_timer_cb, d);

        /*
         * start thread handling precreation and statfs updates
         */
        rc = cfs_create_thread(osp_precreate_thread, d, 0);
        if (rc < 0) {
                CERROR("can't start precreate thread %d\n", rc);
                RETURN(rc);
        }

        l_wait_event(d->opd_pre_thread.t_ctl_waitq,
                     osp_precreate_running(d) || osp_precreate_stopped(d),
                     &lwi);

        RETURN(0);
}

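/*
 * Stops the precreation machinery: disarms the statfs timer, asks the
 * precreate thread to stop and waits until it has exited.
 */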
void osp_precreate_fini(struct osp_device *d)
{
        struct ptlrpc_thread *thread = &d->opd_pre_thread;

        ENTRY;

        cfs_timer_disarm(&d->opd_statfs_timer);

        thread->t_flags = SVC_STOPPING;
        cfs_waitq_signal(&d->opd_pre_waitq);

        cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);

        EXIT;
}