LU-1303 osp: land precreate code
fs/lustre-release.git: lustre/osp/osp_precreate.c
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19  *
20  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21  * CA 95054 USA or visit www.sun.com if you need additional information or
22  * have any questions.
23  *
24  * GPL HEADER END
25  */
26 /*
27  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28  * Use is subject to license terms.
29  *
30  * Copyright (c) 2011, 2012, Intel, Inc.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/osp/osp_precreate.c
37  *
38  * Lustre OST Proxy Device
39  *
40  * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
41  * Author: Mikhail Pershin <mike.pershin@intel.com>
42  */
43
44 #ifndef EXPORT_SYMTAB
45 # define EXPORT_SYMTAB
46 #endif
47 #define DEBUG_SUBSYSTEM S_MDS
48
49 #include "osp_internal.h"
50
51 /*
52  * there are two specific states to take care of:
53  *
54  * = import is disconnected =
55  *
56  * = import is inactive =
57  *   in this case osp_declare_object_create() returns an error
58  *
59  */
60
61 /*
62  * statfs
63  */
64 static inline int osp_statfs_need_update(struct osp_device *d)
65 {
66         return !cfs_time_before(cfs_time_current(),
67                                 d->opd_statfs_fresh_till);
68 }
69
70 static void osp_statfs_timer_cb(unsigned long _d)
71 {
72         struct osp_device *d = (struct osp_device *) _d;
73
74         LASSERT(d);
75         cfs_waitq_signal(&d->opd_pre_waitq);
76 }
77
78 static int osp_statfs_interpret(const struct lu_env *env,
79                                 struct ptlrpc_request *req,
80                                 union ptlrpc_async_args *aa, int rc)
81 {
82         struct obd_import       *imp = req->rq_import;
83         struct obd_statfs       *msfs;
84         struct osp_device       *d;
85
86         ENTRY;
87
88         aa = ptlrpc_req_async_args(req);
89         d = aa->pointer_arg[0];
90         LASSERT(d);
91
92         if (rc != 0)
93                 GOTO(out, rc);
94
95         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
96         if (msfs == NULL)
97                 GOTO(out, rc = -EPROTO);
98
99         d->opd_statfs = *msfs;
100
101         osp_pre_update_status(d, rc);
102
103         /* schedule next update */
104         d->opd_statfs_fresh_till = cfs_time_shift(d->opd_statfs_maxage);
105         cfs_timer_arm(&d->opd_statfs_timer, d->opd_statfs_fresh_till);
106         d->opd_statfs_update_in_progress = 0;
107
108         CDEBUG(D_CACHE, "updated statfs %p\n", d);
109
110         RETURN(0);
111 out:
112         /* couldn't update statfs, try again as soon as possible */
113         cfs_waitq_signal(&d->opd_pre_waitq);
114         if (req->rq_import_generation == imp->imp_generation)
115                 CERROR("%s: couldn't update statfs: rc = %d\n",
116                        d->opd_obd->obd_name, rc);
117         RETURN(rc);
118 }
119
120 static int osp_statfs_update(struct osp_device *d)
121 {
122         struct ptlrpc_request   *req;
123         struct obd_import       *imp;
124         union ptlrpc_async_args *aa;
125         int                      rc;
126
127         ENTRY;
128
129         CDEBUG(D_CACHE, "going to update statfs\n");
130
131         imp = d->opd_obd->u.cli.cl_import;
132         LASSERT(imp);
133
134         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
135         if (req == NULL)
136                 RETURN(-ENOMEM);
137
138         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
139         if (rc) {
140                 ptlrpc_request_free(req);
141                 RETURN(rc);
142         }
143         ptlrpc_request_set_replen(req);
144         req->rq_request_portal = OST_CREATE_PORTAL;
145         ptlrpc_at_set_req_timeout(req);
146
147         req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
148         aa = ptlrpc_req_async_args(req);
149         aa->pointer_arg[0] = d;
150
151         ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
152
153         cfs_timer_disarm(&d->opd_statfs_timer);
154
155         /*
156          * no updates till reply
157          */
158         d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
159         d->opd_statfs_update_in_progress = 1;
160
161         RETURN(0);
162 }
163
164 /*
165  * XXX: there might be a case where removed objects do not add any free
166  * space (empty objects). If the number of such deletions is high, we can
167  * start to update statfs too often, causing an RPC storm.
168  * TODO: some throttling is needed; one possible approach is sketched below
169  */
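/*
 * One possible throttling approach, sketched here only as an illustration of
 * the TODO above; OSP_STATFS_MIN_INTERVAL and opd_statfs_last_forced are
 * made-up names, not existing fields:
 *
 *      if (!d->opd_statfs_update_in_progress &&
 *          cfs_time_aftereq(cfs_time_current(),
 *                           cfs_time_add(d->opd_statfs_last_forced,
 *                              cfs_time_seconds(OSP_STATFS_MIN_INTERVAL)))) {
 *              d->opd_statfs_last_forced = cfs_time_current();
 *              d->opd_statfs_fresh_till = cfs_time_shift(-1);
 *              cfs_timer_disarm(&d->opd_statfs_timer);
 *              cfs_waitq_signal(&d->opd_pre_waitq);
 *      }
 *
 * i.e. honour a forced refresh only if the previous one is older than some
 * minimal interval, so a burst of destroys triggers a single statfs RPC.
 */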
170 void osp_statfs_need_now(struct osp_device *d)
171 {
172         if (!d->opd_statfs_update_in_progress) {
173                 /*
174                  * if the current status is -ENOSPC (no free space on the OST),
175                  * then we should poll the OST immediately once the object
176                  * destroy is replied
177                  */
178                 d->opd_statfs_fresh_till = cfs_time_shift(-1);
179                 cfs_timer_disarm(&d->opd_statfs_timer);
180                 cfs_waitq_signal(&d->opd_pre_waitq);
181         }
182 }
183
184
185 /*
186  * OSP tries to maintain a pool of available objects so that calls to create
187  * objects don't block most of the time; see the bookkeeping sketch below
188  *
189  * each time OSP gets connected to the OST, it should start with precreation cleanup
190  */
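/*
 * A minimal sketch of the pool bookkeeping used throughout this file (the
 * object ids below are hypothetical, the fields are real):
 *
 *      opd_pre_next         = 1000   first precreated id not yet handed out
 *      opd_pre_last_created = 1100   last id the OST has precreated
 *      opd_pre_reserved     = 10     ids promised to callers, not consumed yet
 *
 *      window    = opd_pre_last_created - opd_pre_next;        -> 100
 *      available = window - opd_pre_reserved;                  -> 90
 *
 * osp_precreate_near_empty_nolock() below asks for more precreates once
 * "available" drops under opd_pre_grow_count / 2.
 */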
191 static inline int osp_precreate_running(struct osp_device *d)
192 {
193         return !!(d->opd_pre_thread.t_flags & SVC_RUNNING);
194 }
195
196 static inline int osp_precreate_stopped(struct osp_device *d)
197 {
198         return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
199 }
200
201 static inline int osp_precreate_near_empty_nolock(struct osp_device *d)
202 {
203         int window = d->opd_pre_last_created - d->opd_pre_next;
204
205         /* don't consider new precreation till the OST is healthy and
206          * has free space */
207         return ((window - d->opd_pre_reserved < d->opd_pre_grow_count / 2) &&
208                 (d->opd_pre_status == 0));
209 }
210
211 static inline int osp_precreate_near_empty(struct osp_device *d)
212 {
213         int rc;
214
215         /* XXX: do we really need locking here? */
216         cfs_spin_lock(&d->opd_pre_lock);
217         rc = osp_precreate_near_empty_nolock(d);
218         cfs_spin_unlock(&d->opd_pre_lock);
219         return rc;
220 }
221
222 static int osp_precreate_send(struct osp_device *d)
223 {
224         struct ptlrpc_request   *req;
225         struct obd_import       *imp;
226         struct ost_body         *body;
227         int                      rc, grow, diff;
228
229         ENTRY;
230
231         /* don't precreate new objects till the OST is healthy and has free space */
232         if (unlikely(d->opd_pre_status)) {
233                 CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
234                        d->opd_obd->obd_name, d->opd_pre_status);
235                 RETURN(0);
236         }
237
238         /*
239          * if the connection/initialization is not completed yet, ignore
240          */
241         imp = d->opd_obd->u.cli.cl_import;
242         LASSERT(imp);
243
244         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
245         if (req == NULL)
246                 RETURN(-ENOMEM);
247         req->rq_request_portal = OST_CREATE_PORTAL;
248         /* we should not resend the create request: on reconnect we will
249          * run delorphan anyway, and it will kill these objects */
250         req->rq_no_delay = req->rq_no_resend = 1;
251
252         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
253         if (rc) {
254                 ptlrpc_request_free(req);
255                 RETURN(rc);
256         }
257
258         cfs_spin_lock(&d->opd_pre_lock);
259         if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
260                 d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
261         grow = d->opd_pre_grow_count;
262         cfs_spin_unlock(&d->opd_pre_lock);
263
264         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
265         LASSERT(body);
266         body->oa.o_id = d->opd_pre_last_created + grow;
267         body->oa.o_seq = FID_SEQ_OST_MDT0; /* XXX: support for CMD? */
268         body->oa.o_valid = OBD_MD_FLGROUP;
269
270         ptlrpc_request_set_replen(req);
271
272         rc = ptlrpc_queue_wait(req);
273         if (rc) {
274                 CERROR("%s: can't precreate: rc = %d\n",
275                        d->opd_obd->obd_name, rc);
276                 GOTO(out_req, rc);
277         }
278         LASSERT(req->rq_transno == 0);
279
280         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
281         if (body == NULL)
282                 GOTO(out_req, rc = -EPROTO);
283
284         CDEBUG(D_HA, "new last_created %lu\n", (unsigned long) body->oa.o_id);
285         LASSERT(body->oa.o_id > d->opd_pre_next);
286
287         diff = body->oa.o_id - d->opd_pre_last_created;
288
289         cfs_spin_lock(&d->opd_pre_lock);
290         if (diff < grow) {
291                 /* the OST has not managed to create all the
292                  * objects we asked for */
293                 d->opd_pre_grow_count = max(diff, OST_MIN_PRECREATE);
294                 d->opd_pre_grow_slow = 1;
295         } else {
296                 /* the OST is able to keep up with the work,
297                  * we could consider increasing grow_count
298                  * next time if needed */
299                 d->opd_pre_grow_slow = 0;
300         }
301         d->opd_pre_last_created = body->oa.o_id;
302         cfs_spin_unlock(&d->opd_pre_lock);
303         CDEBUG(D_OTHER, "current precreated pool: %llu-%llu\n",
304                d->opd_pre_next, d->opd_pre_last_created);
305
306 out_req:
307         /* now we can wake up all users waiting for objects */
308         osp_pre_update_status(d, rc);
309         cfs_waitq_signal(&d->opd_pre_user_waitq);
310
311         ptlrpc_req_finished(req);
312         RETURN(rc);
313 }
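/*
 * Note on window sizing, summarizing osp_precreate_send() above and
 * osp_precreate_reserve() below: a single request never asks for more than
 * opd_pre_max_grow_count / 2 new objects; if the OST created fewer objects
 * than requested, opd_pre_grow_count shrinks to max(diff, OST_MIN_PRECREATE)
 * and further growth is paused via opd_pre_grow_slow; otherwise reserve()
 * may double opd_pre_grow_count again once the window runs low.
 */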
314
315 /**
316  * asks the OST to clean up precreated orphans
317  * and gets the id new objects will start from
318  */
319 static int osp_precreate_cleanup_orphans(struct osp_device *d)
320 {
321         struct ptlrpc_request   *req = NULL;
322         struct obd_import       *imp;
323         struct ost_body         *body;
324         int                      rc;
325
326         ENTRY;
327
328         LASSERT(d->opd_recovery_completed);
329         LASSERT(d->opd_pre_reserved == 0);
330
331         imp = d->opd_obd->u.cli.cl_import;
332         LASSERT(imp);
333
334         req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
335         if (req == NULL)
336                 RETURN(-ENOMEM);
337
338         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
339         if (rc) {
340                 ptlrpc_request_free(req);
341                 RETURN(rc);
342         }
343
344         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
345         if (body == NULL)
346                 GOTO(out_req, rc = -EPROTO);
347
348         body->oa.o_flags = OBD_FL_DELORPHAN;
349         body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
350         body->oa.o_seq = FID_SEQ_OST_MDT0;
351
352         /* delete orphans starting from the id right after the last used one */
353         body->oa.o_id = d->opd_last_used_id + 1;
354
355         ptlrpc_request_set_replen(req);
356
357         /* Don't resend the delorphan req */
358         req->rq_no_resend = req->rq_no_delay = 1;
359
360         rc = ptlrpc_queue_wait(req);
361         if (rc)
362                 GOTO(out_req, rc);
363
364         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
365         if (body == NULL)
366                 GOTO(out_req, rc = -EPROTO);
367
368         /*
369          * the OST returns the id the new pool starts from in body->oa.o_id
370          */
371         cfs_spin_lock(&d->opd_pre_lock);
372         if (le64_to_cpu(d->opd_last_used_id) > body->oa.o_id) {
373                 d->opd_pre_grow_count = OST_MIN_PRECREATE +
374                                         le64_to_cpu(d->opd_last_used_id) -
375                                         body->oa.o_id;
376                 d->opd_pre_last_created = le64_to_cpu(d->opd_last_used_id) + 1;
377         } else {
378                 d->opd_pre_grow_count = OST_MIN_PRECREATE;
379                 d->opd_pre_last_created = body->oa.o_id + 1;
380         }
381         d->opd_pre_next = d->opd_pre_last_created;
382         d->opd_pre_grow_slow = 0;
383         cfs_spin_unlock(&d->opd_pre_lock);
384
385         /* now we can wake up all users waiting for objects */
386         osp_pre_update_status(d, rc);
387         cfs_waitq_signal(&d->opd_pre_user_waitq);
388
389         CDEBUG(D_HA, "Got last_id "LPU64" from OST, last_used is "LPU64
390                ", next "LPU64"\n", body->oa.o_id,
391                le64_to_cpu(d->opd_last_used_id), d->opd_pre_next);
392
393 out_req:
394         ptlrpc_req_finished(req);
395         RETURN(rc);
396 }
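/*
 * A worked example of the reconciliation above (the ids are hypothetical):
 * suppose the locally stored opd_last_used_id is 1100 while the OST reports
 * o_id == 1000 (the OST lost precreated objects, e.g. it was restored from an
 * older backup). Then opd_pre_grow_count becomes OST_MIN_PRECREATE + 100 and
 * opd_pre_next = opd_pre_last_created = 1101, so the very next precreate
 * request asks the OST to recreate the missing range in one go. In the usual
 * case (the OST's o_id >= last used id) the pool simply restarts at o_id + 1
 * with the minimal grow count.
 */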
397
398 /*
399  * the function updates the current precreation status: functional or not
400  *
401  * rc is the last code from the transport; rc == 0 means the transport works
402  * well and LOD users can use objects from this OSP
403  *
404  * beyond that, the status depends on the current space usage on the OST
405  */
406 void osp_pre_update_status(struct osp_device *d, int rc)
407 {
408         struct obd_statfs       *msfs = &d->opd_statfs;
409         int                      old = d->opd_pre_status;
410         __u64                    used;
411
412         d->opd_pre_status = rc;
413         if (rc)
414                 goto out;
415
416         if (likely(msfs->os_type)) {
417                 used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10,
418                                     1 << 30);
419                 if ((msfs->os_ffree < 32) || (msfs->os_bavail < used)) {
420                         d->opd_pre_status = -ENOSPC;
421                         if (old != -ENOSPC)
422                                 CDEBUG(D_INFO, "%s: status: "LPU64" blocks, "
423                                        LPU64" free, "LPU64" used, "LPU64" "
424                                        "avail -> %d: rc = %d\n",
425                                        d->opd_obd->obd_name, msfs->os_blocks,
426                                        msfs->os_bfree, used, msfs->os_bavail,
427                                        d->opd_pre_status, rc);
428                 } else if (old == -ENOSPC) {
429                         d->opd_pre_status = 0;
430                         d->opd_pre_grow_slow = 0;
431                         d->opd_pre_grow_count = OST_MIN_PRECREATE;
432                         cfs_waitq_signal(&d->opd_pre_waitq);
433                         CDEBUG(D_INFO, "%s: no space: "LPU64" blocks, "LPU64
434                                " free, "LPU64" used, "LPU64" avail -> %d: "
435                                "rc = %d\n", d->opd_obd->obd_name,
436                                msfs->os_blocks, msfs->os_bfree, used,
437                                msfs->os_bavail, d->opd_pre_status, rc);
438                 }
439         }
440
441 out:
442         cfs_waitq_signal(&d->opd_pre_user_waitq);
443 }
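/*
 * A worked example of the check above (numbers are hypothetical): the OSP
 * keeps a reserve of (os_blocks - os_bfree) >> 10 blocks, i.e. ~0.1% of the
 * space already in use, capped at 1 << 30 blocks. With 8,388,608 blocks in
 * use the reserve is 8,192 blocks, so the device is switched to -ENOSPC as
 * soon as os_bavail drops below 8,192 blocks or fewer than 32 inodes remain,
 * and switched back once both conditions clear.
 */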
444
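/*
 * osp_precreate_thread() drives the machinery described above:
 *  1. wait until a (re)connection to the OST is established;
 *  2. refresh statfs and wait until local recovery has completed and no
 *     reservations are outstanding;
 *  3. clean up precreated orphans with osp_precreate_cleanup_orphans();
 *  4. loop refreshing statfs when it gets stale and sending new precreate
 *     requests whenever the pool gets near empty, until the import is
 *     disconnected or the thread is asked to stop.
 */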
445 static int osp_precreate_thread(void *_arg)
446 {
447         struct osp_device       *d = _arg;
448         struct ptlrpc_thread    *thread = &d->opd_pre_thread;
449         struct l_wait_info       lwi = { 0 };
450         char                     pname[16];
451         int                      rc;
452
453         ENTRY;
454
455         sprintf(pname, "osp-pre-%u", d->opd_index);
456         cfs_daemonize(pname);
457
458         cfs_spin_lock(&d->opd_pre_lock);
459         thread->t_flags = SVC_RUNNING;
460         cfs_spin_unlock(&d->opd_pre_lock);
461         cfs_waitq_signal(&thread->t_ctl_waitq);
462
463         while (osp_precreate_running(d)) {
464                 /*
465                  * need to be connected to OST
466                  */
467                 while (osp_precreate_running(d)) {
468                         l_wait_event(d->opd_pre_waitq,
469                                      !osp_precreate_running(d) ||
470                                      d->opd_new_connection, &lwi);
471
472                         if (!osp_precreate_running(d))
473                                 break;
474
475                         if (!d->opd_new_connection)
476                                 continue;
477
478                         /* got connected */
479                         d->opd_new_connection = 0;
480                         d->opd_got_disconnected = 0;
481                         break;
482                 }
483
484                 osp_statfs_update(d);
485
486                 /*
487                  * wait for local recovery to finish, so we can clean up orphans.
488                  * orphans are all objects since "last used" (assigned), but
489                  * there might be objects reserved and in some cases they won't
490                  * be used. we can't clean them up till we're sure they won't be
491                  * used, so we block new reservations and wait till all reserved
492                  * objects are either used or released.
493                  */
494                 l_wait_event(d->opd_pre_waitq, (!d->opd_pre_reserved &&
495                                                 d->opd_recovery_completed) ||
496                              !osp_precreate_running(d) ||
497                              d->opd_got_disconnected, &lwi);
498
499                 if (osp_precreate_running(d) && !d->opd_got_disconnected) {
500                         rc = osp_precreate_cleanup_orphans(d);
501                         if (rc) {
502                                 CERROR("%s: cannot cleanup orphans: rc = %d\n",
503                                        d->opd_obd->obd_name,  rc);
504                         }
505                 }
506
507                 /*
508                  * connected, can handle precreates now
509                  */
510                 while (osp_precreate_running(d)) {
511                         l_wait_event(d->opd_pre_waitq,
512                                      !osp_precreate_running(d) ||
513                                      osp_precreate_near_empty(d) ||
514                                      osp_statfs_need_update(d) ||
515                                      d->opd_got_disconnected, &lwi);
516
517                         if (!osp_precreate_running(d))
518                                 break;
519
520                         /* something happened to the connection
521                          * have to start from the beginning */
522                         if (d->opd_got_disconnected)
523                                 break;
524
525                         if (osp_statfs_need_update(d))
526                                 osp_statfs_update(d);
527
528                         if (osp_precreate_near_empty(d)) {
529                                 rc = osp_precreate_send(d);
530                                 /* osp_precreate_send() sets opd_pre_status
531                                  * in case of error, which prevents the failed
532                                  * device from being used. */
533                                 if (rc != 0 && rc != -ENOSPC &&
534                                     rc != -ETIMEDOUT && rc != -ENOTCONN)
535                                         CERROR("%s: cannot precreate objects:"
536                                                " rc = %d\n",
537                                                d->opd_obd->obd_name, rc);
538                         }
539                 }
540         }
541
542         thread->t_flags = SVC_STOPPED;
543         cfs_waitq_signal(&thread->t_ctl_waitq);
544
545         RETURN(0);
546 }
547
548 static int osp_precreate_ready_condition(struct osp_device *d)
549 {
550         /* ready if got enough precreated objects */
551         if (d->opd_pre_next + d->opd_pre_reserved < d->opd_pre_last_created)
552                 return 1;
553
554         /* ready if the precreation status is set (no space or no access) */
555         if (d->opd_pre_status != 0)
556                 return 1;
557
558         return 0;
559 }
560
561 static int osp_precreate_timeout_condition(void *data)
562 {
563         struct osp_device *d = data;
564
565         LCONSOLE_WARN("%s: slow creates, last="LPU64", next="LPU64", "
566                       "reserved="LPU64", status=%d\n",
567                       d->opd_obd->obd_name, d->opd_pre_last_created,
568                       d->opd_pre_next, d->opd_pre_reserved,
569                       d->opd_pre_status);
570
571         return 0;
572 }
573
574 /*
575  * called to reserve an object in the pool
576  * return codes:
577  *  ENOSPC - no space on the corresponding OST
578  *  EAGAIN - precreation is in progress, try later
579  *  EIO    - no access to OST
580  */
581 int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
582 {
583         struct l_wait_info       lwi;
584         cfs_time_t               expire = cfs_time_shift(obd_timeout);
585         int                      precreated, rc;
586
587         ENTRY;
588
589         LASSERT(d->opd_pre_last_created >= d->opd_pre_next);
590
591         lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
592                           osp_precreate_timeout_condition, d);
593
594         /*
595          * wait till:
596          *  - preallocation is done
597          *  - no free space expected soon
598          *  - can't connect to OST for too long (obd_timeout)
599          */
600         while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
601                 rc == -ENODEV) {
602                 if (unlikely(rc == -ENODEV)) {
603                         if (cfs_time_aftereq(cfs_time_current(), expire))
604                                 break;
605                 }
606
607                 /*
608                  * increase number of precreations
609                  */
610                 if (d->opd_pre_grow_count < d->opd_pre_max_grow_count &&
611                     d->opd_pre_grow_slow == 0 &&
612                     (d->opd_pre_last_created - d->opd_pre_next <=
613                      d->opd_pre_grow_count / 4 + 1)) {
614                         cfs_spin_lock(&d->opd_pre_lock);
615                         d->opd_pre_grow_slow = 1;
616                         d->opd_pre_grow_count *= 2;
617                         cfs_spin_unlock(&d->opd_pre_lock);
618                 }
619
620                 /*
621                  * we never use the last object in the window
622                  */
623                 cfs_spin_lock(&d->opd_pre_lock);
624                 precreated = d->opd_pre_last_created - d->opd_pre_next;
625                 if (precreated > d->opd_pre_reserved) {
626                         d->opd_pre_reserved++;
627                         cfs_spin_unlock(&d->opd_pre_lock);
628                         rc = 0;
629
630                         /* XXX: don't wake up if precreation is in progress */
631                         if (osp_precreate_near_empty_nolock(d))
632                                 cfs_waitq_signal(&d->opd_pre_waitq);
633
634                         break;
635                 }
636                 cfs_spin_unlock(&d->opd_pre_lock);
637
638                 /* XXX: don't wake up if precreation is in progress */
639                 cfs_waitq_signal(&d->opd_pre_waitq);
640
641                 l_wait_event(d->opd_pre_user_waitq,
642                              osp_precreate_ready_condition(d), &lwi);
643         }
644
645         RETURN(rc);
646 }
647
648 /*
649  * this function relies on a reservation made beforehand (see the sketch below)
650  */
651 __u64 osp_precreate_get_id(struct osp_device *d)
652 {
653         obd_id objid;
654
655         /* grab next id from the pool */
656         cfs_spin_lock(&d->opd_pre_lock);
657         LASSERT(d->opd_pre_next <= d->opd_pre_last_created);
658         objid = d->opd_pre_next++;
659         d->opd_pre_reserved--;
660         /*
661          * last_used_id must be changed along with getting the new id,
662          * otherwise we might miscalculate the gap, causing object loss or leak
663          */
664         osp_update_last_id(d, objid);
665         cfs_spin_unlock(&d->opd_pre_lock);
666
667         /*
668          * the main thread may have suspended orphan cleanup till
669          * all reservations are released; see the comment in
670          * osp_precreate_thread() just before the orphan cleanup
671          */
672         if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
673                 cfs_waitq_signal(&d->opd_pre_waitq);
674
675         return objid;
676 }
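/*
 * Illustrative usage sketch (hypothetical caller, not code from this file):
 * a creator such as osp_declare_object_create() is expected to pair the two
 * calls above roughly like this:
 *
 *      rc = osp_precreate_reserve(env, d);
 *      if (rc == 0) {
 *              objid = osp_precreate_get_id(d);
 *              ... build the new object around objid ...
 *      } else if (rc == -ENOSPC) {
 *              ... no space on the OST ...
 *      } else if (rc == -EAGAIN) {
 *              ... precreation still in progress, try again later ...
 *      } else {
 *              ... -EIO: no access to the OST ...
 *      }
 */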
677
678 /*
679  * punch (truncate) an OST object to the given size
680  */
681 int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
682                         __u64 size)
683 {
684         struct osp_device       *d = lu2osp_dev(dt->do_lu.lo_dev);
685         struct ptlrpc_request   *req = NULL;
686         struct obd_import       *imp;
687         struct ost_body         *body;
688         struct obdo             *oa = NULL;
689         int                      rc;
690
691         ENTRY;
692
693         imp = d->opd_obd->u.cli.cl_import;
694         LASSERT(imp);
695
696         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
697         if (req == NULL)
698                 RETURN(-ENOMEM);
699
700         /* XXX: capa support? */
701         /* osc_set_capa_size(req, &RMF_CAPA1, capa); */
702         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
703         if (rc) {
704                 ptlrpc_request_free(req);
705                 RETURN(rc);
706         }
707
708         /*
709          * XXX: decide how we handle resend here
710          * if we don't resend, then client may see wrong file size
711          * if we do resend, then MDS thread can get stuck for quite long
712          */
713         req->rq_no_resend = req->rq_no_delay = 1;
714
715         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
716         ptlrpc_at_set_req_timeout(req);
717
718         OBD_ALLOC_PTR(oa);
719         if (oa == NULL)
720                 GOTO(out, rc = -ENOMEM);
721
722         rc = fid_ostid_pack(lu_object_fid(&dt->do_lu), &oa->o_oi);
723         LASSERT(rc == 0);
724         oa->o_size = size;
725         oa->o_blocks = OBD_OBJECT_EOF;
726         oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
727                       OBD_MD_FLID | OBD_MD_FLGROUP;
728
729         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
730         LASSERT(body);
731         lustre_set_wire_obdo(&body->oa, oa);
732
733         /* XXX: capa support? */
734         /* osc_pack_capa(req, body, capa); */
735
736         ptlrpc_request_set_replen(req);
737
738         rc = ptlrpc_queue_wait(req);
739         if (rc)
740                 CERROR("can't punch object: rc = %d\n", rc);
741 out:
742         ptlrpc_req_finished(req);
743         if (oa)
744                 OBD_FREE_PTR(oa);
745         RETURN(rc);
746 }
747
748 int osp_init_precreate(struct osp_device *d)
749 {
750         struct l_wait_info       lwi = { 0 };
751         int                      rc;
752
753         ENTRY;
754
755         /* initially precreation isn't ready */
756         d->opd_pre_status = -EAGAIN;
757         d->opd_pre_next = 1;
758         d->opd_pre_last_created = 1;
759         d->opd_pre_reserved = 0;
760         d->opd_got_disconnected = 1;
761         d->opd_pre_grow_slow = 0;
762         d->opd_pre_grow_count = OST_MIN_PRECREATE;
763         d->opd_pre_min_grow_count = OST_MIN_PRECREATE;
764         d->opd_pre_max_grow_count = OST_MAX_PRECREATE;
765
766         cfs_spin_lock_init(&d->opd_pre_lock);
767         cfs_waitq_init(&d->opd_pre_waitq);
768         cfs_waitq_init(&d->opd_pre_user_waitq);
769         cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);
770
771         /*
772          * Initialize statfs-related things
773          */
774         d->opd_statfs_maxage = 5; /* default update interval */
775         d->opd_statfs_fresh_till = cfs_time_shift(-1000);
776         CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
777                (unsigned long long)cfs_time_current(),
778                (unsigned long long)d->opd_statfs_fresh_till);
779         cfs_timer_init(&d->opd_statfs_timer, osp_statfs_timer_cb, d);
780
781         /*
782          * start thread handling precreation and statfs updates
783          */
784         rc = cfs_create_thread(osp_precreate_thread, d, 0);
785         if (rc < 0) {
786                 CERROR("can't start precreate thread %d\n", rc);
787                 RETURN(rc);
788         }
789
790         l_wait_event(d->opd_pre_thread.t_ctl_waitq,
791                      osp_precreate_running(d) || osp_precreate_stopped(d),
792                      &lwi);
793
794         RETURN(0);
795 }
796
797 void osp_precreate_fini(struct osp_device *d)
798 {
799         struct ptlrpc_thread *thread = &d->opd_pre_thread;
800
801         ENTRY;
802
803         cfs_timer_disarm(&d->opd_statfs_timer);
804
805         thread->t_flags = SVC_STOPPING;
806         cfs_waitq_signal(&d->opd_pre_waitq);
807
808         cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
809
810         EXIT;
811 }
812