/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osp/osp_sync.c
 *
 * Lustre OST Proxy Device
 *
 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif

#define DEBUG_SUBSYSTEM S_MDS

#include "osp_internal.h"
/*
 * there are two specific import states to take care of:
 *
 * = import is disconnected =
 *
 * = import is inactive =
 *   in this case osp_declare_object_create() returns an error
 */

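/*
 * Returns true once the cached statfs data has passed its freshness
 * deadline (opd_statfs_fresh_till) and should be refreshed from the OST.
 */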
static inline int osp_statfs_need_update(struct osp_device *d)
{
	return !cfs_time_before(cfs_time_current(),
				d->opd_statfs_fresh_till);
}

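/*
 * Timer callback: wakes up the precreate thread so it can refresh the
 * statfs data once opd_statfs_fresh_till has passed.
 */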
static void osp_statfs_timer_cb(unsigned long _d)
{
	struct osp_device *d = (struct osp_device *) _d;

	cfs_waitq_signal(&d->opd_pre_waitq);
}

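/*
 * Reply callback for the asynchronous OST_STATFS request: caches the
 * returned statfs data, updates the precreation status and re-arms the
 * statfs refresh timer. On failure the precreate thread is woken up to
 * retry the update as soon as possible.
 */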
static int osp_statfs_interpret(const struct lu_env *env,
				struct ptlrpc_request *req,
				union ptlrpc_async_args *aa, int rc)
{
	struct obd_import	*imp = req->rq_import;
	struct obd_statfs	*msfs;
	struct osp_device	*d;

	aa = ptlrpc_req_async_args(req);
	d = aa->pointer_arg[0];
	LASSERT(d);

	if (rc != 0)
		GOTO(out, rc);

	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (msfs == NULL)
		GOTO(out, rc = -EPROTO);

	d->opd_statfs = *msfs;

	osp_pre_update_status(d, rc);

	/* schedule next update */
	d->opd_statfs_fresh_till = cfs_time_shift(d->opd_statfs_maxage);
	cfs_timer_arm(&d->opd_statfs_timer, d->opd_statfs_fresh_till);
	d->opd_statfs_update_in_progress = 0;

	CDEBUG(D_CACHE, "updated statfs %p\n", d);

	RETURN(0);
out:
	/* couldn't update statfs, try again as soon as possible */
	cfs_waitq_signal(&d->opd_pre_waitq);
	if (req->rq_import_generation == imp->imp_generation)
		CERROR("%s: couldn't update statfs: rc = %d\n",
		       d->opd_obd->obd_name, rc);
	RETURN(rc);
}

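/*
 * Sends an asynchronous OST_STATFS request and marks an update as being
 * in progress; osp_statfs_interpret() completes it when the reply arrives.
 */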
static int osp_statfs_update(struct osp_device *d)
{
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	union ptlrpc_async_args	*aa;
	int			 rc;

	CDEBUG(D_CACHE, "going to update statfs\n");

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	ptlrpc_request_set_replen(req);
	req->rq_request_portal = OST_CREATE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	req->rq_interpret_reply = (ptlrpc_interpterer_t)osp_statfs_interpret;
	aa = ptlrpc_req_async_args(req);
	aa->pointer_arg[0] = d;

	/*
	 * no updates till reply
	 */
	cfs_timer_disarm(&d->opd_statfs_timer);
	d->opd_statfs_fresh_till = cfs_time_shift(obd_timeout * 1000);
	d->opd_statfs_update_in_progress = 1;

	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);

	RETURN(0);
}

/*
 * XXX: there might be a case where removed object(s) do not add free
 * space (empty objects). If the number of such deletions is high, then
 * we can start to update statfs too often - an RPC storm.
 * TODO: some throttling is needed
 */
void osp_statfs_need_now(struct osp_device *d)
{
	if (!d->opd_statfs_update_in_progress) {
		/*
		 * if the current status is -ENOSPC (lack of free space
		 * on the OST) then we should poll the OST immediately
		 * once the object destroy is replied
		 */
		d->opd_statfs_fresh_till = cfs_time_shift(-1);
		cfs_timer_disarm(&d->opd_statfs_timer);
		cfs_waitq_signal(&d->opd_pre_waitq);
	}
}

/*
 * OSP tries to maintain a pool of available objects so that calls to
 * create objects don't block most of the time.
 *
 * Each time OSP gets connected to the OST, we start from precreation
 * cleanup.
 */
static inline int osp_precreate_running(struct osp_device *d)
{
	return !!(d->opd_pre_thread.t_flags & SVC_RUNNING);
}

static inline int osp_precreate_stopped(struct osp_device *d)
{
	return !!(d->opd_pre_thread.t_flags & SVC_STOPPED);
}

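/*
 * Returns true when the number of precreated but unreserved objects drops
 * below half of the current grow count; callers take opd_pre_lock around
 * this check.
 */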
static inline int osp_precreate_near_empty_nolock(struct osp_device *d)
{
	int window = d->opd_pre_last_created - d->opd_pre_used_id;

	/* don't consider new precreation till OST is healthy and
	 * has some free space */
	return ((window - d->opd_pre_reserved < d->opd_pre_grow_count / 2) &&
		(d->opd_pre_status == 0));
}

static inline int osp_precreate_near_empty(struct osp_device *d)
{
	int rc;

	/* XXX: do we really need locking here? */
	spin_lock(&d->opd_pre_lock);
	rc = osp_precreate_near_empty_nolock(d);
	spin_unlock(&d->opd_pre_lock);

	return rc;
}

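/*
 * Sends a synchronous OST_CREATE request asking the OST to precreate the
 * next batch of opd_pre_grow_count objects, then updates the local
 * precreated pool boundaries and the grow parameters based on how many
 * objects the OST actually managed to create.
 */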
static int osp_precreate_send(struct osp_device *d)
{
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	struct ost_body		*body;
	int			 rc, grow, diff;

	/* don't precreate new objects till OST is healthy and has free space */
	if (unlikely(d->opd_pre_status)) {
		CDEBUG(D_INFO, "%s: don't send new precreate: rc = %d\n",
		       d->opd_obd->obd_name, d->opd_pre_status);
		RETURN(0);
	}

	/*
	 * if the connection/initialization is not completed yet, ignore
	 */
	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		RETURN(-ENOMEM);
	req->rq_request_portal = OST_CREATE_PORTAL;
	/* we should not resend the create request - we will issue delorphan
	 * anyway and kill these objects */
	req->rq_no_delay = req->rq_no_resend = 1;

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	spin_lock(&d->opd_pre_lock);
	if (d->opd_pre_grow_count > d->opd_pre_max_grow_count / 2)
		d->opd_pre_grow_count = d->opd_pre_max_grow_count / 2;
	grow = d->opd_pre_grow_count;
	spin_unlock(&d->opd_pre_lock);

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	body->oa.o_id = d->opd_pre_last_created + grow;
	body->oa.o_seq = FID_SEQ_OST_MDT0; /* XXX: support for CMD? */
	body->oa.o_valid = OBD_MD_FLGROUP;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		CERROR("%s: can't precreate: rc = %d\n",
		       d->opd_obd->obd_name, rc);
		GOTO(out_req, rc);
	}
	LASSERT(req->rq_transno == 0);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out_req, rc = -EPROTO);

	CDEBUG(D_HA, "%s: new last_created "LPU64"\n", d->opd_obd->obd_name,
	       body->oa.o_id);
	LASSERT(body->oa.o_id > d->opd_pre_used_id);

	diff = body->oa.o_id - d->opd_pre_last_created;

	spin_lock(&d->opd_pre_lock);
	if (diff < grow) {
		/* the OST has not managed to create all the
		 * objects we asked for */
		d->opd_pre_grow_count = max(diff, OST_MIN_PRECREATE);
		d->opd_pre_grow_slow = 1;
	} else {
		/* the OST is able to keep up with the work,
		 * we could consider increasing grow_count
		 * next time if needed */
		d->opd_pre_grow_slow = 0;
	}
	d->opd_pre_last_created = body->oa.o_id;
	spin_unlock(&d->opd_pre_lock);
	CDEBUG(D_OTHER, "current precreated pool: %llu-%llu\n",
	       d->opd_pre_used_id, d->opd_pre_last_created);

out_req:
	/* now we can wake up all users waiting for objects */
	osp_pre_update_status(d, rc);
	cfs_waitq_signal(&d->opd_pre_user_waitq);

	ptlrpc_req_finished(req);
	RETURN(rc);
}

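/*
 * Fetches the last object id known to the OST with an OST_GET_INFO
 * (KEY_LAST_ID) request and stores it in opd_last_used_id; used when the
 * locally known last id looks suspicious.
 */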
static int osp_get_lastid_from_ost(struct osp_device *d)
{
	struct ptlrpc_request	*req;
	struct obd_import	*imp;
	obd_id			*reply;
	char			*tmp;
	int			 rc;

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_GET_INFO_LAST_ID);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
			     RCL_CLIENT, sizeof(KEY_LAST_ID));
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
	memcpy(tmp, KEY_LAST_ID, sizeof(KEY_LAST_ID));

	req->rq_no_delay = req->rq_no_resend = 1;
	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	if (rc) {
		/* bad-bad OST.. let sysadm sort this out */
		ptlrpc_set_import_active(imp, 0);
		GOTO(out, rc);
	}

	reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
	if (reply == NULL)
		GOTO(out, rc = -EPROTO);

	d->opd_last_used_id = *reply;
	CDEBUG(D_HA, "%s: got last_id "LPU64" from OST\n",
	       d->opd_obd->obd_name, d->opd_last_used_id);

out:
	ptlrpc_req_finished(req);
	RETURN(rc);
}

/**
 * asks the OST to clean up precreate orphans
 * and gets the id the new pool starts from
 */
static int osp_precreate_cleanup_orphans(struct osp_device *d)
{
	struct ptlrpc_request	*req = NULL;
	struct obd_import	*imp;
	struct ost_body		*body;
	int			 rc;

	LASSERT(d->opd_recovery_completed);
	LASSERT(d->opd_pre_reserved == 0);

	CDEBUG(D_HA, "%s: going to cleanup orphans since "LPU64"\n",
	       d->opd_obd->obd_name, d->opd_last_used_id);

	if (d->opd_last_used_id < 2) {
		/* lastid looks strange... ask OST */
		rc = osp_get_lastid_from_ost(d);
		if (rc)
			RETURN(rc);
	}

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_CREATE);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		req = NULL;
		GOTO(out, rc);
	}

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	body->oa.o_flags = OBD_FL_DELORPHAN;
	body->oa.o_valid = OBD_MD_FLFLAGS | OBD_MD_FLGROUP;
	body->oa.o_seq = FID_SEQ_OST_MDT0;

	body->oa.o_id = d->opd_last_used_id;

	ptlrpc_request_set_replen(req);

	/* Don't resend the delorphan req */
	req->rq_no_resend = req->rq_no_delay = 1;

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	/*
	 * the OST provides us with the id the new pool starts from
	 * in body->oa.o_id
	 */
	spin_lock(&d->opd_pre_lock);
	if (le64_to_cpu(d->opd_last_used_id) > body->oa.o_id) {
		d->opd_pre_grow_count = OST_MIN_PRECREATE +
					le64_to_cpu(d->opd_last_used_id) -
					body->oa.o_id;
		d->opd_pre_last_created = le64_to_cpu(d->opd_last_used_id);
	} else {
		d->opd_pre_grow_count = OST_MIN_PRECREATE;
		d->opd_pre_last_created = body->oa.o_id;
	}
	d->opd_pre_used_id = d->opd_pre_last_created;
	d->opd_pre_grow_slow = 0;
	spin_unlock(&d->opd_pre_lock);

	CDEBUG(D_HA, "%s: Got last_id "LPU64" from OST, last_used is "LPU64
	       ", pre_used "LPU64"\n", d->opd_obd->obd_name, body->oa.o_id,
	       le64_to_cpu(d->opd_last_used_id), d->opd_pre_used_id);

out:
	if (req)
		ptlrpc_req_finished(req);
	RETURN(rc);
}

/**
 * the function updates the current precreation status: functional or not
 *
 * rc is the last code from the transport, rc == 0 meaning the transport
 * works well and users of LOD can use objects from this OSP
 *
 * the status also depends on the current space usage of the OST
 */
void osp_pre_update_status(struct osp_device *d, int rc)
{
	struct obd_statfs	*msfs = &d->opd_statfs;
	int			 old = d->opd_pre_status;
	__u64			 used;

	d->opd_pre_status = rc;
	if (rc)
		goto out;

	/* Add a bit of hysteresis so this flag isn't continually flapping,
	 * and ensure that new files don't get extremely fragmented due to
	 * only a small amount of available space in the filesystem.
	 * We want to set the NOSPC flag when there is less than ~0.1% free
	 * and clear it when there is at least ~0.2% free space, so:
	 *                   avail < ~0.1% max          max = avail + used
	 *            1025 * avail < avail + used       used = blocks - free
	 *            1024 * avail < used
	 *            1024 * avail < blocks - free
	 *                   avail < ((blocks - free) >> 10)
	 *
	 * On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want
	 * to lose that amount of space, so in those cases we report no space
	 * left if there is less than 1 GB left. */
	if (likely(msfs->os_type)) {
		used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10,
			     1 << 30);
		if ((msfs->os_ffree < 32) || (msfs->os_bavail < used)) {
			d->opd_pre_status = -ENOSPC;
			if (old != -ENOSPC)
				CDEBUG(D_INFO, "%s: status: "LPU64" blocks, "
				       LPU64" free, "LPU64" used, "LPU64" "
				       "avail -> %d: rc = %d\n",
				       d->opd_obd->obd_name, msfs->os_blocks,
				       msfs->os_bfree, used, msfs->os_bavail,
				       d->opd_pre_status, rc);
			CDEBUG(D_INFO,
			       "non-committed changes: %lu, in progress: %u\n",
			       d->opd_syn_changes, d->opd_syn_rpc_in_progress);
		} else if (old == -ENOSPC) {
			d->opd_pre_status = 0;
			d->opd_pre_grow_slow = 0;
			d->opd_pre_grow_count = OST_MIN_PRECREATE;
			cfs_waitq_signal(&d->opd_pre_waitq);
			CDEBUG(D_INFO, "%s: no space: "LPU64" blocks, "LPU64
			       " free, "LPU64" used, "LPU64" avail -> %d: "
			       "rc = %d\n", d->opd_obd->obd_name,
			       msfs->os_blocks, msfs->os_bfree, used,
			       msfs->os_bavail, d->opd_pre_status, rc);
		}
	}

out:
	cfs_waitq_signal(&d->opd_pre_user_waitq);
}

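/*
 * The main precreation thread: waits for a connection to the OST, performs
 * orphan cleanup once local recovery is complete, and then keeps the pool
 * of precreated objects and the cached statfs data up to date.
 */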
static int osp_precreate_thread(void *_arg)
{
	struct osp_device	*d = _arg;
	struct ptlrpc_thread	*thread = &d->opd_pre_thread;
	struct l_wait_info	 lwi = { 0 };
	char			 pname[16];
	int			 rc;

	sprintf(pname, "osp-pre-%u", d->opd_index);
	cfs_daemonize(pname);

	spin_lock(&d->opd_pre_lock);
	thread->t_flags = SVC_RUNNING;
	spin_unlock(&d->opd_pre_lock);
	cfs_waitq_signal(&thread->t_ctl_waitq);

	while (osp_precreate_running(d)) {
		/*
		 * need to be connected to OST
		 */
		while (osp_precreate_running(d)) {
			l_wait_event(d->opd_pre_waitq,
				     !osp_precreate_running(d) ||
				     d->opd_new_connection, &lwi);

			if (!osp_precreate_running(d))
				break;

			if (!d->opd_new_connection)
				continue;

			/* got connected */
			d->opd_new_connection = 0;
			d->opd_got_disconnected = 0;
			break;
		}

		osp_statfs_update(d);

		/*
		 * wait for local recovery to finish, so we can cleanup orphans.
		 * orphans are all objects since "last used" (assigned), but
		 * there might be objects reserved and in some cases they won't
		 * be used. we can't clean them up till we're sure they won't
		 * be used. so we block new reservations and wait till all
		 * reserved objects are either used or released.
		 */
		l_wait_event(d->opd_pre_waitq, (!d->opd_pre_reserved &&
						d->opd_recovery_completed) ||
			     !osp_precreate_running(d) ||
			     d->opd_got_disconnected, &lwi);

		if (osp_precreate_running(d) && !d->opd_got_disconnected) {
			rc = osp_precreate_cleanup_orphans(d);
			if (rc) {
				CERROR("%s: cannot cleanup orphans: rc = %d\n",
				       d->opd_obd->obd_name, rc);
				/* we can't proceed from here, the OST seems
				 * to be in bad shape, better to wait for
				 * a new instance of the server and repeat
				 * from the beginning. notify possible waiters
				 * that this OSP isn't quite functional yet */
				osp_pre_update_status(d, rc);
				cfs_waitq_signal(&d->opd_pre_user_waitq);
				l_wait_event(d->opd_pre_waitq,
					     !osp_precreate_running(d) ||
					     d->opd_new_connection, &lwi);
				continue;
			}
		}

		/*
		 * connected, can handle precreates now
		 */
		while (osp_precreate_running(d)) {
			l_wait_event(d->opd_pre_waitq,
				     !osp_precreate_running(d) ||
				     osp_precreate_near_empty(d) ||
				     osp_statfs_need_update(d) ||
				     d->opd_got_disconnected, &lwi);

			if (!osp_precreate_running(d))
				break;

			/* something happened to the connection,
			 * have to start from the beginning */
			if (d->opd_got_disconnected)
				break;

			if (osp_statfs_need_update(d))
				osp_statfs_update(d);

			if (osp_precreate_near_empty(d)) {
				rc = osp_precreate_send(d);
				/* osp_precreate_send() sets opd_pre_status
				 * in case of error, which prevents new
				 * allocations from this OSP */
				if (rc != 0 && rc != -ENOSPC &&
				    rc != -ETIMEDOUT && rc != -ENOTCONN)
					CERROR("%s: cannot precreate objects:"
					       " rc = %d\n",
					       d->opd_obd->obd_name, rc);
			}
		}
	}

	thread->t_flags = SVC_STOPPED;
	cfs_waitq_signal(&thread->t_ctl_waitq);

	RETURN(0);
}

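/*
 * Wake-up condition for osp_precreate_reserve(): true when enough objects
 * have been precreated for all pending reservations plus the caller, or
 * when there is no hope for more space (no pending destroys and a non-zero
 * precreation status).
 */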
static int osp_precreate_ready_condition(struct osp_device *d)
{
	__u64 next;

	/* ready if got enough precreated objects */
	/* we need to wait for others (opd_pre_reserved) and our object (+1) */
	next = d->opd_pre_used_id + d->opd_pre_reserved + 1;
	if (next <= d->opd_pre_last_created)
		return 1;

	/* ready if the OST reported no space and no destroys are in progress */
	if (d->opd_syn_changes + d->opd_syn_rpc_in_progress == 0 &&
	    d->opd_pre_status != 0)
		return 1;

	return 0;
}

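/*
 * Timeout callback for osp_precreate_reserve(): logs a warning describing
 * the current precreation state when object reservation takes too long.
 */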
static int osp_precreate_timeout_condition(void *data)
{
	struct osp_device *d = data;

	LCONSOLE_WARN("%s: slow creates, last="LPU64", next="LPU64", "
		      "reserved="LPU64", syn_changes=%lu, "
		      "syn_rpc_in_progress=%d, status=%d\n",
		      d->opd_obd->obd_name, d->opd_pre_last_created,
		      d->opd_pre_used_id, d->opd_pre_reserved,
		      d->opd_syn_changes, d->opd_syn_rpc_in_progress,
		      d->opd_pre_status);

	return 1;
}

/**
 * called to reserve an object in the pool
 *
 * return codes:
 *  ENOSPC - no space on the corresponding OST
 *  EAGAIN - precreation is in progress, try later
 *  EIO    - no access to the OST
 */
int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d)
{
	struct l_wait_info	 lwi;
	cfs_time_t		 expire = cfs_time_shift(obd_timeout);
	int			 precreated, rc;
	int			 count = 0;

	LASSERT(d->opd_pre_last_created >= d->opd_pre_used_id);

	lwi = LWI_TIMEOUT(cfs_time_seconds(obd_timeout),
			  osp_precreate_timeout_condition, d);

	/*
	 * wait till:
	 *  - preallocation is done
	 *  - no free space expected soon
	 *  - can't connect to OST for too long (obd_timeout)
	 */
	while ((rc = d->opd_pre_status) == 0 || rc == -ENOSPC ||
		rc == -ENODEV) {
		if (unlikely(rc == -ENODEV)) {
			if (cfs_time_aftereq(cfs_time_current(), expire))
				break;
		}

#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 3, 90, 0)
		/*
		 * to address Andreas's concern on possible busy-loop
		 * between this thread and osp_precreate_send()
		 */
		if (unlikely(count++ == 1000)) {
			osp_precreate_timeout_condition(d);
			LBUG();
		}
#endif

		/*
		 * increase number of precreations
		 */
		if (d->opd_pre_grow_count < d->opd_pre_max_grow_count &&
		    d->opd_pre_grow_slow == 0 &&
		    (d->opd_pre_last_created - d->opd_pre_used_id <=
		     d->opd_pre_grow_count / 4 + 1)) {
			spin_lock(&d->opd_pre_lock);
			d->opd_pre_grow_slow = 1;
			d->opd_pre_grow_count *= 2;
			spin_unlock(&d->opd_pre_lock);
		}

		spin_lock(&d->opd_pre_lock);
		precreated = d->opd_pre_last_created - d->opd_pre_used_id;
		if (precreated > d->opd_pre_reserved) {
			d->opd_pre_reserved++;
			spin_unlock(&d->opd_pre_lock);
			rc = 0;

			/* XXX: don't wake up if precreation is in progress */
			if (osp_precreate_near_empty_nolock(d))
				cfs_waitq_signal(&d->opd_pre_waitq);

			break;
		}
		spin_unlock(&d->opd_pre_lock);

		/*
		 * all precreated objects have been used and the no-space
		 * status leaves us no chance to succeed very soon, but if
		 * there is a destroy in progress, then we should wait till
		 * that is done - some space might be released
		 */
		if (unlikely(rc == -ENOSPC)) {
			if (d->opd_syn_changes) {
				/* force local commit to release space */
				dt_commit_async(env, d->opd_storage);
			}
			if (d->opd_syn_rpc_in_progress) {
				/* just wait till destroys are done */
				/* see l_wait_event() a few lines below */
			}
			if (d->opd_syn_changes +
			    d->opd_syn_rpc_in_progress == 0) {
				/* no hope for free space */
				break;
			}
		}

		/* XXX: don't wake up if precreation is in progress */
		cfs_waitq_signal(&d->opd_pre_waitq);

		l_wait_event(d->opd_pre_user_waitq,
			     osp_precreate_ready_condition(d), &lwi);
	}

	RETURN(rc);
}

/*
 * this function relies on the reservation made before
 */
__u64 osp_precreate_get_id(struct osp_device *d)
{
	obd_id objid;

	/* grab next id from the pool */
	spin_lock(&d->opd_pre_lock);
	LASSERT(d->opd_pre_used_id < d->opd_pre_last_created);
	objid = ++d->opd_pre_used_id;
	d->opd_pre_reserved--;
	/*
	 * last_used_id must be changed along with getting the new id,
	 * otherwise we might miscalculate the gap causing object loss or leak
	 */
	osp_update_last_id(d, objid);
	spin_unlock(&d->opd_pre_lock);

	/*
	 * the main thread probably suspended orphan cleanup till
	 * all reservations are released, see comment in
	 * osp_precreate_thread() just before orphan cleanup
	 */
	if (unlikely(d->opd_pre_reserved == 0 && d->opd_pre_status))
		cfs_waitq_signal(&d->opd_pre_waitq);

	return objid;
}

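/*
 * Truncates (punches) an OST object to the given size with a synchronous
 * OST_PUNCH request.
 */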
int osp_object_truncate(const struct lu_env *env, struct dt_object *dt,
			__u64 size)
{
	struct osp_device	*d = lu2osp_dev(dt->do_lu.lo_dev);
	struct ptlrpc_request	*req = NULL;
	struct obd_import	*imp;
	struct ost_body		*body;
	struct obdo		*oa = NULL;
	int			 rc;

	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);

	req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
	if (req == NULL)
		RETURN(-ENOMEM);

	/* XXX: capa support? */
	/* osc_set_capa_size(req, &RMF_CAPA1, capa); */
	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	/*
	 * XXX: decide how to handle resend here:
	 * if we don't resend, then the client may see a wrong file size
	 * if we do resend, then the MDS thread can get stuck for quite long
	 */
	req->rq_no_resend = req->rq_no_delay = 1;

	req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
	ptlrpc_at_set_req_timeout(req);

	OBD_ALLOC_PTR(oa);
	if (oa == NULL)
		GOTO(out, rc = -ENOMEM);

	rc = fid_ostid_pack(lu_object_fid(&dt->do_lu), &oa->o_oi);
	LASSERT(rc == 0);
	oa->o_size = size;
	oa->o_blocks = OBD_OBJECT_EOF;
	oa->o_valid = OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
		      OBD_MD_FLID | OBD_MD_FLGROUP;

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	lustre_set_wire_obdo(&body->oa, oa);

	/* XXX: capa support? */
	/* osc_pack_capa(req, body, capa); */

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		CERROR("can't punch object: %d\n", rc);
out:
	ptlrpc_req_finished(req);
	if (oa)
		OBD_FREE_PTR(oa);
	RETURN(rc);
}

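/*
 * Initializes the precreation state of the device (counters, locks, wait
 * queues, statfs timer) and starts the thread handling precreation and
 * statfs updates.
 */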
int osp_init_precreate(struct osp_device *d)
{
	struct l_wait_info	 lwi = { 0 };
	int			 rc;

	/* initially precreation isn't ready */
	d->opd_pre_status = -EAGAIN;
	d->opd_pre_used_id = 0;
	d->opd_pre_last_created = 0;
	d->opd_pre_reserved = 0;
	d->opd_got_disconnected = 1;
	d->opd_pre_grow_slow = 0;
	d->opd_pre_grow_count = OST_MIN_PRECREATE;
	d->opd_pre_min_grow_count = OST_MIN_PRECREATE;
	d->opd_pre_max_grow_count = OST_MAX_PRECREATE;

	spin_lock_init(&d->opd_pre_lock);
	cfs_waitq_init(&d->opd_pre_waitq);
	cfs_waitq_init(&d->opd_pre_user_waitq);
	cfs_waitq_init(&d->opd_pre_thread.t_ctl_waitq);

	/*
	 * Initialize statfs-related things
	 */
	d->opd_statfs_maxage = 5; /* default update interval */
	d->opd_statfs_fresh_till = cfs_time_shift(-1000);
	CDEBUG(D_OTHER, "current %llu, fresh till %llu\n",
	       (unsigned long long)cfs_time_current(),
	       (unsigned long long)d->opd_statfs_fresh_till);
	cfs_timer_init(&d->opd_statfs_timer, osp_statfs_timer_cb, d);

	/*
	 * start thread handling precreation and statfs updates
	 */
	rc = cfs_create_thread(osp_precreate_thread, d, 0);
	if (rc < 0) {
		CERROR("can't start precreate thread %d\n", rc);
		RETURN(rc);
	}

	l_wait_event(d->opd_pre_thread.t_ctl_waitq,
		     osp_precreate_running(d) || osp_precreate_stopped(d),
		     &lwi);

	RETURN(0);
}

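/*
 * Stops the statfs timer and the precreation thread, waiting until the
 * thread has actually exited.
 */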
void osp_precreate_fini(struct osp_device *d)
{
	struct ptlrpc_thread *thread = &d->opd_pre_thread;

	cfs_timer_disarm(&d->opd_statfs_timer);

	thread->t_flags = SVC_STOPPING;
	cfs_waitq_signal(&d->opd_pre_waitq);

	cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
}