1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #define DEBUG_SUBSYSTEM S_RPC
41 #include <liblustre.h>
44 #include <obd_support.h>
45 #include <obd_class.h>
46 #include <lustre_lib.h>
47 #include <lustre_ha.h>
48 #include <lustre_import.h>
50 #include "ptlrpc_internal.h"
52 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
53 struct ptlrpc_client *cl)
55 cl->cli_request_portal = req_portal;
56 cl->cli_reply_portal = rep_portal;
60 struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
62 struct ptlrpc_connection *c;
64 lnet_process_id_t peer;
67 err = ptlrpc_uuid_to_peer(uuid, &peer, &self);
69 CERROR("cannot find peer %s!\n", uuid->uuid);
73 c = ptlrpc_connection_get(peer, self, uuid);
75 memcpy(c->c_remote_uuid.uuid,
76 uuid->uuid, sizeof(c->c_remote_uuid.uuid));
79 CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);
84 static inline struct ptlrpc_bulk_desc *new_bulk(int npages, int type, int portal)
86 struct ptlrpc_bulk_desc *desc;
88 OBD_ALLOC(desc, offsetof (struct ptlrpc_bulk_desc, bd_iov[npages]));
92 spin_lock_init(&desc->bd_lock);
93 cfs_waitq_init(&desc->bd_waitq);
94 desc->bd_max_iov = npages;
95 desc->bd_iov_count = 0;
96 desc->bd_md_h = LNET_INVALID_HANDLE;
97 desc->bd_portal = portal;
103 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
104 int npages, int type, int portal)
106 struct obd_import *imp = req->rq_import;
107 struct ptlrpc_bulk_desc *desc;
110 LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
111 desc = new_bulk(npages, type, portal);
115 desc->bd_import_generation = req->rq_import_generation;
116 desc->bd_import = class_import_get(imp);
119 desc->bd_cbid.cbid_fn = client_bulk_callback;
120 desc->bd_cbid.cbid_arg = desc;
122         /* This makes req own desc, so the bulk desc is freed when the request itself is freed */
128 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
129 int npages, int type, int portal)
131 struct obd_export *exp = req->rq_export;
132 struct ptlrpc_bulk_desc *desc;
135 LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);
137 desc = new_bulk(npages, type, portal);
141 desc->bd_export = class_export_get(exp);
144 desc->bd_cbid.cbid_fn = server_bulk_callback;
145 desc->bd_cbid.cbid_arg = desc;
147 /* NB we don't assign rq_bulk here; server-side requests are
148 * re-used, and the handler frees the bulk desc explicitly. */
153 void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
154 cfs_page_t *page, int pageoffset, int len)
156 LASSERT(desc->bd_iov_count < desc->bd_max_iov);
157 LASSERT(page != NULL);
158 LASSERT(pageoffset >= 0);
160 LASSERT(pageoffset + len <= CFS_PAGE_SIZE);
165 ptlrpc_add_bulk_page(desc, page, pageoffset, len);
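/*
 * A minimal client-side usage sketch for the two helpers above (not taken
 * from this file; "req", "pages", "npages" and "portal" are hypothetical
 * caller-supplied values):
 *
 *      struct ptlrpc_bulk_desc *desc;
 *      int i;
 *
 *      desc = ptlrpc_prep_bulk_imp(req, npages, BULK_PUT_SINK, portal);
 *      if (desc == NULL)
 *              RETURN(-ENOMEM);
 *      for (i = 0; i < npages; i++)
 *              ptlrpc_prep_bulk_page(desc, pages[i], 0, CFS_PAGE_SIZE);
 *
 * On the client side the descriptor is owned by the request (see
 * ptlrpc_prep_bulk_imp above), so it is released when the request is freed.
 */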
168 void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
173 LASSERT(desc != NULL);
174 LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
175 LASSERT(!desc->bd_network_rw); /* network hands off or */
176 LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));
178 class_export_put(desc->bd_export);
180 class_import_put(desc->bd_import);
182 for (i = 0; i < desc->bd_iov_count ; i++)
183 cfs_page_unpin(desc->bd_iov[i].kiov_page);
185 OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
186 bd_iov[desc->bd_max_iov]));
190 /* Set server timelimit for this req */
191 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req)
197 LASSERT(req->rq_import);
200 /* non-AT settings */
201 req->rq_timeout = req->rq_import->imp_server_timeout ?
202 obd_timeout / 2 : obd_timeout;
204 at = &req->rq_import->imp_at;
205 idx = import_at_get_index(req->rq_import,
206 req->rq_request_portal);
207 serv_est = at_get(&at->iat_service_estimate[idx]);
208 req->rq_timeout = at_est2timeout(serv_est);
211 /* We could get even fancier here, using history to predict increased
214 /* Let the server know what this RPC timeout is by putting it in the
216 lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
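/* For reference (a summary, not new behaviour): the deadline eventually
 * derived from this timeout has the shape used in ptlrpc_at_recv_early_reply()
 * below,
 *
 *      req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
 *                         ptlrpc_at_get_net_latency(req);
 *
 * i.e. rq_timeout covers only the server-side service time, with the expected
 * network latency accounted for separately. */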
219 /* Adjust max service estimate based on server value */
220 static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
221 unsigned int serv_est)
227 LASSERT(req->rq_import);
228 at = &req->rq_import->imp_at;
230 idx = import_at_get_index(req->rq_import, req->rq_request_portal);
231 /* max service estimates are tracked on the server side,
232 so just keep minimal history here */
233 oldse = at_add(&at->iat_service_estimate[idx], serv_est);
235 CDEBUG(D_ADAPTTO, "The RPC service estimate for %s ptl %d "
236 "has changed from %d to %d\n",
237                req->rq_import->imp_obd->obd_name, req->rq_request_portal,
238 oldse, at_get(&at->iat_service_estimate[idx]));
241 /* Expected network latency per remote node (secs) */
242 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req)
244 return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency);
247 /* Adjust expected network latency */
248 static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
249 unsigned int service_time)
251 unsigned int nl, oldnl;
253 time_t now = cfs_time_current_sec();
255 LASSERT(req->rq_import);
256 at = &req->rq_import->imp_at;
258 /* Network latency is total time less server processing time */
259 nl = max_t(int, now - req->rq_sent - service_time, 0) + 1/*st rounding*/;
260 if (service_time > now - req->rq_sent + 3 /* bz16408 */)
261 CWARN("Reported service time %u > total measured time %ld\n",
262 service_time, now - req->rq_sent);
264 oldnl = at_add(&at->iat_net_latency, nl);
266 CDEBUG(D_ADAPTTO, "The network latency for %s (nid %s) "
267 "has changed from %d to %d\n",
268 req->rq_import->imp_obd->obd_name,
270 &req->rq_import->imp_connection->c_remote_uuid),
271 oldnl, at_get(&at->iat_net_latency));
274 static int unpack_reply_common(struct ptlrpc_request *req)
278 req->rq_rep_swab_mask = 0;
279 rc = lustre_unpack_msg(req->rq_repmsg, req->rq_nob_received);
281 DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc);
286 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
291 static int unpack_reply(struct ptlrpc_request *req)
295 rc = unpack_reply_common(req);
299 rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
301 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc);
307 static inline void unpack_reply_free_msg(struct lustre_msg *msg, int len)
312 static int unpack_reply_copy_msg(struct ptlrpc_request *req,
313 struct lustre_msg **msg, int *len)
315 struct lustre_msg *msgcpy;
316 __u32 csum_calc, csum_get;
320 LASSERT_SPIN_LOCKED(&req->rq_lock);
324 /* Swabbing required when rc == 1 */
325 rc = unpack_reply_common(req);
329 lencpy = req->rq_replen;
330 spin_unlock(&req->rq_lock);
332 OBD_ALLOC(msgcpy, lencpy);
334 spin_lock(&req->rq_lock);
337 spin_lock(&req->rq_lock);
339         /* The checksum must be calculated before the message is unswabbed. If
340          * the magic in the copy still needs swabbing, discard it as in the checksum failure case */
341 memcpy(msgcpy, req->rq_repmsg, lencpy);
342 if (lustre_msg_need_swab(msgcpy)) {
343 DEBUG_REQ(D_NET, req, "incorrect message magic: %08x\n",
345 GOTO(err, rc = -EINVAL);
348 csum_calc = lustre_msg_calc_cksum(msgcpy);
350         /* Unpack the copy; the original rq_repmsg is left untouched */
351 rc = lustre_unpack_msg_ptlrpc_body(msgcpy, MSG_PTLRPC_BODY_OFF, rc);
353 DEBUG_REQ(D_ERROR, req, "unpack msg copy failed: %d", rc);
354 GOTO(err, rc = -EPROTO);
357 /* For early replies the LND may update repmsg outside req->rq_lock
358          * resulting in a checksum failure, which is harmless */
359 csum_get = lustre_msg_get_cksum(msgcpy);
360 if (csum_calc != csum_get) {
361 DEBUG_REQ(D_NET, req, "checksum mismatch: %x != %x\n",
362 csum_calc, csum_get);
363 GOTO(err, rc = -EINVAL);
370 unpack_reply_free_msg(msgcpy, lencpy);
374 /* Handle an early reply message. To prevent a real reply from arriving
375  * and changing req->rq_repmsg, this function is called under the rq_lock */
376 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) {
377 struct lustre_msg *msg;
382 LASSERT_SPIN_LOCKED(&req->rq_lock);
385         /* All early replies for this request use a single repbuf which can
386 * be updated outside the req->rq_lock. To prevent racing we create
387 * a copy of the repmsg and verify its checksum before it is used. */
388 rc = unpack_reply_copy_msg(req, &msg, &len);
390 /* Let's just ignore it - same as if it never got here */
391 CDEBUG(D_ADAPTTO, "Discarding racing early reply: %d\n", rc);
395 /* Expecting to increase the service time estimate here */
396 ptlrpc_at_adj_service(req, lustre_msg_get_timeout(msg));
397 ptlrpc_at_adj_net_latency(req, lustre_msg_get_service_time(msg));
398 /* Adjust the local timeout for this req */
399 ptlrpc_at_set_req_timeout(req);
401 olddl = req->rq_deadline;
402 /* Server assumes it now has rq_timeout from when it sent the
403 early reply, so client should give it at least that long. */
404 req->rq_deadline = cfs_time_current_sec() + req->rq_timeout +
405 ptlrpc_at_get_net_latency(req);
407 DEBUG_REQ(D_ADAPTTO, req,
408 "Early reply #%d, new deadline in %lds (%+lds)",
409 req->rq_early_count, req->rq_deadline -
410 cfs_time_current_sec(), req->rq_deadline - olddl);
412 unpack_reply_free_msg(msg, len);
416 void ptlrpc_free_rq_pool(struct ptlrpc_request_pool *pool)
418 struct list_head *l, *tmp;
419 struct ptlrpc_request *req;
421 LASSERT(pool != NULL);
423 spin_lock(&pool->prp_lock);
424 list_for_each_safe(l, tmp, &pool->prp_req_list) {
425 req = list_entry(l, struct ptlrpc_request, rq_list);
426 list_del(&req->rq_list);
427 LASSERT(req->rq_reqmsg);
428 OBD_FREE(req->rq_reqmsg, pool->prp_rq_size);
429 OBD_FREE(req, sizeof(*req));
431 spin_unlock(&pool->prp_lock);
432 OBD_FREE(pool, sizeof(*pool));
435 void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq)
440 while (size < pool->prp_rq_size)
443 LASSERTF(list_empty(&pool->prp_req_list) || size == pool->prp_rq_size,
444 "Trying to change pool size with nonempty pool "
445 "from %d to %d bytes\n", pool->prp_rq_size, size);
447 spin_lock(&pool->prp_lock);
448 pool->prp_rq_size = size;
449 for (i = 0; i < num_rq; i++) {
450 struct ptlrpc_request *req;
451 struct lustre_msg *msg;
453 spin_unlock(&pool->prp_lock);
454 OBD_ALLOC(req, sizeof(struct ptlrpc_request));
457 OBD_ALLOC_GFP(msg, size, CFS_ALLOC_STD);
459 OBD_FREE(req, sizeof(struct ptlrpc_request));
462 req->rq_reqmsg = msg;
464 spin_lock(&pool->prp_lock);
465 list_add_tail(&req->rq_list, &pool->prp_req_list);
467 spin_unlock(&pool->prp_lock);
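/* A usage sketch for the request pool API in this file (not from the original
 * source; "num", "size", "imp", "version", "opcode", "count", "lengths" and
 * "bufs" stand in for the caller's own values):
 *
 *      struct ptlrpc_request_pool *pool;
 *      struct ptlrpc_request *req;
 *
 *      pool = ptlrpc_init_rq_pool(num, size, ptlrpc_add_rqs_to_pool);
 *      if (pool != NULL) {
 *              req = ptlrpc_prep_req_pool(imp, version, opcode, count,
 *                                         lengths, bufs, pool);
 *              ...
 *              ptlrpc_free_rq_pool(pool);
 *      }
 */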
471 struct ptlrpc_request_pool *ptlrpc_init_rq_pool(int num_rq, int msgsize,
472 void (*populate_pool)(struct ptlrpc_request_pool *, int))
474 struct ptlrpc_request_pool *pool;
476 OBD_ALLOC(pool, sizeof (struct ptlrpc_request_pool));
480         /* Request the next power of two for the allocation, because internally
481            the kernel would do exactly this anyway */
483 spin_lock_init(&pool->prp_lock);
484 CFS_INIT_LIST_HEAD(&pool->prp_req_list);
485 pool->prp_rq_size = msgsize;
486 pool->prp_populate = populate_pool;
488 populate_pool(pool, num_rq);
490 if (list_empty(&pool->prp_req_list)) {
491 /* have not allocated a single request for the pool */
492 OBD_FREE(pool, sizeof (struct ptlrpc_request_pool));
498 static struct ptlrpc_request *ptlrpc_prep_req_from_pool(struct ptlrpc_request_pool *pool)
500 struct ptlrpc_request *request;
501 struct lustre_msg *reqmsg;
506 spin_lock(&pool->prp_lock);
508         /* See if we have anything in the pool, and bail out if not.
509          * In the writeout path, where this matters, this is safe to do
510          * because nothing is lost in that case; when some in-flight requests
511          * complete, this code will be called again. */
512 if (unlikely(list_empty(&pool->prp_req_list))) {
513 spin_unlock(&pool->prp_lock);
517 request = list_entry(pool->prp_req_list.next, struct ptlrpc_request,
519 list_del_init(&request->rq_list);
520 spin_unlock(&pool->prp_lock);
522 LASSERT(request->rq_reqmsg);
523 LASSERT(request->rq_pool);
525 reqmsg = request->rq_reqmsg;
526 memset(request, 0, sizeof(*request));
527 request->rq_reqmsg = reqmsg;
528 request->rq_pool = pool;
529 request->rq_reqlen = pool->prp_rq_size;
533 struct ptlrpc_request *
534 ptlrpc_prep_req_pool(struct obd_import *imp, __u32 version, int opcode,
535 int count, __u32 *lengths, char **bufs,
536 struct ptlrpc_request_pool *pool)
538 struct ptlrpc_request *request = NULL;
542 /* The obd disconnected */
546 LASSERT(imp != LP_POISON);
547 LASSERT((unsigned long)imp->imp_client > 0x1000);
548 LASSERT(imp->imp_client != LP_POISON);
551 request = ptlrpc_prep_req_from_pool(pool);
554 OBD_ALLOC(request, sizeof(*request));
557 CERROR("request allocation out of memory\n");
561 rc = lustre_pack_request(request, imp->imp_msg_magic, count, lengths,
564 LASSERT(!request->rq_pool);
565 OBD_FREE(request, sizeof(*request));
569 lustre_msg_add_version(request->rq_reqmsg, version);
570 request->rq_send_state = LUSTRE_IMP_FULL;
571 request->rq_type = PTL_RPC_MSG_REQUEST;
572 request->rq_import = class_import_get(imp);
573 request->rq_export = NULL;
575 request->rq_req_cbid.cbid_fn = request_out_callback;
576 request->rq_req_cbid.cbid_arg = request;
578 request->rq_reply_cbid.cbid_fn = reply_in_callback;
579 request->rq_reply_cbid.cbid_arg = request;
581 request->rq_reply_deadline = 0;
582 request->rq_phase = RQ_PHASE_NEW;
583 request->rq_next_phase = RQ_PHASE_UNDEFINED;
585 request->rq_request_portal = imp->imp_client->cli_request_portal;
586 request->rq_reply_portal = imp->imp_client->cli_reply_portal;
588 ptlrpc_at_set_req_timeout(request);
590 spin_lock_init(&request->rq_lock);
591 CFS_INIT_LIST_HEAD(&request->rq_list);
592 CFS_INIT_LIST_HEAD(&request->rq_replay_list);
593 CFS_INIT_LIST_HEAD(&request->rq_set_chain);
594 CFS_INIT_LIST_HEAD(&request->rq_history_list);
595 CFS_INIT_LIST_HEAD(&request->rq_exp_list);
596 cfs_waitq_init(&request->rq_reply_waitq);
597 request->rq_xid = ptlrpc_next_xid();
598 atomic_set(&request->rq_refcount, 1);
600 lustre_msg_set_opc(request->rq_reqmsg, opcode);
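/* The common synchronous pattern built from the helpers in this file
 * (a sketch only; "imp", "version", "opcode", "count", "lengths" and "bufs"
 * are the caller's own values):
 *
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_prep_req(imp, version, opcode, count, lengths, bufs);
 *      if (req == NULL)
 *              RETURN(-ENOMEM);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 *      RETURN(rc);
 */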
605 struct ptlrpc_request *
606 ptlrpc_prep_req(struct obd_import *imp, __u32 version, int opcode, int count,
607 __u32 *lengths, char **bufs)
609 return ptlrpc_prep_req_pool(imp, version, opcode, count, lengths, bufs,
613 struct ptlrpc_request *ptlrpc_prep_fakereq(struct obd_import *imp,
614 unsigned int timeout,
615 int (*interpreter)(struct ptlrpc_request *,
618 struct ptlrpc_request *request = NULL;
621 OBD_ALLOC(request, sizeof(*request));
623 CERROR("request allocation out of memory\n");
627 request->rq_send_state = LUSTRE_IMP_FULL;
628 request->rq_type = PTL_RPC_MSG_REQUEST;
629 request->rq_import = class_import_get(imp);
630 request->rq_export = NULL;
632 request->rq_sent = cfs_time_current_sec();
633 request->rq_reply_deadline = request->rq_sent + timeout;
634 request->rq_interpret_reply = interpreter;
635 request->rq_phase = RQ_PHASE_RPC;
636 request->rq_next_phase = RQ_PHASE_INTERPRET;
637 /* don't want reply */
638 request->rq_receiving_reply = 0;
639 request->rq_must_unlink = 0;
640 request->rq_no_delay = request->rq_no_resend = 1;
641 request->rq_fake = 1;
643 spin_lock_init(&request->rq_lock);
644 CFS_INIT_LIST_HEAD(&request->rq_list);
645 CFS_INIT_LIST_HEAD(&request->rq_replay_list);
646 CFS_INIT_LIST_HEAD(&request->rq_set_chain);
647 CFS_INIT_LIST_HEAD(&request->rq_history_list);
648 CFS_INIT_LIST_HEAD(&request->rq_exp_list);
649 cfs_waitq_init(&request->rq_reply_waitq);
651 request->rq_xid = ptlrpc_next_xid();
652 atomic_set(&request->rq_refcount, 1);
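/* Note: a fake request prepared above is not a regular network RPC - it does
 * not expect a reply (see the flags set above), carries only a deadline and
 * an interpreter callback, and is normally completed through
 * ptlrpc_fakereq_finished() below. */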
657 void ptlrpc_fakereq_finished(struct ptlrpc_request *req)
659         /* if we kill the request before it times out, we need to adjust the counter */
660 if (req->rq_phase == RQ_PHASE_RPC) {
661 struct ptlrpc_request_set *set = req->rq_set;
664                 set->set_remaining--;
667 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
668 list_del_init(&req->rq_list);
672 struct ptlrpc_request_set *ptlrpc_prep_set(void)
674 struct ptlrpc_request_set *set;
677 OBD_ALLOC(set, sizeof *set);
680 CFS_INIT_LIST_HEAD(&set->set_requests);
681 cfs_waitq_init(&set->set_waitq);
682 set->set_remaining = 0;
683 spin_lock_init(&set->set_new_req_lock);
684 CFS_INIT_LIST_HEAD(&set->set_new_requests);
685 CFS_INIT_LIST_HEAD(&set->set_cblist);
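/* Typical request-set usage (a sketch, not taken from this file; "req" and
 * "rc" belong to the caller):
 *
 *      struct ptlrpc_request_set *set;
 *
 *      set = ptlrpc_prep_set();
 *      if (set == NULL)
 *              RETURN(-ENOMEM);
 *      ptlrpc_set_add_req(set, req);
 *      rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 *
 * ptlrpc_set_add_req() takes over the caller's reference on the request, and
 * ptlrpc_set_wait() sends any still-unsent requests and blocks until the
 * whole set has completed (see those functions below). */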
690 /* Finish with this set; opposite of prep_set. */
691 void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
693 struct list_head *tmp;
694 struct list_head *next;
699 /* Requests on the set should either all be completed, or all be new */
700 expected_phase = (set->set_remaining == 0) ?
701 RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
702 list_for_each (tmp, &set->set_requests) {
703 struct ptlrpc_request *req =
704 list_entry(tmp, struct ptlrpc_request, rq_set_chain);
706 LASSERT(req->rq_phase == expected_phase);
710 LASSERTF(set->set_remaining == 0 || set->set_remaining == n, "%d / %d\n",
711 set->set_remaining, n);
713 list_for_each_safe(tmp, next, &set->set_requests) {
714 struct ptlrpc_request *req =
715 list_entry(tmp, struct ptlrpc_request, rq_set_chain);
716 list_del_init(&req->rq_set_chain);
718 LASSERT(req->rq_phase == expected_phase);
720 if (req->rq_phase == RQ_PHASE_NEW) {
722 if (req->rq_interpret_reply != NULL) {
723 int (*interpreter)(struct ptlrpc_request *,
725 req->rq_interpret_reply;
727 /* higher level (i.e. LOV) failed;
728 * let the sub reqs clean up */
729 req->rq_status = -EBADR;
730 interpreter(req, &req->rq_async_args,
733 set->set_remaining--;
737 ptlrpc_req_finished (req);
740 LASSERT(set->set_remaining == 0);
742 OBD_FREE(set, sizeof(*set));
746 int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
747 set_interpreter_func fn, void *data)
749 struct ptlrpc_set_cbdata *cbdata;
751 OBD_SLAB_ALLOC(cbdata, ptlrpc_cbdata_slab,
752 CFS_ALLOC_STD, sizeof(*cbdata));
756 cbdata->psc_interpret = fn;
757 cbdata->psc_data = data;
758 list_add_tail(&cbdata->psc_item, &set->set_cblist);
763 void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
764 struct ptlrpc_request *req)
766 /* The set takes over the caller's request reference */
767 list_add_tail(&req->rq_set_chain, &set->set_requests);
769 set->set_remaining++;
773  * Locked so that many callers can add things; the context that owns the set
774  * is supposed to notice these and move them into the set proper.
776 int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc,
777 struct ptlrpc_request *req)
779 struct ptlrpc_request_set *set = pc->pc_set;
782          * Let the caller know that we stopped and will not handle this request.
783          * It needs to take care of the request itself.
785 if (test_bit(LIOD_STOP, &pc->pc_flags))
788 spin_lock(&set->set_new_req_lock);
790 * The set takes over the caller's request reference.
792 list_add_tail(&req->rq_set_chain, &set->set_new_requests);
794 spin_unlock(&set->set_new_req_lock);
797          * Let the thread know that we added something, so it had better wake up
800 cfs_waitq_signal(&set->set_waitq);
805 * Based on the current state of the import, determine if the request
806 * can be sent, is an error, or should be delayed.
808 * Returns true if this request should be delayed. If false, and
809  * *status is set, then the request cannot be sent and *status is the
810  * error code. If false and *status is 0, then the request can be sent.
812 * The imp->imp_lock must be held.
814 static int ptlrpc_import_delay_req(struct obd_import *imp,
815 struct ptlrpc_request *req, int *status)
820 LASSERT (status != NULL);
823 if (imp->imp_state == LUSTRE_IMP_NEW) {
824 DEBUG_REQ(D_ERROR, req, "Uninitialized import.");
827 } else if (imp->imp_state == LUSTRE_IMP_CLOSED) {
828 DEBUG_REQ(D_ERROR, req, "IMP_CLOSED ");
830 } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING &&
831 imp->imp_state == LUSTRE_IMP_CONNECTING) {
832 /* allow CONNECT even if import is invalid */ ;
833 if (atomic_read(&imp->imp_inval_count) != 0) {
834 DEBUG_REQ(D_ERROR, req, "invalidate in flight");
837 } else if ((imp->imp_invalid && (!imp->imp_recon_bk)) ||
838 imp->imp_obd->obd_no_recov) {
839 /* If the import has been invalidated (such as by an OST
840 * failure), and if the import(MGC) tried all of its connection
841 * list (Bug 13464), the request must fail with -ESHUTDOWN.
842 * This indicates the requests should be discarded; an -EIO
843 * may result in a resend of the request. */
844 if (!imp->imp_deactive)
845 DEBUG_REQ(D_ERROR, req, "IMP_INVALID");
846 *status = -ESHUTDOWN; /* bz 12940 */
847 } else if (req->rq_import_generation != imp->imp_generation) {
848 DEBUG_REQ(D_ERROR, req, "req wrong generation:");
850 } else if (req->rq_send_state != imp->imp_state) {
851                 /* invalidate in progress - any requests should be dropped */
852 if (atomic_read(&imp->imp_inval_count) != 0) {
853 DEBUG_REQ(D_ERROR, req, "invalidate in flight");
855 } else if (imp->imp_dlm_fake || req->rq_no_delay) {
856 *status = -EWOULDBLOCK;
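/* How the callers in this file use the result (see ptlrpc_send_new_req() and
 * ptlrpc_check_set()): if this function returns non-zero the request is
 * parked on imp->imp_delayed_list until recovery completes; otherwise, if
 * *status is non-zero, req->rq_status is set to *status and the request is
 * moved straight to RQ_PHASE_INTERPRET; only when both are zero is the
 * request actually sent. */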
865 static int ptlrpc_check_reply(struct ptlrpc_request *req)
868 const char *what = "";
871 /* serialise with network callback */
872 spin_lock(&req->rq_lock);
874 if (ptlrpc_client_replied(req)) {
879 if (req->rq_net_err && !req->rq_timedout) {
881 spin_unlock(&req->rq_lock);
882 rc = ptlrpc_expire_one_request(req, 0);
883 spin_lock(&req->rq_lock);
892 if (req->rq_resend) {
897 if (req->rq_restart) {
902 if (ptlrpc_client_early(req)) {
904 ptlrpc_at_recv_early_reply(req);
905 GOTO(out, rc = 0); /* keep waiting */
910 spin_unlock(&req->rq_lock);
911 DEBUG_REQ(D_NET, req, "%src = %d for", what, rc);
915 /* Conditionally suppress specific console messages */
916 static int ptlrpc_console_allow(struct ptlrpc_request *req)
918 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
921 /* Suppress particular reconnect errors which are to be expected. No
922 * errors are suppressed for the initial connection on an import */
923 if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
924 (opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
926 /* Suppress timed out reconnect requests */
927 if (req->rq_timedout)
930 /* Suppress unavailable/again reconnect requests */
931 err = lustre_msg_get_status(req->rq_repmsg);
932 if (err == -ENODEV || err == -EAGAIN)
939 static int ptlrpc_check_status(struct ptlrpc_request *req)
944 err = lustre_msg_get_status(req->rq_repmsg);
946 DEBUG_REQ(D_INFO, req, "status is %d", err);
947 } else if (err > 0) {
948 /* XXX: translate this error from net to host */
949 DEBUG_REQ(D_INFO, req, "status is %d", err);
952 if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
953 struct obd_import *imp = req->rq_import;
954 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
956 if (ptlrpc_console_allow(req))
957 LCONSOLE_ERROR_MSG(0x011,"an error occurred while "
958 "communicating with %s. The %s "
959 "operation failed with %d\n",
961 imp->imp_connection->c_peer.nid),
962 ll_opcode2str(opc), err);
964 RETURN(err < 0 ? err : -EINVAL);
970 /* VBR: we should save pre-versions for replay */
971 static void ptlrpc_save_versions(struct ptlrpc_request *req)
973 struct lustre_msg *repmsg = req->rq_repmsg;
974 struct lustre_msg *reqmsg = req->rq_reqmsg;
975 __u64 *versions = lustre_msg_get_versions(repmsg);
977 /* Interoperability with 1.6. This should be changed to LASSERT in HEAD */
978 if (versions == NULL)
981 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
984 lustre_msg_set_versions(reqmsg, versions);
985 CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
986 versions[0], versions[1]);
991 static int after_reply(struct ptlrpc_request *req)
993 struct obd_import *imp = req->rq_import;
994 struct obd_device *obd = req->rq_import->imp_obd;
996 struct timeval work_start;
1000 LASSERT(!req->rq_receiving_reply);
1003 /* NB Until this point, the whole of the incoming message,
1004 * including buflens, status etc is in the sender's byte order. */
1006 if (req->rq_reply_truncate && !req->rq_no_resend) {
1008 OBD_FREE(req->rq_repbuf, req->rq_replen);
1009 req->rq_repbuf = NULL;
1010 req->rq_replen = req->rq_nob_received;
1014 LASSERT (req->rq_nob_received <= req->rq_replen);
1015 rc = unpack_reply(req);
1019 do_gettimeofday(&work_start);
1020 timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
1021 if (obd->obd_svc_stats != NULL) {
1022 lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
1024 ptlrpc_lprocfs_rpc_sent(req, timediff);
1027 OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val);
1028 ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
1029 ptlrpc_at_adj_net_latency(req, lustre_msg_get_service_time(req->rq_repmsg));
1031 if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
1032 lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
1033 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
1034 lustre_msg_get_type(req->rq_repmsg));
1038 rc = ptlrpc_check_status(req);
1040 /* Either we've been evicted, or the server has failed for
1041 * some reason. Try to reconnect, and if that fails, punt to
1043 if (ll_rpc_recoverable_error(rc)) {
1044 if (req->rq_send_state != LUSTRE_IMP_FULL ||
1045 imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
1048 ptlrpc_request_handle_notconn(req);
1052         /* Let's see whether the server sent an slv. Do it only for RPC with
1054 ldlm_cli_update_pool(req);
1057 /* Store transno in reqmsg for replay. */
1058 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) {
1059 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg);
1060 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno);
1063 if (imp->imp_replayable) {
1064 spin_lock(&imp->imp_lock);
1065 /* no point in adding already-committed requests to the replay
1066 * list, we will just remove them immediately. b=9829 */
1067 if (req->rq_transno != 0 &&
1069 lustre_msg_get_last_committed(req->rq_repmsg) ||
1071 /* version recovery */
1072 ptlrpc_save_versions(req);
1073 ptlrpc_retain_replayable_request(req, imp);
1074 } else if (req->rq_commit_cb != NULL) {
1075 spin_unlock(&imp->imp_lock);
1076 req->rq_commit_cb(req);
1077 spin_lock(&imp->imp_lock);
1080 /* Replay-enabled imports return commit-status information. */
1081 if (lustre_msg_get_last_committed(req->rq_repmsg))
1082 imp->imp_peer_committed_transno =
1083 lustre_msg_get_last_committed(req->rq_repmsg);
1084 ptlrpc_free_committed(imp);
1086 if (req->rq_transno > imp->imp_peer_committed_transno)
1087 ptlrpc_pinger_sending_on_import(imp);
1088 spin_unlock(&imp->imp_lock);
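/* In short: replies carry the server's last committed transno; it is cached
 * in imp_peer_committed_transno, and ptlrpc_free_committed() uses it to prune
 * replay-list entries that no longer need to be kept. */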
1094 static int ptlrpc_send_new_req(struct ptlrpc_request *req)
1096 struct obd_import *imp;
1100 LASSERT(req->rq_phase == RQ_PHASE_NEW);
1101 if (req->rq_sent && (req->rq_sent > CURRENT_SECONDS))
1104 ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
1106 imp = req->rq_import;
1107 spin_lock(&imp->imp_lock);
1109 req->rq_import_generation = imp->imp_generation;
1111 if (ptlrpc_import_delay_req(imp, req, &rc)) {
1112 spin_lock(&req->rq_lock);
1113 req->rq_waiting = 1;
1114 spin_unlock(&req->rq_lock);
1116 DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: "
1117 "(%s != %s)", lustre_msg_get_status(req->rq_reqmsg),
1118 ptlrpc_import_state_name(req->rq_send_state),
1119 ptlrpc_import_state_name(imp->imp_state));
1120 LASSERT(list_empty(&req->rq_list));
1121 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1122 atomic_inc(&req->rq_import->imp_inflight);
1123 spin_unlock(&imp->imp_lock);
1128 spin_unlock(&imp->imp_lock);
1129 req->rq_status = rc;
1130 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1134 LASSERT(list_empty(&req->rq_list));
1135 list_add_tail(&req->rq_list, &imp->imp_sending_list);
1136 atomic_inc(&req->rq_import->imp_inflight);
1137 spin_unlock(&imp->imp_lock);
1139 lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
1140 CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
1141 " %s:%s:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
1142 imp->imp_obd->obd_uuid.uuid,
1143 lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
1144 libcfs_nid2str(imp->imp_connection->c_peer.nid),
1145 lustre_msg_get_opc(req->rq_reqmsg));
1147 rc = ptl_send_rpc(req, 0);
1149 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
1150 req->rq_net_err = 1;
1156 /* this sends any unsent RPCs in @set and returns TRUE if all are sent */
1157 int ptlrpc_check_set(struct ptlrpc_request_set *set)
1159 struct list_head *tmp;
1160 int force_timer_recalc = 0;
1163 if (set->set_remaining == 0)
1166 list_for_each(tmp, &set->set_requests) {
1167 struct ptlrpc_request *req =
1168 list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1169 struct obd_import *imp = req->rq_import;
1172 if (req->rq_phase == RQ_PHASE_NEW &&
1173 ptlrpc_send_new_req(req)) {
1174 force_timer_recalc = 1;
1177 /* delayed send - skip */
1178 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent)
1181 if (!(req->rq_phase == RQ_PHASE_RPC ||
1182 req->rq_phase == RQ_PHASE_BULK ||
1183 req->rq_phase == RQ_PHASE_INTERPRET ||
1184 req->rq_phase == RQ_PHASE_UNREGISTERING ||
1185 req->rq_phase == RQ_PHASE_COMPLETE)) {
1186 DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
1190 if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
1191 LASSERT(req->rq_next_phase != req->rq_phase);
1192 LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED);
1194 /* Skip processing until reply is unlinked. We
1195 * can't return to pool before that and we can't
1196 * call interpret before that. We need to make
1197                          * sure that all RDMA transfers have finished and will
1198 * not corrupt any data. */
1199 if (ptlrpc_client_recv_or_unlink(req) ||
1200 ptlrpc_client_bulk_active(req))
1203 /* Turn repl fail_loc off to prevent it from looping
1205 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK)) {
1206 OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK |
1210 /* Turn off bulk fail_loc. */
1211 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK)) {
1212 OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK |
1216 /* Move to next phase if reply was successfully
1218 ptlrpc_rqphase_move(req, req->rq_next_phase);
1221 if (req->rq_phase == RQ_PHASE_COMPLETE)
1224 if (req->rq_phase == RQ_PHASE_INTERPRET)
1225 GOTO(interpret, req->rq_status);
1227 /* Note that this also will start async reply unlink */
1228 if (req->rq_net_err && !req->rq_timedout) {
1229 ptlrpc_expire_one_request(req, 1);
1231 /* Check if we still need to wait for unlink. */
1232 if (ptlrpc_client_recv_or_unlink(req) ||
1233 ptlrpc_client_bulk_active(req))
1238 if (req->rq_status == 0)
1239 req->rq_status = -EIO;
1240 GOTO(interpret, req->rq_status);
1243 /* ptlrpc_queue_wait->l_wait_event guarantees that rq_intr
1244 * will only be set after rq_timedout, but the oig waiting
1245 * path sets rq_intr irrespective of whether ptlrpcd has
1246 * seen a timeout. our policy is to only interpret
1247 * interrupted rpcs after they have timed out */
1248 if (req->rq_intr && (req->rq_timedout || req->rq_waiting)) {
1249 req->rq_status = -EINTR;
1250 GOTO(interpret, req->rq_status);
1253 if (req->rq_phase == RQ_PHASE_RPC) {
1254                         if (req->rq_timedout || req->rq_waiting || req->rq_resend) {
1257 if (!ptlrpc_unregister_reply(req, 1))
1260 spin_lock(&imp->imp_lock);
1261 if (ptlrpc_import_delay_req(imp, req, &status)){
1262                                 /* put on the delayed list - only send once
1263                                  * recovery has finished */
1264 list_del_init(&req->rq_list);
1265 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
1266 spin_unlock(&imp->imp_lock);
1271 req->rq_status = status;
1272 spin_unlock(&imp->imp_lock);
1273 GOTO(interpret, req->rq_status);
1275 if (req->rq_no_resend) {
1276 req->rq_status = -ENOTCONN;
1277 spin_unlock(&imp->imp_lock);
1278 GOTO(interpret, req->rq_status);
1281 list_del_init(&req->rq_list);
1282 list_add_tail(&req->rq_list,
1283 &imp->imp_sending_list);
1285 spin_unlock(&imp->imp_lock);
1287 req->rq_waiting = 0;
1289                                 if (req->rq_timedout || req->rq_resend) {
1290                                         /* This is being re-sent anyway,
1291                                          * so mark the req as a resend. */
1296 if (!ptlrpc_unregister_bulk(req, 1))
1299 /* ensure previous bulk fails */
1300 old_xid = req->rq_xid;
1301 req->rq_xid = ptlrpc_next_xid();
1302 CDEBUG(D_HA, "resend bulk "
1305 old_xid, req->rq_xid);
1309 rc = ptl_send_rpc(req, 0);
1311 DEBUG_REQ(D_HA, req, "send failed (%d)",
1313 force_timer_recalc = 1;
1314 req->rq_net_err = 1;
1316 /* need to reset the timeout */
1317 force_timer_recalc = 1;
1320 spin_lock(&req->rq_lock);
1322 if (ptlrpc_client_early(req)) {
1323 ptlrpc_at_recv_early_reply(req);
1324 spin_unlock(&req->rq_lock);
1328 /* Still waiting for a reply? */
1329 if (ptlrpc_client_recv(req)) {
1330 spin_unlock(&req->rq_lock);
1334 /* Did we actually receive a reply? */
1335 if (!ptlrpc_client_replied(req)) {
1336 spin_unlock(&req->rq_lock);
1340 spin_unlock(&req->rq_lock);
1342 req->rq_status = after_reply(req);
1346 /* If there is no bulk associated with this request,
1347 * then we're done and should let the interpreter
1348 * process the reply. Similarly if the RPC returned
1349 * an error, and therefore the bulk will never arrive.
1351 if (req->rq_bulk == NULL || req->rq_status != 0)
1352 GOTO(interpret, req->rq_status);
1354 ptlrpc_rqphase_move(req, RQ_PHASE_BULK);
1357 LASSERT(req->rq_phase == RQ_PHASE_BULK);
1358 if (ptlrpc_client_bulk_active(req))
1361 if (!req->rq_bulk->bd_success) {
1362 /* The RPC reply arrived OK, but the bulk screwed
1363                          * up! Dead weird, since the server told us the RPC
1364                          * was good after getting the REPLY for its GET or
1365                          * the ACK for its PUT. */
1366 DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
1367 req->rq_status = -EIO;
1368 GOTO(interpret, req->rq_status);
1371 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
1373                 /* This moves to the "unregistering" phase; we need to wait for
1375 if (!ptlrpc_unregister_reply(req, 1))
1378 if (!ptlrpc_unregister_bulk(req, 1))
1381 /* When calling interpret receiving already should be
1383 LASSERT(!req->rq_receiving_reply);
1385 ptlrpc_req_interpret(req, req->rq_status);
1387 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE);
1389 CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:"
1390 "opc %s:%s:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
1391 imp->imp_obd->obd_uuid.uuid,
1392 req->rq_reqmsg ? lustre_msg_get_status(req->rq_reqmsg):-1,
1394 libcfs_nid2str(imp->imp_connection->c_peer.nid),
1395 req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : -1);
1397 spin_lock(&imp->imp_lock);
1398                 /* The request may no longer be on the sending or delayed list. This
1399                  * may happen when it is marked erroneous because
1400                  * ptlrpc_import_delay_req(req, status) found it impossible to
1401                  * allow sending this rpc and returned *status != 0. */
1402 if (!list_empty(&req->rq_list)) {
1403 list_del_init(&req->rq_list);
1404 atomic_dec(&imp->imp_inflight);
1406 spin_unlock(&imp->imp_lock);
1408 set->set_remaining--;
1409 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
1412 /* If we hit an error, we want to recover promptly. */
1413 RETURN(set->set_remaining == 0 || force_timer_recalc);
1416 /* Return 1 if we should give up, else 0 */
1417 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
1419 struct obd_import *imp = req->rq_import;
1423 DEBUG_REQ(req->rq_fake ? D_INFO : D_WARNING, req,
1424 "Request x"LPU64" sent from %s to NID %s"
1425 " %lus ago has %s (%lds prior to deadline).\n", req->rq_xid,
1426 imp ? imp->imp_obd->obd_name : "<?>",
1427 imp ? libcfs_nid2str(imp->imp_connection->c_peer.nid) : "<?>",
1428 cfs_time_current_sec() - req->rq_sent,
1429 req->rq_net_err ? "failed due to network error" : "timed out",
1430 req->rq_deadline - req->rq_sent);
1432 if (imp != NULL && obd_debug_peer_on_timeout)
1433 LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
1435 spin_lock(&req->rq_lock);
1436 req->rq_timedout = 1;
1437 spin_unlock(&req->rq_lock);
1439 ptlrpc_unregister_reply(req, async_unlink);
1440 ptlrpc_unregister_bulk(req, async_unlink);
1442 if (obd_dump_on_timeout)
1443 libcfs_debug_dumplog();
1446 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
1453 atomic_inc(&imp->imp_timeouts);
1455 /* The DLM server doesn't want recovery run on its imports. */
1456 if (imp->imp_dlm_fake)
1459 /* If this request is for recovery or other primordial tasks,
1460 * then error it out here. */
1461 if (req->rq_send_state != LUSTRE_IMP_FULL ||
1462 imp->imp_obd->obd_no_recov) {
1463 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)",
1464 ptlrpc_import_state_name(req->rq_send_state),
1465 ptlrpc_import_state_name(imp->imp_state));
1466 spin_lock(&req->rq_lock);
1467 req->rq_status = -ETIMEDOUT;
1469 spin_unlock(&req->rq_lock);
1473 /* if a request can't be resent we can't wait for an answer after
1475 if (req->rq_no_resend) {
1476 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:");
1480 ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
1485 int ptlrpc_expired_set(void *data)
1487 struct ptlrpc_request_set *set = data;
1488 struct list_head *tmp;
1489 time_t now = cfs_time_current_sec();
1492 LASSERT(set != NULL);
1494 /* A timeout expired; see which reqs it applies to... */
1495 list_for_each(tmp, &set->set_requests) {
1496 struct ptlrpc_request *req =
1497 list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1499 /* Request in-flight? */
1500 if (!((req->rq_phase == RQ_PHASE_RPC &&
1501 !req->rq_waiting && !req->rq_resend) ||
1502 (req->rq_phase == RQ_PHASE_BULK)))
1505 if (req->rq_timedout || /* already dealt with */
1506 req->rq_deadline > now) /* not expired */
1509                 /* Deal with this guy. Do it asynchronously so as not to block
1510                  * the ptlrpcd thread. */
1511 ptlrpc_expire_one_request(req, 1);
1514         /* When waiting for a whole set, we always break out of the
1515 * sleep so we can recalculate the timeout, or enable interrupts
1516 * if everyone's timed out. */
1520 void ptlrpc_mark_interrupted(struct ptlrpc_request *req)
1522 spin_lock(&req->rq_lock);
1524 spin_unlock(&req->rq_lock);
1527 void ptlrpc_interrupted_set(void *data)
1529 struct ptlrpc_request_set *set = data;
1530 struct list_head *tmp;
1532 LASSERT(set != NULL);
1533 CERROR("INTERRUPTED SET %p\n", set);
1535 list_for_each(tmp, &set->set_requests) {
1536 struct ptlrpc_request *req =
1537 list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1539 if (req->rq_phase != RQ_PHASE_RPC &&
1540 req->rq_phase != RQ_PHASE_UNREGISTERING)
1543 ptlrpc_mark_interrupted(req);
1547 /* get the smallest timeout in the set; this does NOT set a timeout. */
1548 int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
1550 struct list_head *tmp;
1551 time_t now = cfs_time_current_sec();
1553 struct ptlrpc_request *req;
1557 SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
1559 list_for_each(tmp, &set->set_requests) {
1560 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1562 /* request in-flight? */
1563 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) ||
1564 (req->rq_phase == RQ_PHASE_BULK) ||
1565 (req->rq_phase == RQ_PHASE_NEW)))
1568 /* Already timed out. */
1569 if (req->rq_timedout)
1572 if (req->rq_phase == RQ_PHASE_NEW)
1573 deadline = req->rq_sent; /* delayed send */
1575 deadline = req->rq_deadline;
1577 if (deadline <= now) { /* actually expired already */
1578 timeout = 1; /* ASAP */
1581 if ((timeout == 0) || (timeout > (deadline - now)))
1582 timeout = deadline - now;
1587 int ptlrpc_set_wait(struct ptlrpc_request_set *set)
1589 struct list_head *tmp;
1590 struct ptlrpc_request *req;
1591 struct l_wait_info lwi;
1595 if (list_empty(&set->set_requests))
1598 list_for_each(tmp, &set->set_requests) {
1599 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1600 if (req->rq_phase == RQ_PHASE_NEW)
1601 (void)ptlrpc_send_new_req(req);
1605 timeout = ptlrpc_set_next_timeout(set);
1607 /* wait until all complete, interrupted, or an in-flight
1609 CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
1611 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout ? timeout : 1),
1613 ptlrpc_interrupted_set, set);
1614 rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);
1616 LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);
1618 /* -EINTR => all requests have been flagged rq_intr so next
1620                  * -ETIMEDOUT => someone timed out.  When all reqs have
1621 * timed out, signals are enabled allowing completion with
1623 * I don't really care if we go once more round the loop in
1624 * the error cases -eeb. */
1625 } while (rc != 0 || set->set_remaining != 0);
1627 LASSERT(set->set_remaining == 0);
1630 list_for_each(tmp, &set->set_requests) {
1631 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
1633 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
1634 if (req->rq_status != 0)
1635 rc = req->rq_status;
1638 if (set->set_interpret != NULL) {
1639 int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
1641 rc = interpreter (set, set->set_arg, rc);
1643 struct ptlrpc_set_cbdata *cbdata, *n;
1646 list_for_each_entry_safe(cbdata, n,
1647 &set->set_cblist, psc_item) {
1648 list_del_init(&cbdata->psc_item);
1649 err = cbdata->psc_interpret(set, cbdata->psc_data, rc);
1652 OBD_SLAB_FREE(cbdata, ptlrpc_cbdata_slab,
1660 static void __ptlrpc_free_req_to_pool(struct ptlrpc_request *request)
1662 struct ptlrpc_request_pool *pool = request->rq_pool;
1664 spin_lock(&pool->prp_lock);
1665 LASSERT(list_empty(&request->rq_list));
1666 LASSERT(!request->rq_receiving_reply);
1667 list_add_tail(&request->rq_list, &pool->prp_req_list);
1668 spin_unlock(&pool->prp_lock);
1671 static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
1674 if (request == NULL) {
1679 LASSERTF(!request->rq_receiving_reply, "req %p\n", request);
1680 LASSERTF(request->rq_rqbd == NULL, "req %p\n",request);/* client-side */
1681 LASSERTF(list_empty(&request->rq_list), "req %p\n", request);
1682 LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request);
1683 LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request);
1684 LASSERTF(!request->rq_replay, "req %p\n", request);
1686 /* We must take it off the imp_replay_list first. Otherwise, we'll set
1687 * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
1688 if (request->rq_import != NULL) {
1690 spin_lock(&request->rq_import->imp_lock);
1691 list_del_init(&request->rq_replay_list);
1693 spin_unlock(&request->rq_import->imp_lock);
1695 LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request);
1697 if (atomic_read(&request->rq_refcount) != 0) {
1698 DEBUG_REQ(D_ERROR, request,
1699 "freeing request with nonzero refcount");
1703 if (request->rq_repbuf != NULL) {
1704 OBD_FREE(request->rq_repbuf, request->rq_replen);
1705 request->rq_repbuf = NULL;
1706 request->rq_repmsg = NULL;
1708 if (request->rq_export != NULL) {
1709 class_export_put(request->rq_export);
1710 request->rq_export = NULL;
1712 if (request->rq_import != NULL) {
1713 class_import_put(request->rq_import);
1714 request->rq_import = NULL;
1716 if (request->rq_bulk != NULL)
1717 ptlrpc_free_bulk(request->rq_bulk);
1719 if (request->rq_pool) {
1720 __ptlrpc_free_req_to_pool(request);
1722 if (request->rq_reqmsg != NULL) {
1723 OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
1724 request->rq_reqmsg = NULL;
1726 OBD_FREE(request, sizeof(*request));
1731 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
1732 void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
1734 LASSERT_SPIN_LOCKED(&request->rq_import->imp_lock);
1735 (void)__ptlrpc_req_finished(request, 1);
1738 static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
1741 if (request == NULL)
1744 if (request == LP_POISON ||
1745 request->rq_reqmsg == LP_POISON) {
1746 CERROR("dereferencing freed request (bug 575)\n");
1751 DEBUG_REQ(D_INFO, request, "refcount now %u",
1752 atomic_read(&request->rq_refcount) - 1);
1754 if (atomic_dec_and_test(&request->rq_refcount)) {
1755 __ptlrpc_free_req(request, locked);
1762 void ptlrpc_req_finished(struct ptlrpc_request *request)
1764 __ptlrpc_req_finished(request, 0);
1767 __u64 ptlrpc_req_xid(struct ptlrpc_request *request)
1769 return request->rq_xid;
1771 EXPORT_SYMBOL(ptlrpc_req_xid);
1773 /* Disengage the client's reply buffer from the network
1774 * NB does _NOT_ unregister any client-side bulk.
1775 * IDEMPOTENT, but _not_ safe against concurrent callers.
1776 * The request owner (i.e. the thread doing the I/O) must call...
1778 int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
1782 struct l_wait_info lwi;
1786 LASSERT(!in_interrupt());
1788         /* Let's set up a deadline for the reply unlink. */
1789 if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
1790 async && request->rq_reply_deadline == 0)
1791 request->rq_reply_deadline = cfs_time_current_sec()+LONG_UNLINK;
1793 /* Nothing left to do. */
1794 if (!ptlrpc_client_recv_or_unlink(request))
1797 LNetMDUnlink(request->rq_reply_md_h);
1799 /* Let's check it once again. */
1800 if (!ptlrpc_client_recv_or_unlink(request))
1803 /* Move to "Unregistering" phase as reply was not unlinked yet. */
1804 ptlrpc_rqphase_move(request, RQ_PHASE_UNREGISTERING);
1806 /* Do not wait for unlink to finish. */
1810 /* We have to l_wait_event() whatever the result, to give liblustre
1811 * a chance to run reply_in_callback(), and to make sure we've
1812 * unlinked before returning a req to the pool */
1813 if (request->rq_set != NULL)
1814 wq = &request->rq_set->set_waitq;
1816 wq = &request->rq_reply_waitq;
1819 /* Network access will complete in finite time but the HUGE
1820 * timeout lets us CWARN for visibility of sluggish NALs */
1821 lwi = LWI_TIMEOUT_INTERVAL(cfs_time_seconds(LONG_UNLINK),
1822 cfs_time_seconds(1), NULL, NULL);
1823 rc = l_wait_event(*wq, !ptlrpc_client_recv_or_unlink(request),
1826 ptlrpc_rqphase_move(request, request->rq_next_phase);
1830 LASSERT(rc == -ETIMEDOUT);
1831 DEBUG_REQ(D_WARNING, request, "Unexpectedly long timeout "
1832 "rvcng=%d unlnk=%d", request->rq_receiving_reply,
1833 request->rq_must_unlink);
1838 /* caller must hold imp->imp_lock */
1839 void ptlrpc_free_committed(struct obd_import *imp)
1841 struct list_head *tmp, *saved;
1842 struct ptlrpc_request *req;
1843 struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
1846 LASSERT(imp != NULL);
1848 LASSERT_SPIN_LOCKED(&imp->imp_lock);
1851 if (imp->imp_peer_committed_transno == imp->imp_last_transno_checked &&
1852 imp->imp_generation == imp->imp_last_generation_checked) {
1853 CDEBUG(D_RPCTRACE, "%s: skip recheck: last_committed "LPU64"\n",
1854 imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
1859 CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
1860 imp->imp_obd->obd_name, imp->imp_peer_committed_transno,
1861 imp->imp_generation);
1862 imp->imp_last_transno_checked = imp->imp_peer_committed_transno;
1863 imp->imp_last_generation_checked = imp->imp_generation;
1865 list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
1866 req = list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1868 /* XXX ok to remove when 1357 resolved - rread 05/29/03 */
1869 LASSERT(req != last_req);
1872 if (req->rq_import_generation < imp->imp_generation) {
1873 DEBUG_REQ(D_RPCTRACE, req, "free request with old gen");
1877 if (req->rq_replay) {
1878 DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)");
1882 /* not yet committed */
1883 if (req->rq_transno > imp->imp_peer_committed_transno) {
1884 DEBUG_REQ(D_RPCTRACE, req, "stopping search");
1888 DEBUG_REQ(D_RPCTRACE, req, "commit (last_committed "LPU64")",
1889 imp->imp_peer_committed_transno);
1891 spin_lock(&req->rq_lock);
1893 spin_unlock(&req->rq_lock);
1894 if (req->rq_commit_cb != NULL)
1895 req->rq_commit_cb(req);
1896 list_del_init(&req->rq_replay_list);
1897 __ptlrpc_req_finished(req, 1);
1904 void ptlrpc_cleanup_client(struct obd_import *imp)
1911 void ptlrpc_resend_req(struct ptlrpc_request *req)
1913 DEBUG_REQ(D_HA, req, "going to resend");
1914 lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 });
1915 req->rq_status = -EAGAIN;
1917 spin_lock(&req->rq_lock);
1919 req->rq_net_err = 0;
1920 req->rq_timedout = 0;
1922 __u64 old_xid = req->rq_xid;
1924 /* ensure previous bulk fails */
1925 req->rq_xid = ptlrpc_next_xid();
1926 CDEBUG(D_HA, "resend bulk old x"LPU64" new x"LPU64"\n",
1927 old_xid, req->rq_xid);
1929 ptlrpc_client_wake_req(req);
1930 spin_unlock(&req->rq_lock);
1933 /* XXX: this function and rq_status are currently unused */
1934 void ptlrpc_restart_req(struct ptlrpc_request *req)
1936 DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
1937 req->rq_status = -ERESTARTSYS;
1939 spin_lock(&req->rq_lock);
1940 req->rq_restart = 1;
1941 req->rq_timedout = 0;
1942 ptlrpc_client_wake_req(req);
1943 spin_unlock(&req->rq_lock);
1946 static void interrupted_request(void *data)
1948 struct ptlrpc_request *req = data;
1949 DEBUG_REQ(D_HA, req, "request interrupted");
1950 spin_lock(&req->rq_lock);
1952 spin_unlock(&req->rq_lock);
1955 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
1958 atomic_inc(&req->rq_refcount);
1962 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
1963 struct obd_import *imp)
1965 struct list_head *tmp;
1967 LASSERT_SPIN_LOCKED(&imp->imp_lock);
1969 /* clear this for new requests that were resent as well
1970 as resent replayed requests. */
1971 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1973 /* don't re-add requests that have been replayed */
1974 if (!list_empty(&req->rq_replay_list))
1977 lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY);
1979 LASSERT(imp->imp_replayable);
1980 /* Balanced in ptlrpc_free_committed, usually. */
1981 ptlrpc_request_addref(req);
1982 list_for_each_prev(tmp, &imp->imp_replay_list) {
1983 struct ptlrpc_request *iter =
1984 list_entry(tmp, struct ptlrpc_request, rq_replay_list);
1986 /* We may have duplicate transnos if we create and then
1987                  * open a file, or for closes retained to match creating
1988 * opens, so use req->rq_xid as a secondary key.
1989 * (See bugs 684, 685, and 428.)
1990 * XXX no longer needed, but all opens need transnos!
1992 if (iter->rq_transno > req->rq_transno)
1995 if (iter->rq_transno == req->rq_transno) {
1996 LASSERT(iter->rq_xid != req->rq_xid);
1997 if (iter->rq_xid > req->rq_xid)
2001 list_add(&req->rq_replay_list, &iter->rq_replay_list);
2005 list_add_tail(&req->rq_replay_list, &imp->imp_replay_list);
2008 int ptlrpc_queue_wait(struct ptlrpc_request *req)
2012 struct l_wait_info lwi;
2013 struct obd_import *imp = req->rq_import;
2014 cfs_duration_t timeout = CFS_TICK;
2018 LASSERT(req->rq_set == NULL);
2019 LASSERT(!req->rq_receiving_reply);
2021 /* for distributed debugging */
2022 lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
2023 LASSERT(imp->imp_obd != NULL);
2024 CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc "
2025 "%s:%s:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
2026 imp->imp_obd->obd_uuid.uuid,
2027 lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
2028 libcfs_nid2str(imp->imp_connection->c_peer.nid),
2029 lustre_msg_get_opc(req->rq_reqmsg));
2031 /* Mark phase here for a little debug help */
2032 ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
2034 spin_lock(&imp->imp_lock);
2035 req->rq_import_generation = imp->imp_generation;
2037 if (ptlrpc_import_delay_req(imp, req, &rc)) {
2038 list_del_init(&req->rq_list);
2039 list_add_tail(&req->rq_list, &imp->imp_delayed_list);
2040 atomic_inc(&imp->imp_inflight);
2041 spin_unlock(&imp->imp_lock);
2043 DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%s != %s)",
2045 ptlrpc_import_state_name(req->rq_send_state),
2046 ptlrpc_import_state_name(imp->imp_state));
2047 lwi = LWI_INTR(interrupted_request, req);
2048 rc = l_wait_event(req->rq_reply_waitq,
2049 (req->rq_send_state == imp->imp_state ||
2050 req->rq_err || req->rq_intr),
2052 DEBUG_REQ(D_HA, req, "\"%s\" awake: (%s == %s or %d/%d == 1)",
2054 ptlrpc_import_state_name(imp->imp_state),
2055 ptlrpc_import_state_name(req->rq_send_state),
2056 req->rq_err, req->rq_intr);
2058 spin_lock(&imp->imp_lock);
2059 list_del_init(&req->rq_list);
2060 atomic_dec(&imp->imp_inflight);
2063 /* rq_status was set locally */
2064 rc = req->rq_status ? req->rq_status : -EIO;
2066 else if (req->rq_intr) {
2069 else if (req->rq_no_resend) {
2078 spin_unlock(&imp->imp_lock);
2079 req->rq_status = rc; // XXX this ok?
2083 if (req->rq_resend) {
2084 if (req->rq_bulk != NULL) {
2085 ptlrpc_unregister_bulk(req, 0);
2087 /* bulk requests are supposed to be
2088 * idempotent, so we are free to bump the xid
2089 * here, which we need to do before
2090 * registering the bulk again (bug 6371).
2091 * print the old xid first for sanity.
2093 DEBUG_REQ(D_HA, req, "bumping xid for bulk: ");
2094 req->rq_xid = ptlrpc_next_xid();
2097 DEBUG_REQ(D_HA, req, "resending: ");
2100 /* XXX this is the same as ptlrpc_set_wait */
2101 LASSERT(list_empty(&req->rq_list));
2102 list_add_tail(&req->rq_list, &imp->imp_sending_list);
2103 atomic_inc(&imp->imp_inflight);
2104 spin_unlock(&imp->imp_lock);
2106 rc = ptl_send_rpc(req, 0);
2108 DEBUG_REQ(D_HA, req, "send failed (%d); recovering", rc);
2110 timeoutl = req->rq_deadline - cfs_time_current_sec();
2111 timeout = (timeoutl <= 0 || rc) ? CFS_TICK :
2112 cfs_time_seconds(timeoutl);
2113 DEBUG_REQ(D_NET, req,
2114 "-- sleeping for "CFS_DURATION_T" ticks", timeout);
2115 lwi = LWI_TIMEOUT_INTR(timeout, NULL, interrupted_request, req);
2116 brc = l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req),
2118 /* Wait again if we changed deadline */
2119 } while ((brc == -ETIMEDOUT) &&
2120 (req->rq_deadline > cfs_time_current_sec()));
2122 if ((brc == -ETIMEDOUT) && !ptlrpc_expire_one_request(req, 0)) {
2123 /* Wait forever for reconnect / replay or failure */
2124 lwi = LWI_INTR(interrupted_request, req);
2125 brc = l_wait_event(req->rq_reply_waitq, ptlrpc_check_reply(req),
2129 CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:nid:opc "
2130 "%s:%s:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
2131 imp->imp_obd->obd_uuid.uuid,
2132 lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
2133 libcfs_nid2str(imp->imp_connection->c_peer.nid),
2134 lustre_msg_get_opc(req->rq_reqmsg));
2136 /* If the reply was received normally, this just grabs the spinlock
2137 * (ensuring the reply callback has returned), sees that
2138 * req->rq_receiving_reply is clear and returns. */
2139 ptlrpc_unregister_reply(req, 0);
2141 spin_lock(&imp->imp_lock);
2142 list_del_init(&req->rq_list);
2143 atomic_dec(&imp->imp_inflight);
2144 spin_unlock(&imp->imp_lock);
2147 DEBUG_REQ(D_RPCTRACE, req, "err rc=%d status=%d",
2148 rc, req->rq_status);
2149 rc = rc ? rc : req->rq_status;
2150 GOTO(out, rc = rc ? rc : -EIO);
2154 /* Should only be interrupted if we timed out. */
2155 if (!req->rq_timedout)
2156 DEBUG_REQ(D_ERROR, req,
2157 "rq_intr set but rq_timedout not");
2158 GOTO(out, rc = -EINTR);
2161 /* Resend if we need to */
2162         if (req->rq_resend || req->rq_timedout) {
2163 /* ...unless we were specifically told otherwise. */
2164 if (req->rq_no_resend)
2165 GOTO(out, rc = -ETIMEDOUT);
2166 spin_lock(&imp->imp_lock);
2167                 /* we can have rq_timeout on a dlm fake import, which does not support
2168                  * recovery - but we need to resend the request on this import instead
2169                  * of returning an error */
2174 if (!ptlrpc_client_replied(req)) {
2175 /* How can this be? -eeb */
2176 DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
2178 GOTO(out, rc = req->rq_status);
2181 rc = after_reply(req);
2182 /* NB may return +ve success rc */
2183 if (req->rq_resend) {
2184 spin_lock(&imp->imp_lock);
2189 if (req->rq_bulk != NULL) {
2191 /* success so far. Note that anything going wrong
2192                  * with bulk now is EXTREMELY strange, since the
2193                  * server must have believed that the bulk
2194                  * transferred OK before it replied with success to
2196 lwi = LWI_TIMEOUT(timeout, NULL, NULL);
2197 brc = l_wait_event(req->rq_reply_waitq,
2198 !ptlrpc_client_bulk_active(req),
2200 LASSERT(brc == 0 || brc == -ETIMEDOUT);
2202 LASSERT(brc == -ETIMEDOUT);
2203 DEBUG_REQ(D_ERROR, req, "bulk timed out");
2205 } else if (!req->rq_bulk->bd_success) {
2206 DEBUG_REQ(D_ERROR, req, "bulk transfer failed");
2211 ptlrpc_unregister_bulk(req, 0);
2214 LASSERT(!req->rq_receiving_reply);
2215 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
2216 cfs_waitq_broadcast(&imp->imp_recovery_waitq);
2220 struct ptlrpc_replay_async_args {
2222 int praa_old_status;
2225 static int ptlrpc_replay_interpret(struct ptlrpc_request *req,
2226 void * data, int rc)
2228 struct ptlrpc_replay_async_args *aa = data;
2229 struct obd_import *imp = req->rq_import;
2232 atomic_dec(&imp->imp_replay_inflight);
2234 if (!ptlrpc_client_replied(req)) {
2235 CERROR("request replay timed out, restarting recovery\n");
2236 GOTO(out, rc = -ETIMEDOUT);
2239 if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR &&
2240 (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN ||
2241 lustre_msg_get_status(req->rq_repmsg) == -ENODEV))
2242 GOTO(out, rc = lustre_msg_get_status(req->rq_repmsg));
2244 /* VBR: check version failure */
2245 if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) {
2246                 /* replay failed due to version mismatch */
2247 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n");
2248 spin_lock(&imp->imp_lock);
2249 imp->imp_vbr_failed = 1;
2250 spin_unlock(&imp->imp_lock);
2252 /* The transno had better not change over replay. */
2253 LASSERT(lustre_msg_get_transno(req->rq_reqmsg) ==
2254 lustre_msg_get_transno(req->rq_repmsg) ||
2255 lustre_msg_get_transno(req->rq_repmsg) == 0);
2258 spin_lock(&imp->imp_lock);
2259 imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg);
2260 spin_unlock(&imp->imp_lock);
2261 LASSERT(imp->imp_last_replay_transno);
2263 DEBUG_REQ(D_HA, req, "got rep");
2265         /* let the callback do fixups, possibly including changes to the request itself */
2266 if (req->rq_replay_cb)
2267 req->rq_replay_cb(req);
2269 if (ptlrpc_client_replied(req) &&
2270 lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) {
2271 DEBUG_REQ(D_ERROR, req, "status %d, old was %d",
2272 lustre_msg_get_status(req->rq_repmsg),
2273 aa->praa_old_status);
2275 /* Put it back for re-replay. */
2276 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status);
2279 /* continue with recovery */
2280 rc = ptlrpc_import_recovery_state_machine(imp);
2282 req->rq_send_state = aa->praa_old_state;
2285 /* this replay failed, so restart recovery */
2286 ptlrpc_connect_import(imp, NULL);
2292 int ptlrpc_replay_req(struct ptlrpc_request *req)
2294 struct ptlrpc_replay_async_args *aa;
2297 LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
2299 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2300 aa = ptlrpc_req_async_args(req);
2301 memset(aa, 0, sizeof *aa);
2303 /* Prepare request to be resent with ptlrpcd */
2304 aa->praa_old_state = req->rq_send_state;
2305 req->rq_send_state = LUSTRE_IMP_REPLAY;
2306 req->rq_phase = RQ_PHASE_NEW;
2307 req->rq_next_phase = RQ_PHASE_UNDEFINED;
2309 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg);
2311 req->rq_interpret_reply = ptlrpc_replay_interpret;
2312 /* Readjust the timeout for current conditions */
2313 ptlrpc_at_set_req_timeout(req);
2315 DEBUG_REQ(D_HA, req, "REPLAY");
2317 atomic_inc(&req->rq_import->imp_replay_inflight);
2318 ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
2320 ptlrpcd_add_req(req);
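/* The replayed request is driven by ptlrpcd; ptlrpc_replay_interpret() above
 * then records imp_last_replay_transno and calls
 * ptlrpc_import_recovery_state_machine() to continue recovery. */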
2324 void ptlrpc_abort_inflight(struct obd_import *imp)
2326 struct list_head *tmp, *n;
2329 /* Make sure that no new requests get processed for this import.
2330 * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
2331 * this flag and then putting requests on sending_list or delayed_list.
2333 spin_lock(&imp->imp_lock);
2335 /* XXX locking? Maybe we should remove each request with the list
2336 * locked? Also, how do we know if the requests on the list are
2337 * being freed at this time?
2339 list_for_each_safe(tmp, n, &imp->imp_sending_list) {
2340 struct ptlrpc_request *req =
2341 list_entry(tmp, struct ptlrpc_request, rq_list);
2343 DEBUG_REQ(D_RPCTRACE, req, "inflight");
2345 spin_lock (&req->rq_lock);
2346 if (req->rq_import_generation < imp->imp_generation) {
2348 req->rq_status = -EINTR;
2349 ptlrpc_client_wake_req(req);
2351 spin_unlock (&req->rq_lock);
2354 list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
2355 struct ptlrpc_request *req =
2356 list_entry(tmp, struct ptlrpc_request, rq_list);
2358 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req");
2360 spin_lock (&req->rq_lock);
2361 if (req->rq_import_generation < imp->imp_generation) {
2363 req->rq_status = -EINTR;
2364 ptlrpc_client_wake_req(req);
2366 spin_unlock (&req->rq_lock);
2369 /* Last chance to free reqs left on the replay list, but we
2370 * will still leak reqs that haven't committed. */
2371 if (imp->imp_replayable)
2372 ptlrpc_free_committed(imp);
2374 spin_unlock(&imp->imp_lock);
2379 void ptlrpc_abort_set(struct ptlrpc_request_set *set)
2381 struct list_head *tmp, *pos;
2383 LASSERT(set != NULL);
2385 list_for_each_safe(pos, tmp, &set->set_requests) {
2386 struct ptlrpc_request *req =
2387 list_entry(pos, struct ptlrpc_request, rq_set_chain);
2389 spin_lock(&req->rq_lock);
2390 if (req->rq_phase != RQ_PHASE_RPC) {
2391 spin_unlock(&req->rq_lock);
2396 req->rq_status = -EINTR;
2397 ptlrpc_client_wake_req(req);
2398 spin_unlock(&req->rq_lock);
2402 static __u64 ptlrpc_last_xid;
2403 static spinlock_t ptlrpc_last_xid_lock;
2405 /* Initialize the XID for the node. This is common among all requests on
2406 * this node, and only requires the property that it is monotonically
2407 * increasing. It does not need to be sequential. Since this is also used
2408 * as the RDMA match bits, it is important that a single client NOT have
2409 * the same match bits for two different in-flight requests, hence we do
2410 * NOT want to have an XID per target or similar.
2412 * To avoid an unlikely collision between match bits after a client reboot
2413 * (which would deliver old data into the wrong RDMA buffer) we initialize
2414 * the XID based on the current time, assuming a maximum RPC rate of 1M RPC/s.
2415 * If the time is clearly incorrect, we instead use a 62-bit random number.
2416 * In the worst case the random number will overflow 1M RPCs per second in
2417 * 9133 years, or permutations thereof.
2419 #define YEAR_2004 (1ULL << 30)
2420 void ptlrpc_init_xid(void)
2422 time_t now = cfs_time_current_sec();
2424 spin_lock_init(&ptlrpc_last_xid_lock);
2425 if (now < YEAR_2004) {
2426 ll_get_random_bytes(&ptlrpc_last_xid, sizeof(ptlrpc_last_xid));
2427 ptlrpc_last_xid >>= 2;
2428 ptlrpc_last_xid |= (1ULL << 61);
2430 ptlrpc_last_xid = (__u64)now << 20;
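/* Arithmetic behind the comment above: shifting the current time left by
 * 20 bits reserves 2^20 (~1M) XID values per second of wall-clock time,
 * matching the assumed maximum rate of 1M RPC/s. The random fallback shifts
 * a 64-bit random value right by 2 and sets bit 61, so the initial XID lies
 * in [2^61, 2^62). */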
2434 __u64 ptlrpc_next_xid(void)
2437 spin_lock(&ptlrpc_last_xid_lock);
2438 tmp = ++ptlrpc_last_xid;
2439 spin_unlock(&ptlrpc_last_xid_lock);
2443 __u64 ptlrpc_sample_next_xid(void)
2445 if (sizeof(long) < 8) {
2446 /* need to avoid possible word tearing on 32-bit systems */
2448 spin_lock(&ptlrpc_last_xid_lock);
2449 tmp = ptlrpc_last_xid + 1;
2450 spin_unlock(&ptlrpc_last_xid_lock);
2453         /* No need to lock, since the returned value is racy anyway */
2454 return ptlrpc_last_xid + 1;
2456 EXPORT_SYMBOL(ptlrpc_sample_next_xid);