/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

#include "ptlrpc_internal.h"
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}
struct obd_uuid *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return &req->rq_connection->c_remote_uuid;
}
struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        struct ptlrpc_peer peer;
        int err;

        err = ptlrpc_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c) {
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}
void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
                                 struct obd_uuid *uuid)
{
        struct ptlrpc_peer peer;
        int err;

        err = ptlrpc_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return;
        }

        memcpy(&conn->c_peer, &peer, sizeof(peer));
}
static inline struct ptlrpc_bulk_desc *new_bulk(void)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (!desc)
                return NULL;

        spin_lock_init(&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
        INIT_LIST_HEAD(&desc->bd_page_list);
        desc->bd_md_h = PTL_HANDLE_NONE;
        desc->bd_me_h = PTL_HANDLE_NONE;

        return desc;
}
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
                                              int type, int portal)
{
        struct obd_import *imp = req->rq_import;
        struct ptlrpc_bulk_desc *desc;

        LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);

        desc = new_bulk();
        if (desc == NULL)
                return NULL;

        desc->bd_import_generation = req->rq_import_generation;
        desc->bd_import = class_import_get(imp);
        desc->bd_req = req;
        desc->bd_type = type;
        desc->bd_portal = portal;

        /* This makes the req own the desc, which is freed when the req
         * itself is freed. */
        req->rq_bulk = desc;

        return desc;
}
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
                                              int type, int portal)
{
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;

        LASSERT(type == BULK_PUT_SOURCE || type == BULK_GET_SINK);

        desc = new_bulk();
        if (desc == NULL)
                return NULL;

        desc->bd_export = class_export_get(exp);
        desc->bd_req = req;
        desc->bd_type = type;
        desc->bd_portal = portal;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        return desc;
}
int ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                          struct page *page, int pageoffset, int len)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk == NULL)
                return -ENOMEM;

        LASSERT(page != NULL);
        LASSERT(pageoffset >= 0);
        LASSERT(len > 0);
        LASSERT(pageoffset + len <= PAGE_SIZE);

        bulk->bp_page = page;
        bulk->bp_pageoffset = pageoffset;
        bulk->bp_buflen = len;

        bulk->bp_desc = desc;
        list_add_tail(&bulk->bp_link, &desc->bd_page_list);
        desc->bd_page_count++;
        return 0;
}
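
/* A minimal client-side usage sketch for the two preparation helpers above,
 * assuming a single-page read-style transfer where the server PUTs into the
 * client's sink.  "req" and "page" are hypothetical, and OST_BULK_PORTAL is
 * only an example portal:
 *
 *      struct ptlrpc_bulk_desc *desc;
 *      int rc;
 *
 *      desc = ptlrpc_prep_bulk_imp(req, BULK_PUT_SINK, OST_BULK_PORTAL);
 *      if (desc == NULL)
 *              return -ENOMEM;
 *      rc = ptlrpc_prep_bulk_page(desc, page, 0, PAGE_SIZE);
 *
 * After this, desc is owned by req (rq_bulk) and is freed along with it. */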
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        ENTRY;

        LASSERT(desc != NULL);
        LASSERT(desc->bd_page_count != 0x5a5a5a5a); /* not freed already */
        LASSERT(!desc->bd_network_rw);              /* network hands off */

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        LASSERT(desc->bd_page_count == 0);
        LASSERT((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

        if (desc->bd_export)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);

        OBD_FREE(desc, sizeof(*desc));
        EXIT;
}
void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        LASSERT(bulk != NULL);

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
}
struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        LASSERT((unsigned long)imp > 0x1000);

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_timeout = obd_timeout;
        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = class_import_get(imp);
        request->rq_phase = RQ_PHASE_NEW;

        /* XXX FIXME bug 249 */
        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        request->rq_connection = ptlrpc_connection_addref(imp->imp_connection);

        spin_lock_init(&request->rq_lock);
        INIT_LIST_HEAD(&request->rq_list);
        init_waitqueue_head(&request->rq_wait_for_rep);
        request->rq_xid = ptlrpc_next_xid();
        atomic_set(&request->rq_refcount, 1);

        request->rq_reqmsg->opc = opcode;
        request->rq_reqmsg->flags = 0;

        RETURN(request);
}
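
/* A minimal synchronous usage sketch for ptlrpc_prep_req(), assuming a
 * one-buffer OST_GETATTR message; the buffer size and reply length shown
 * here are illustrative, not prescriptive:
 *
 *      int size = sizeof(struct ost_body);
 *      struct ptlrpc_request *req;
 *      int rc;
 *
 *      req = ptlrpc_prep_req(imp, OST_GETATTR, 1, &size, NULL);
 *      if (req == NULL)
 *              return -ENOMEM;
 *      req->rq_replen = lustre_msg_size(1, &size);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 *
 * ptlrpc_req_finished() drops the single reference the request is returned
 * with. */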
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
        struct ptlrpc_request_set *set;
        ENTRY;

        OBD_ALLOC(set, sizeof *set);
        if (!set)
                RETURN(NULL);
        INIT_LIST_HEAD(&set->set_requests);
        init_waitqueue_head(&set->set_waitq);
        set->set_remaining = 0;

        RETURN(set);
}
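
/* A minimal usage sketch for the set API, assuming req1 and req2 were built
 * with ptlrpc_prep_req(); ptlrpc_set_add_req(), ptlrpc_set_wait() and
 * ptlrpc_set_destroy() are defined below:
 *
 *      struct ptlrpc_request_set *set;
 *      int rc;
 *
 *      set = ptlrpc_prep_set();
 *      if (set == NULL)
 *              return -ENOMEM;
 *      ptlrpc_set_add_req(set, req1);
 *      ptlrpc_set_add_req(set, req2);
 *      rc = ptlrpc_set_wait(set);
 *      ptlrpc_set_destroy(set);
 *
 * ptlrpc_set_wait() sends both RPCs and waits for both to complete.  The set
 * takes over the caller's request references, so there is no explicit
 * ptlrpc_req_finished() here. */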
/* Finish with this set; opposite of prep_set. */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        struct list_head *next;
        int expected_phase;
        int n = 0;
        ENTRY;

        /* Requests on the set should either all be completed, or all be new */
        expected_phase = (set->set_remaining == 0) ?
                         RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);
                n++;
        }

        LASSERT(set->set_remaining == 0 || set->set_remaining == n);

        list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);

                LASSERT(req->rq_phase == expected_phase);

                if (req->rq_phase == RQ_PHASE_NEW) {

                        if (req->rq_interpret_reply != NULL) {
                                int (*interpreter)(struct ptlrpc_request *,
                                                   void *, int) =
                                        req->rq_interpret_reply;

                                /* higher level (i.e. LOV) failed;
                                 * let the sub reqs clean up */
                                req->rq_status = -EBADR;
                                interpreter(req, &req->rq_async_args,
                                            req->rq_status);
                        }
                        set->set_remaining--;
                }

                ptlrpc_req_finished(req);
        }

        LASSERT(set->set_remaining == 0);

        OBD_FREE(set, sizeof(*set));
        EXIT;
}
void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req)
{
        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
        set->set_remaining++;
}
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        unsigned long flags;
        int rc = 0;
        ENTRY;

        /* serialise with network callback */
        spin_lock_irqsave(&req->rq_lock, flags);

        if (req->rq_replied) {
                DEBUG_REQ(D_NET, req, "REPLIED:");
                GOTO(out, rc = 1);
        }

        if (req->rq_err) {
                DEBUG_REQ(D_ERROR, req, "ABORTED:");
                GOTO(out, rc = 1);
        }

        if (req->rq_resend) {
                DEBUG_REQ(D_ERROR, req, "RESEND:");
                GOTO(out, rc = 1);
        }

        if (req->rq_restart) {
                DEBUG_REQ(D_ERROR, req, "RESTART:");
                GOTO(out, rc = 1);
        }
        EXIT;
 out:
        spin_unlock_irqrestore(&req->rq_lock, flags);
        DEBUG_REQ(D_NET, req, "rc = %d for", rc);
        return rc;
}
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;
        ENTRY;

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR (%d)", err);
                if (err >= 0)
                        CERROR("Error Reply has status >= zero\n");
                RETURN(err < 0 ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }

        RETURN(err);
}
#warning this needs to change after robert fixes eviction handling
static int after_reply(struct ptlrpc_request *req, int *restartp)
{
        unsigned long flags;
        struct obd_import *imp = req->rq_import;
        int rc;
        ENTRY;

        LASSERT(!req->rq_receiving_reply);
        LASSERT(req->rq_replied);

        if (restartp != NULL)
                *restartp = 0;

        /* NB Until this point, the whole of the incoming message,
         * including buflens, status etc is in the sender's byte order. */

#if SWAB_PARANOIA
        /* Clear reply swab mask; this is a new reply in sender's byte order */
        req->rq_rep_swab_mask = 0;
#endif
        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                RETURN(-EPROTO);
        }

        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                RETURN(-EPROTO);
        }

        /* Store transno in reqmsg for replay. */
        req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;

        rc = ptlrpc_check_status(req);

        /* Either we've been evicted, or the server has failed for
         * some reason.  Try to reconnect, and if that fails, punt to
         * the upcall. */
        if (rc == -ENOTCONN) {
                if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
                    imp->imp_obd->obd_no_recov || imp->imp_dlm_fake)
                        RETURN(-ENOTCONN);

                ptlrpc_request_handle_eviction(req);

                if (req->rq_no_resend)
                        RETURN(rc); /* -ENOTCONN */

                if (req->rq_resend) {
                        if (restartp == NULL)
                                LBUG(); /* async resend not supported yet */
                        spin_lock_irqsave(&req->rq_lock, flags);
                        req->rq_resend = 0;
                        spin_unlock_irqrestore(&req->rq_lock, flags);

                        lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
                        DEBUG_REQ(D_HA, req, "resending: ");
                        *restartp = 1;
                        RETURN(0);
                }

                CERROR("request should be err or resend: %p\n", req);
                LBUG();
        }

        if (req->rq_import->imp_replayable) {
                spin_lock_irqsave(&imp->imp_lock, flags);
                if ((req->rq_replay || req->rq_transno != 0) && rc >= 0)
                        ptlrpc_retain_replayable_request(req, imp);

                if (req->rq_transno > imp->imp_max_transno)
                        imp->imp_max_transno = req->rq_transno;

                /* Replay-enabled imports return commit-status information. */
                if (req->rq_repmsg->last_committed)
                        imp->imp_peer_committed_transno =
                                req->rq_repmsg->last_committed;
                ptlrpc_free_committed(imp);
                spin_unlock_irqrestore(&imp->imp_lock, flags);
        }

        RETURN(rc);
}
int ptlrpc_check_set(struct ptlrpc_request_set *set)
{
        unsigned long flags;
        struct list_head *tmp;
        ENTRY;

        if (set->set_remaining == 0)
                RETURN(1);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                struct obd_import *imp = req->rq_import;
                int rc = 0;

                if (!(req->rq_phase == RQ_PHASE_RPC ||
                      req->rq_phase == RQ_PHASE_BULK ||
                      req->rq_phase == RQ_PHASE_INTERPRET ||
                      req->rq_phase == RQ_PHASE_COMPLETE)) {
                        DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
                        LBUG();
                }

                if (req->rq_phase == RQ_PHASE_COMPLETE)
                        continue;

                if (req->rq_phase == RQ_PHASE_INTERPRET)
                        GOTO(interpret, req->rq_status);

                if (req->rq_err) {
                        ptlrpc_unregister_reply(req);
                        if (req->rq_status == 0)
                                req->rq_status = -EIO;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        GOTO(interpret, req->rq_status);
                }

                if (req->rq_intr) {
                        /* NB could be on delayed list */
                        ptlrpc_unregister_reply(req);
                        req->rq_status = -EINTR;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        GOTO(interpret, req->rq_status);
                }

                if (req->rq_phase == RQ_PHASE_RPC) {
                        int do_restart = 0;

                        if (req->rq_waiting || req->rq_resend) {
                                spin_lock_irqsave(&imp->imp_lock, flags);

                                if (req->rq_level > imp->imp_level) {
                                        spin_unlock_irqrestore(&imp->imp_lock,
                                                               flags);
                                        continue;
                                }

                                list_del(&req->rq_list);
                                list_add_tail(&req->rq_list,
                                              &imp->imp_sending_list);

                                if (req->rq_import_generation <
                                    imp->imp_generation) {
                                        req->rq_status = -EIO;
                                        req->rq_phase = RQ_PHASE_INTERPRET;
                                        spin_unlock_irqrestore(&imp->imp_lock,
                                                               flags);
                                        GOTO(interpret, req->rq_status);
                                }
                                spin_unlock_irqrestore(&imp->imp_lock, flags);

                                req->rq_waiting = 0;
                                if (req->rq_resend) {
                                        lustre_msg_add_flags(req->rq_reqmsg,
                                                             MSG_RESENT);
                                        spin_lock_irqsave(&req->rq_lock, flags);
                                        req->rq_resend = 0;
                                        spin_unlock_irqrestore(&req->rq_lock,
                                                               flags);

                                        ptlrpc_unregister_reply(req);
                                        if (req->rq_bulk)
                                                ptlrpc_unregister_bulk(req);
                                }

                                rc = ptl_send_rpc(req);
                                if (rc) {
                                        req->rq_status = rc;
                                        req->rq_phase = RQ_PHASE_INTERPRET;
                                        GOTO(interpret, req->rq_status);
                                }
                        }

                        /* Ensure the network callback returned */
                        spin_lock_irqsave(&req->rq_lock, flags);
                        if (!req->rq_replied) {
                                spin_unlock_irqrestore(&req->rq_lock, flags);
                                continue;
                        }
                        spin_unlock_irqrestore(&req->rq_lock, flags);

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        req->rq_status = after_reply(req, &do_restart);
                        if (do_restart) {
                                spin_lock_irqsave(&req->rq_lock, flags);
                                req->rq_resend = 1; /* ugh */
                                spin_unlock_irqrestore(&req->rq_lock, flags);
                                continue;
                        }

                        /* If there is no bulk associated with this request,
                         * then we're done and should let the interpreter
                         * process the reply.  Similarly if the RPC returned
                         * an error, and therefore the bulk will never arrive.
                         */
                        if (req->rq_bulk == NULL || req->rq_status != 0) {
                                req->rq_phase = RQ_PHASE_INTERPRET;
                                GOTO(interpret, req->rq_status);
                        }

                        req->rq_phase = RQ_PHASE_BULK;
                }

                LASSERT(req->rq_phase == RQ_PHASE_BULK);
                if (!ptlrpc_bulk_complete(req->rq_bulk))
                        continue;

                req->rq_phase = RQ_PHASE_INTERPRET;

        interpret:
                LASSERT(req->rq_phase == RQ_PHASE_INTERPRET);
                LASSERT(!req->rq_receiving_reply);

                ptlrpc_unregister_reply(req);
                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk(req);

                if (req->rq_interpret_reply != NULL) {
                        int (*interpreter)(struct ptlrpc_request *, void *,
                                           int) = req->rq_interpret_reply;
                        req->rq_status = interpreter(req, &req->rq_async_args,
                                                     req->rq_status);
                }

                CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:"
                       "opc %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
                       imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
                       req->rq_xid,
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       imp->imp_connection->c_peer.peer_nid,
                       req->rq_reqmsg->opc);

                req->rq_phase = RQ_PHASE_COMPLETE;
                set->set_remaining--;
        }

        RETURN(set->set_remaining == 0);
}
int ptlrpc_expire_one_request(struct ptlrpc_request *req)
{
        unsigned long flags;
        struct obd_import *imp = req->rq_import;
        ENTRY;

        DEBUG_REQ(D_ERROR, req, "timeout");

        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_timedout = 1;
        spin_unlock_irqrestore(&req->rq_lock, flags);

        ptlrpc_unregister_reply(req);

        if (imp == NULL) {
                DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
                RETURN(1);
        }

        /* The DLM server doesn't want recovery run on its imports. */
        if (imp->imp_dlm_fake)
                RETURN(1);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep, and don't start recovery again. */
        if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
            imp->imp_obd->obd_no_recov)
                RETURN(1);

        ptlrpc_fail_import(imp, req->rq_import_generation);

        RETURN(0);
}
static int expired_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;
        time_t now = LTIME_S(CURRENT_TIME);
        ENTRY;

        LASSERT(set != NULL);

        /* A timeout expired; see which reqs it applies to... */
        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                /* request in-flight? */
                if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
                      (req->rq_phase == RQ_PHASE_BULK)))
                        continue;

                if (req->rq_timedout || /* already dealt with */
                    req->rq_sent + req->rq_timeout > now) /* not expired */
                        continue;

                /* deal with this guy */
                ptlrpc_expire_one_request(req);
        }

        /* When waiting for a whole set, we always break out of the
         * sleep so we can recalculate the timeout, or enable interrupts
         * iff everyone's timed out. */
        RETURN(1);
}
static void interrupted_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;
        unsigned long flags;

        LASSERT(set != NULL);
        CERROR("INTERRUPTED SET %p\n", set);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                if (req->rq_phase != RQ_PHASE_RPC)
                        continue;

                spin_lock_irqsave(&req->rq_lock, flags);
                req->rq_intr = 1;
                spin_unlock_irqrestore(&req->rq_lock, flags);
        }
}
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        struct obd_import *imp;
        struct ptlrpc_request *req;
        struct l_wait_info lwi;
        unsigned long flags;
        int rc, timeout;
        time_t now, deadline;
        ENTRY;

        SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
        LASSERT(!list_empty(&set->set_requests));
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT(req->rq_level == LUSTRE_CONN_FULL);
                LASSERT(req->rq_phase == RQ_PHASE_NEW);
                req->rq_phase = RQ_PHASE_RPC;

                imp = req->rq_import;
                spin_lock_irqsave(&imp->imp_lock, flags);

                if (imp->imp_invalid) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        req->rq_status = -EIO;
                        req->rq_phase = RQ_PHASE_INTERPRET;
                        continue;
                }

                req->rq_import_generation = imp->imp_generation;

                if (req->rq_level > imp->imp_level) {
                        if (req->rq_no_recov || imp->imp_obd->obd_no_recov ||
                            imp->imp_dlm_fake) {
                                spin_unlock_irqrestore(&imp->imp_lock, flags);
                                req->rq_status = -EWOULDBLOCK;
                                req->rq_phase = RQ_PHASE_INTERPRET;
                                continue;
                        }

                        spin_lock(&req->rq_lock);
                        req->rq_waiting = 1;
                        spin_unlock(&req->rq_lock);
                        LASSERT(list_empty(&req->rq_list));
                        // list_del(&req->rq_list);
                        list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        continue;
                }

                /* XXX this is the same as ptlrpc_queue_wait */
                LASSERT(list_empty(&req->rq_list));
                list_add_tail(&req->rq_list, &imp->imp_sending_list);
                spin_unlock_irqrestore(&imp->imp_lock, flags);

                req->rq_reqmsg->status = current->pid;
                CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc"
                       " %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
                       imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
                       req->rq_xid,
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       imp->imp_connection->c_peer.peer_nid,
                       req->rq_reqmsg->opc);

                rc = ptl_send_rpc(req);
                if (rc) {
                        req->rq_status = rc;
                        req->rq_phase = RQ_PHASE_INTERPRET;
                }
        }

        do {
                now = LTIME_S(CURRENT_TIME);
                timeout = 0;
                list_for_each(tmp, &set->set_requests) {
                        req = list_entry(tmp, struct ptlrpc_request,
                                         rq_set_chain);

                        /* request in-flight? */
                        if (!((req->rq_phase == RQ_PHASE_RPC &&
                               !req->rq_waiting) ||
                              (req->rq_phase == RQ_PHASE_BULK)))
                                continue;

                        if (req->rq_timedout) /* already timed out */
                                continue;

                        deadline = req->rq_sent + req->rq_timeout;
                        if (deadline <= now) /* actually expired already */
                                timeout = 1; /* ASAP */
                        else if (timeout == 0 || timeout > deadline - now)
                                timeout = deadline - now;
                }

                /* wait until all complete, interrupted, or an in-flight
                 * req times out */
                CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
                       set, timeout);
                lwi = LWI_TIMEOUT_INTR((timeout ? timeout : 1) * HZ,
                                       expired_set, interrupted_set, set);
                rc = l_wait_event(set->set_waitq, ptlrpc_check_set(set), &lwi);

                LASSERT(rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);

                /* -EINTR => all requests have been flagged rq_intr so next
                 * check completes.
                 * -ETIMEDOUT => someone timed out.  When all reqs have
                 * timed out, signals are enabled allowing completion with
                 * EINTR.
                 * I don't really care if we go once more round the loop in
                 * the error cases -eeb. */
        } while (rc != 0);

        LASSERT(set->set_remaining == 0);

        rc = 0;
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT(req->rq_phase == RQ_PHASE_COMPLETE);
                if (req->rq_status != 0)
                        rc = req->rq_status;
        }

        if (set->set_interpret != NULL) {
                int (*interpreter)(struct ptlrpc_request_set *set, void *, int)
                        = set->set_interpret;
                rc = interpreter(set, &set->set_args, rc);
        }

        RETURN(rc);
}
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        LASSERT(!request->rq_receiving_reply);

        /* We must take it off the imp_replay_list first.  Otherwise, we'll set
         * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
        if (request->rq_import != NULL) {
                unsigned long flags = 0;
                if (!locked)
                        spin_lock_irqsave(&request->rq_import->imp_lock,
                                          flags);
                list_del_init(&request->rq_list);
                if (!locked)
                        spin_unlock_irqrestore(&request->rq_import->imp_lock,
                                               flags);
        }

        if (atomic_read(&request->rq_refcount) != 0) {
                DEBUG_REQ(D_ERROR, request,
                          "freeing request with nonzero refcount");
                LBUG();
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
        }
        if (request->rq_reqmsg != NULL) {
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                request->rq_reqmsg = NULL;
        }
        if (request->rq_export != NULL) {
                class_export_put(request->rq_export);
                request->rq_export = NULL;
        }
        if (request->rq_import != NULL) {
                class_import_put(request->rq_import);
                request->rq_import = NULL;
        }
        if (request->rq_bulk != NULL)
                ptlrpc_free_bulk(request->rq_bulk);

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        __ptlrpc_free_req(request, 0);
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL)
                RETURN(1);

        if (request == (void *)(long)(0x5a5a5a5a5a5a5a5a) ||
            request->rq_obd == (void *)(long)(0x5a5a5a5a5a5a5a5a)) {
                CERROR("dereferencing freed request (bug 575)\n");
                LBUG();
                RETURN(1);
        }

        DEBUG_REQ(D_INFO, request, "refcount now %u",
                  atomic_read(&request->rq_refcount) - 1);

        if (atomic_dec_and_test(&request->rq_refcount)) {
                __ptlrpc_free_req(request, locked);
                RETURN(1);
        }

        RETURN(0);
}

void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        __ptlrpc_req_finished(request, 0);
}
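
/* Reference-counting sketch: ptlrpc_prep_req() returns a request with one
 * reference, ptlrpc_request_addref() (below) takes an extra one, and each
 * ptlrpc_req_finished() drops one, freeing the request when the count
 * reaches zero:
 *
 *      ptlrpc_request_addref(req);     keep req across some async use
 *      ...
 *      ptlrpc_req_finished(req);       drops the extra reference
 *      ptlrpc_req_finished(req);       drops the last one; frees req
 */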
static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}
/* Disengage the client's reply buffer from the network.
 * NB does _NOT_ unregister any client-side bulk.
 * IDEMPOTENT, but _not_ safe against concurrent callers.
 * The request owner (i.e. the thread doing the I/O) must call it. */
void ptlrpc_unregister_reply(struct ptlrpc_request *request)
{
        unsigned long flags;
        int rc;
        ENTRY;

        LASSERT(!in_interrupt());             /* might sleep */

        spin_lock_irqsave(&request->rq_lock, flags);
        if (!request->rq_receiving_reply) {   /* not waiting for a reply */
                spin_unlock_irqrestore(&request->rq_lock, flags);
                EXIT;
                /* NB reply buffer not freed here */
                return;
        }

        LASSERT(!request->rq_replied);        /* callback hasn't completed */
        spin_unlock_irqrestore(&request->rq_lock, flags);

        rc = PtlMDUnlink(request->rq_reply_md_h);
        switch (rc) {
        default:
                LBUG();

        case PTL_OK:                          /* unlinked before completion */
                LASSERT(request->rq_receiving_reply);
                LASSERT(!request->rq_replied);
                spin_lock_irqsave(&request->rq_lock, flags);
                request->rq_receiving_reply = 0;
                spin_unlock_irqrestore(&request->rq_lock, flags);
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                EXIT;
                return;

        case PTL_MD_INUSE:                    /* callback in progress */
                for (;;) {
                        /* Network access will complete in finite time but
                         * the timeout lets us CERROR for visibility */
                        struct l_wait_info lwi = LWI_TIMEOUT(10*HZ, NULL, NULL);

                        rc = l_wait_event(request->rq_wait_for_rep,
                                          request->rq_replied, &lwi);
                        LASSERT(rc == 0 || rc == -ETIMEDOUT);
                        if (rc == 0) {
                                spin_lock_irqsave(&request->rq_lock, flags);
                                /* Ensure the callback has completed scheduling
                                 * me and taken its hands off the request */
                                spin_unlock_irqrestore(&request->rq_lock,
                                                       flags);
                                break;
                        }

                        CERROR("Unexpectedly long timeout: req %p\n", request);
                }
                /* fall through */

        case PTL_INV_MD:                      /* callback completed */
                LASSERT(!request->rq_receiving_reply);
                LASSERT(request->rq_replied);
                EXIT;
                return;
        }
        /* Not Reached */
}
/* caller must hold imp->imp_lock */
void ptlrpc_free_committed(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
        ENTRY;

        LASSERT(imp != NULL);

#ifdef CONFIG_SMP
        LASSERT(spin_is_locked(&imp->imp_lock));
#endif

        CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
               imp->imp_obd->obd_name, imp->imp_peer_committed_transno);

        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                /* XXX ok to remove when 1357 resolved - rread 05/29/03  */
                LASSERT(req != last_req);
                last_req = req;

                if (req->rq_import_generation < imp->imp_generation) {
                        DEBUG_REQ(D_HA, req, "freeing request with old gen");
                        GOTO(free_req, 0);
                }

                if (req->rq_replay) {
                        DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > imp->imp_peer_committed_transno) {
                        DEBUG_REQ(D_HA, req, "stopping search");
                        break;
                }

                DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
                          imp->imp_peer_committed_transno);
        free_req:
                list_del_init(&req->rq_list);
                __ptlrpc_req_finished(req, 1);
        }

        EXIT;
}
void ptlrpc_cleanup_client(struct obd_import *imp)
{
        ENTRY;
        EXIT;
        return;
}
void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        unsigned long flags;

        DEBUG_REQ(D_HA, req, "resending");
        req->rq_reqmsg->handle.cookie = 0;
        ptlrpc_put_connection(req->rq_connection);
        req->rq_connection =
                ptlrpc_connection_addref(req->rq_import->imp_connection);
        req->rq_status = -EAGAIN;

        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_resend = 1;
        req->rq_timedout = 0;
        if (req->rq_set != NULL)
                wake_up(&req->rq_set->set_waitq);
        else
                wake_up(&req->rq_wait_for_rep);
        spin_unlock_irqrestore(&req->rq_lock, flags);
}
/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        unsigned long flags;

        DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
        req->rq_status = -ERESTARTSYS;

        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_restart = 1;
        req->rq_timedout = 0;
        if (req->rq_set != NULL)
                wake_up(&req->rq_set->set_waitq);
        else
                wake_up(&req->rq_wait_for_rep);
        spin_unlock_irqrestore(&req->rq_lock, flags);
}
static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;

        RETURN(ptlrpc_expire_one_request(req));
}

static void interrupted_request(void *data)
{
        unsigned long flags;
        struct ptlrpc_request *req = data;

        DEBUG_REQ(D_HA, req, "request interrupted");
        spin_lock_irqsave(&req->rq_lock, flags);
        req->rq_intr = 1;
        spin_unlock_irqrestore(&req->rq_lock, flags);
}
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
        ENTRY;
        atomic_inc(&req->rq_refcount);
        RETURN(req);
}
void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                      struct obd_import *imp)
{
        struct list_head *tmp;

#ifdef CONFIG_SMP
        LASSERT(spin_is_locked(&imp->imp_lock));
#endif

        LASSERT(imp->imp_replayable);
        /* Balanced in ptlrpc_free_committed, usually. */
        ptlrpc_request_addref(req);
        list_for_each_prev(tmp, &imp->imp_replay_list) {
                struct ptlrpc_request *iter =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                /* We may have duplicate transnos if we create and then
                 * open a file, or for closes retained to match creating
                 * opens, so use req->rq_xid as a secondary key.
                 * (See bugs 684, 685, and 428.)
                 * XXX no longer needed, but all opens need transnos!
                 */
                if (iter->rq_transno > req->rq_transno)
                        continue;

                if (iter->rq_transno == req->rq_transno) {
                        LASSERT(iter->rq_xid != req->rq_xid);
                        if (iter->rq_xid > req->rq_xid)
                                continue;
                }

                list_add(&req->rq_list, &iter->rq_list);
                return;
        }

        list_add_tail(&req->rq_list, &imp->imp_replay_list);
}
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        int brc;
        int do_restart = 0;
        int timeout = 0;
        struct l_wait_info lwi;
        struct obd_import *imp = req->rq_import;
        struct obd_device *obd = imp->imp_obd;
        unsigned long flags;
        ENTRY;

        LASSERT(req->rq_set == NULL);
        LASSERT(!req->rq_receiving_reply);

        /* for distributed debugging */
        req->rq_reqmsg->status = current->pid;
        LASSERT(imp->imp_obd != NULL);
        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc "
               "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               imp->imp_obd->obd_uuid.uuid,
               req->rq_reqmsg->status, req->rq_xid,
               imp->imp_connection->c_peer.peer_ni->pni_name,
               imp->imp_connection->c_peer.peer_nid,
               req->rq_reqmsg->opc);

        /* Mark phase here for a little debug help */
        req->rq_phase = RQ_PHASE_RPC;

        spin_lock_irqsave(&imp->imp_lock, flags);
        req->rq_import_generation = imp->imp_generation;
restart:
        /*
         * If the import has been invalidated (such as by an OST failure), the
         * request must fail with -EIO.  Recovery requests are allowed to go
         * through, though, so that they have a chance to revalidate the
         * import.
         */
        if (req->rq_import->imp_invalid && req->rq_level == LUSTRE_CONN_FULL) {
                DEBUG_REQ(D_ERROR, req, "IMP_INVALID:");
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                GOTO(out, rc = -EIO);
        }

        if (req->rq_import_generation < imp->imp_generation) {
                DEBUG_REQ(D_ERROR, req, "req old gen:");
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                GOTO(out, rc = -EIO);
        }

        if (req->rq_level > imp->imp_level) {
                list_del(&req->rq_list);
                if (req->rq_no_recov || obd->obd_no_recov ||
                    imp->imp_dlm_fake) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc = -EWOULDBLOCK);
                }

                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                spin_unlock_irqrestore(&imp->imp_lock, flags);

                DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%d > %d)",
                          current->comm, req->rq_level, imp->imp_level);
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= imp->imp_level ||
                                   req->rq_err),
                                  &lwi);
                DEBUG_REQ(D_HA, req, "\"%s\" awake: (%d > %d or %d == 1)",
                          current->comm, imp->imp_level, req->rq_level,
                          req->rq_err);

                spin_lock_irqsave(&imp->imp_lock, flags);
                list_del_init(&req->rq_list);

                if (req->rq_err ||
                    req->rq_import_generation < imp->imp_generation)
                        rc = -EIO;
                if (rc) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc);
                }

                DEBUG_REQ(D_HA, req, "resumed");
                goto restart;
        }

        /* XXX this is the same as ptlrpc_set_wait */
        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        rc = ptl_send_rpc(req);
        if (rc) {
                /* The DLM's fake imports want to avoid all forms of
                 * recovery. */
                if (imp->imp_dlm_fake) {
                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc);
                }

                DEBUG_REQ(D_ERROR, req, "send failed (%d); recovering", rc);

                ptlrpc_fail_import(imp, req->rq_import_generation);

                /* If we've been told to not wait, we're done. */
                if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
                    obd->obd_no_recov) {
                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc);
                }

                /* If we errored, allow the user to interrupt immediately */
                timeout = 1;
        } else {
                timeout = req->rq_timeout * HZ;
                DEBUG_REQ(D_NET, req, "-- sleeping");
        }

#ifdef __KERNEL__
        lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
                               req);
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
#else
        {
                extern int reply_in_callback(ptl_event_t *ev);
                ptl_event_t reply_ev;

                PtlEQWait(req->rq_connection->c_peer.peer_ni->pni_reply_in_eq_h,
                          &reply_ev);
                reply_in_callback(&reply_ev);

                LASSERT(reply_ev.mem_desc.user_ptr == (void *)req);
                /* ptlrpc_check_reply(req) not required here; it only tests */
        }
#endif

        DEBUG_REQ(D_NET, req, "-- done sleeping");

        CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:opc "
               "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               imp->imp_obd->obd_uuid.uuid,
               req->rq_reqmsg->status, req->rq_xid,
               imp->imp_connection->c_peer.peer_ni->pni_name,
               imp->imp_connection->c_peer.peer_nid,
               req->rq_reqmsg->opc);

        spin_lock_irqsave(&imp->imp_lock, flags);
        list_del_init(&req->rq_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        /* If the reply was received normally, this just grabs the spinlock
         * (ensuring the reply callback has returned), sees that
         * req->rq_receiving_reply is clear and returns. */
        ptlrpc_unregister_reply(req);

        if (req->rq_err)
                GOTO(out, rc = -EIO);

        /* Resend if we need to, unless we were interrupted. */
        if (req->rq_resend && !req->rq_intr) {
                /* ...unless we were specifically told otherwise. */
                if (req->rq_no_resend) {
                        spin_lock_irqsave(&req->rq_lock, flags);
                        req->rq_no_resend = 0;
                        spin_unlock_irqrestore(&req->rq_lock, flags);
                        GOTO(out, rc = -ETIMEDOUT);
                }
                spin_lock_irqsave(&req->rq_lock, flags);
                req->rq_resend = 0;
                spin_unlock_irqrestore(&req->rq_lock, flags);
                lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk(req);

                DEBUG_REQ(D_HA, req, "resending: ");
                spin_lock_irqsave(&imp->imp_lock, flags);
                goto restart;
        }

        if (req->rq_intr) {
                /* Should only be interrupted if we timed out. */
                if (!req->rq_timedout)
                        DEBUG_REQ(D_ERROR, req,
                                  "rq_intr set but rq_timedout not");
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_timedout) {               /* non-recoverable timeout */
                GOTO(out, rc = -ETIMEDOUT);
        }

        if (!req->rq_replied) {
                /* How can this be? -eeb */
                DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
                LBUG();
                GOTO(out, rc = req->rq_status);
        }

        rc = after_reply(req, &do_restart);
        /* NB may return +ve success rc */
        if (do_restart) {
                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk(req);
                DEBUG_REQ(D_HA, req, "resending: ");
                spin_lock_irqsave(&imp->imp_lock, flags);
                goto restart;
        }

        if (req->rq_bulk != NULL) {
                if (rc >= 0) {                /* success so far */
                        lwi = LWI_TIMEOUT(timeout, NULL, NULL);
                        brc = l_wait_event(req->rq_wait_for_rep,
                                           ptlrpc_bulk_complete(req->rq_bulk),
                                           &lwi);
                        if (brc != 0) {
                                LASSERT(brc == -ETIMEDOUT);
                                CERROR("Timed out waiting for bulk\n");
                                rc = brc;
                        }
                }
                if (rc < 0) {
                        /* MDS blocks for put ACKs before replying */
                        /* OSC sets rq_no_resend for the time being */
                        LASSERT(req->rq_no_resend);
                        ptlrpc_unregister_bulk(req);
                }
        }

 out:
        LASSERT(!req->rq_receiving_reply);
        req->rq_phase = RQ_PHASE_INTERPRET;

        RETURN(rc);
}
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0, old_level, old_status = 0;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        /* I don't touch rq_phase here, so the debug log can show what
         * state it was left in */

        /* Not handling automatic bulk replay yet (or ever?) */
        LASSERT(req->rq_bulk == NULL);

        DEBUG_REQ(D_NET, req, "about to replay");

        /* Update request's state, since we might have a new connection. */
        ptlrpc_put_connection(req->rq_connection);
        req->rq_connection =
                ptlrpc_connection_addref(req->rq_import->imp_connection);

        /* temporarily set request to RECOVD level (reset at out:) */
        old_level = req->rq_level;
        if (req->rq_replied)
                old_status = req->rq_repmsg->status;
        req->rq_level = LUSTRE_CONN_RECOVER;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                GOTO(out, rc = -rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        /* If the reply was received normally, this just grabs the spinlock
         * (ensuring the reply callback has returned), sees that
         * req->rq_receiving_reply is clear and returns. */
        ptlrpc_unregister_reply(req);

        if (!req->rq_replied) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                /* ...that's because signals aren't all masked in
                 * l_wait_event() -eeb */
                GOTO(out, rc = -EINTR);
        }

#if SWAB_PARANOIA
        /* Clear reply swab mask; this is a new reply in sender's byte order */
        req->rq_rep_swab_mask = 0;
#endif
        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc = -EPROTO);
        }

#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                GOTO(out, rc = -EPROTO);
        }
#endif

        /* The transno had better not change over replay. */
        LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);

        /* let the callback do fixups, possibly including in the request */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req);

        if (req->rq_replied && req->rq_repmsg->status != old_status) {
                DEBUG_REQ(D_HA, req, "status %d, old was %d",
                          req->rq_repmsg->status, old_status);
        }

 out:
        req->rq_level = old_level;
        RETURN(rc);
}
void ptlrpc_abort_inflight(struct obd_import *imp)
{
        unsigned long flags;
        struct list_head *tmp, *n;
        ENTRY;

        /* Make sure that no new requests get processed for this import.
         * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
         * this flag and then putting requests on sending_list or delayed_list.
         */
        spin_lock_irqsave(&imp->imp_lock, flags);

        /* XXX locking?  Maybe we should remove each request with the list
         * locked?  Also, how do we know if the requests on the list are
         * being freed at this time?
         */
        list_for_each_safe(tmp, n, &imp->imp_sending_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "inflight");

                spin_lock(&req->rq_lock);
                if (req->rq_import_generation < imp->imp_generation) {
                        req->rq_err = 1;
                        if (req->rq_set != NULL)
                                wake_up(&req->rq_set->set_waitq);
                        else
                                wake_up(&req->rq_wait_for_rep);
                }
                spin_unlock(&req->rq_lock);
        }

        list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "aborting waiting req");

                spin_lock(&req->rq_lock);
                if (req->rq_import_generation < imp->imp_generation) {
                        req->rq_err = 1;
                        if (req->rq_set != NULL)
                                wake_up(&req->rq_set->set_waitq);
                        else
                                wake_up(&req->rq_wait_for_rep);
                }
                spin_unlock(&req->rq_lock);
        }

        /* Last chance to free reqs left on the replay list, but we
         * will still leak reqs that haven't committed. */
        if (imp->imp_replayable)
                ptlrpc_free_committed(imp);

        spin_unlock_irqrestore(&imp->imp_lock, flags);

        EXIT;
}
static __u64 ptlrpc_last_xid = 0;
static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;

__u64 ptlrpc_next_xid(void)
{
        __u64 tmp;

        spin_lock(&ptlrpc_last_xid_lock);
        tmp = ++ptlrpc_last_xid;
        spin_unlock(&ptlrpc_last_xid_lock);
        return tmp;
}