/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * Copyright (c) 2002, 2003 Cluster File Systems, Inc.
 *
 * This file is part of Lustre, http://www.lustre.org.
 *
 * Lustre is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Lustre is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Lustre; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define DEBUG_SUBSYSTEM S_RPC

#ifndef __KERNEL__
#include <liblustre.h>
#endif

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_lib.h>
#include <linux/lustre_ha.h>
#include <linux/lustre_import.h>

#include "ptlrpc_internal.h"
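
/* Client side of the ptlrpc layer: request allocation and packing,
 * bulk descriptors, request sets, synchronous send/wait, timeout and
 * interrupt handling, and replay of retained requests after recovery. */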
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                        struct ptlrpc_client *cl)
{
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal   = rep_portal;
        cl->cli_name           = name;
}

struct obd_uuid *ptlrpc_req_to_uuid(struct ptlrpc_request *req)
{
        return &req->rq_connection->c_remote_uuid;
}

struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid)
{
        struct ptlrpc_connection *c;
        struct ptlrpc_peer peer;
        int err;

        err = ptlrpc_uuid_to_peer(uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return NULL;
        }

        c = ptlrpc_get_connection(&peer, uuid);
        if (c) {
                memcpy(c->c_remote_uuid.uuid,
                       uuid->uuid, sizeof(c->c_remote_uuid.uuid));
        }

        CDEBUG(D_INFO, "%s -> %p\n", uuid->uuid, c);

        return c;
}

void ptlrpc_readdress_connection(struct ptlrpc_connection *conn,
                                 struct obd_uuid *uuid)
{
        struct ptlrpc_peer peer;
        int err;

        err = ptlrpc_uuid_to_peer (uuid, &peer);
        if (err != 0) {
                CERROR("cannot find peer %s!\n", uuid->uuid);
                return;
        }

        memcpy (&conn->c_peer, &peer, sizeof (peer));
}

static inline struct ptlrpc_bulk_desc *new_bulk(void)
{
        struct ptlrpc_bulk_desc *desc;

        OBD_ALLOC(desc, sizeof(*desc));
        if (!desc)
                return NULL;

        spin_lock_init (&desc->bd_lock);
        init_waitqueue_head(&desc->bd_waitq);
        INIT_LIST_HEAD(&desc->bd_page_list);
        desc->bd_md_h = PTL_HANDLE_NONE;
        desc->bd_me_h = PTL_HANDLE_NONE;

        return desc;
}
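
/* NB the bulk 'type' below is named from the client's point of view: an
 * importer (client) receiving data uses BULK_PUT_SINK or BULK_GET_SOURCE;
 * the matching exporter (server) descriptor is prepared with
 * BULK_PUT_SOURCE or BULK_GET_SINK (see the LASSERTs below). */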
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
                                               int type, int portal)
{
        struct obd_import *imp = req->rq_import;
        unsigned long flags;
        struct ptlrpc_bulk_desc *desc;

        LASSERT (type == BULK_PUT_SINK || type == BULK_GET_SOURCE);

        desc = new_bulk();
        if (desc == NULL)
                RETURN(NULL);

        /* Is this sampled at the right place?  Do we want to get the import
         * generation just before we send?  Should it match the generation of
         * the request? */
        spin_lock_irqsave(&imp->imp_lock, flags);
        desc->bd_import_generation = imp->imp_generation;
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        desc->bd_import = class_import_get(imp);
        desc->bd_type = type;
        desc->bd_portal = portal;

        /* This makes req own desc, which is freed when req itself is freed */
        req->rq_bulk = desc;

        RETURN(desc);
}

struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp (struct ptlrpc_request *req,
                                               int type, int portal)
{
        struct obd_export *exp = req->rq_export;
        struct ptlrpc_bulk_desc *desc;

        LASSERT (type == BULK_PUT_SOURCE || type == BULK_GET_SINK);

        desc = new_bulk();
        if (desc == NULL)
                RETURN(NULL);

        desc->bd_export = class_export_get(exp);
        desc->bd_type = type;
        desc->bd_portal = portal;

        /* NB we don't assign rq_bulk here; server-side requests are
         * re-used, and the handler frees the bulk desc explicitly. */

        RETURN(desc);
}

int ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
                          struct page *page, int pageoffset, int len)
{
        struct ptlrpc_bulk_page *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk == NULL)
                return -ENOMEM;

        LASSERT (page != NULL);
        LASSERT (pageoffset >= 0);
        LASSERT (len > 0);
        LASSERT (pageoffset + len <= PAGE_SIZE);

        bulk->bp_page = page;
        bulk->bp_pageoffset = pageoffset;
        bulk->bp_buflen = len;

        bulk->bp_desc = desc;
        list_add_tail(&bulk->bp_link, &desc->bd_page_list);
        desc->bd_page_count++;
        return 0;
}
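
/* Illustrative client-side setup for a bulk transfer (a sketch, not
 * taken from this file; 'pages' and 'npages' are assumed to exist):
 *
 *      desc = ptlrpc_prep_bulk_imp(req, BULK_PUT_SINK, OST_BULK_PORTAL);
 *      for (i = 0; i < npages; i++)
 *              ptlrpc_prep_bulk_page(desc, pages[i], 0, PAGE_SIZE);
 */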
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        ENTRY;

        LASSERT (desc != NULL);
        LASSERT (desc->bd_page_count != 0x5a5a5a5a); /* not freed already */
        LASSERT (!desc->bd_network_rw);              /* network hands off */

        list_for_each_safe(tmp, next, &desc->bd_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, bp_link);
                ptlrpc_free_bulk_page(bulk);
        }

        LASSERT (desc->bd_page_count == 0);
        LASSERT ((desc->bd_export != NULL) ^ (desc->bd_import != NULL));

        if (desc->bd_export != NULL)
                class_export_put(desc->bd_export);
        else
                class_import_put(desc->bd_import);

        OBD_FREE(desc, sizeof(*desc));
        EXIT;
}

void ptlrpc_free_bulk_page(struct ptlrpc_bulk_page *bulk)
{
        LASSERT (bulk != NULL);

        list_del(&bulk->bp_link);
        bulk->bp_desc->bd_page_count--;
        OBD_FREE(bulk, sizeof(*bulk));
}

struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        LASSERT((unsigned long)imp > 0x1000);

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqmsg);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }

        request->rq_timeout = obd_timeout;
        request->rq_level = LUSTRE_CONN_FULL;
        request->rq_type = PTL_RPC_MSG_REQUEST;
        request->rq_import = class_import_get(imp);
        request->rq_phase = RQ_PHASE_NEW;

        /* XXX FIXME bug 249 */
        request->rq_request_portal = imp->imp_client->cli_request_portal;
        request->rq_reply_portal = imp->imp_client->cli_reply_portal;

        request->rq_connection = ptlrpc_connection_addref(imp->imp_connection);

        spin_lock_init (&request->rq_lock);
        INIT_LIST_HEAD(&request->rq_list);
        init_waitqueue_head(&request->rq_wait_for_rep);
        request->rq_xid = ptlrpc_next_xid();
        atomic_set(&request->rq_refcount, 1);

        request->rq_reqmsg->opc = opcode;
        request->rq_reqmsg->flags = 0;

        RETURN(request);
}
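
/* Illustrative synchronous usage (a sketch; 'size' is the expected
 * reply buffer length):
 *
 *      req = ptlrpc_prep_req(imp, OST_STATFS, 0, NULL, NULL);
 *      req->rq_replen = lustre_msg_size(1, &size);
 *      rc = ptlrpc_queue_wait(req);
 *      ptlrpc_req_finished(req);
 */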
struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
        struct ptlrpc_request_set *set;

        OBD_ALLOC(set, sizeof *set);
        if (!set)
                RETURN(NULL);
        INIT_LIST_HEAD(&set->set_requests);
        init_waitqueue_head(&set->set_waitq);
        set->set_remaining = 0;
        RETURN(set);
}

/* Finish with this set; opposite of prep_set. */
void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        struct list_head *next;
        int               expected_phase;
        int               n = 0;
        ENTRY;

        /* Requests on the set should either all be completed, or all be new */
        expected_phase = (set->set_remaining == 0) ?
                         RQ_PHASE_COMPLETE : RQ_PHASE_NEW;
        list_for_each (tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT (req->rq_phase == expected_phase);
                n++;
        }

        LASSERT (set->set_remaining == 0 || set->set_remaining == n);

        list_for_each_safe(tmp, next, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                list_del_init(&req->rq_set_chain);

                LASSERT (req->rq_phase == expected_phase);

                if (req->rq_phase == RQ_PHASE_NEW) {
                        if (req->rq_interpret_reply != NULL) {
                                int (*interpreter)(struct ptlrpc_request *,
                                                   void *, int) =
                                        req->rq_interpret_reply;

                                /* higher level (i.e. LOV) failed;
                                 * let the sub reqs clean up */
                                req->rq_status = -EBADR;
                                interpreter(req, &req->rq_async_args, req->rq_status);
                        }
                        set->set_remaining--;
                }

                ptlrpc_req_finished (req);
        }

        LASSERT(set->set_remaining == 0);

        OBD_FREE(set, sizeof(*set));
        EXIT;
}

void ptlrpc_set_add_req(struct ptlrpc_request_set *set,
                        struct ptlrpc_request *req)
{
        /* The set takes over the caller's request reference */
        list_add_tail(&req->rq_set_chain, &set->set_requests);
        req->rq_set = set;
        set->set_remaining++;
}
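
/* Request sets batch RPCs so several can be in flight at once and be
 * waited on together (a sketch):
 *
 *      set = ptlrpc_prep_set();
 *      ptlrpc_set_add_req(set, req1);
 *      ptlrpc_set_add_req(set, req2);
 *      rc = ptlrpc_set_wait(set);      (sends, then waits for all)
 *      ptlrpc_set_destroy(set);
 */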
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        unsigned long flags;
        int rc = 0;

        /* serialise with network callback */
        spin_lock_irqsave (&req->rq_lock, flags);
        if (req->rq_replied) {
                DEBUG_REQ(D_NET, req, "REPLIED:");
                rc = 1;
        } else if (req->rq_err) {
                DEBUG_REQ(D_ERROR, req, "ABORTED:");
                rc = 1;
        } else if (req->rq_resend) {
                DEBUG_REQ(D_ERROR, req, "RESEND:");
                rc = 1;
        } else if (req->rq_restart) {
                DEBUG_REQ(D_ERROR, req, "RESTART:");
                rc = 1;
        }
        spin_unlock_irqrestore (&req->rq_lock, flags);
        DEBUG_REQ(D_NET, req, "rc = %d for", rc);
        return rc;
}
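
/* Extract the RPC status from a reply.  An error reply
 * (PTL_RPC_MSG_ERR) must carry a negative status; anything else in an
 * error reply is mapped to -EINVAL. */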
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
        int err;

        err = req->rq_repmsg->status;
        if (req->rq_repmsg->type == PTL_RPC_MSG_ERR) {
                DEBUG_REQ(D_ERROR, req, "type == PTL_RPC_MSG_ERR (%d)", err);
                if (err >= 0)
                        CERROR("Error Reply has >= zero status\n");
                RETURN(err < 0 ? err : -EINVAL);
        }

        if (err < 0) {
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        } else if (err > 0) {
                /* XXX: translate this error from net to host */
                DEBUG_REQ(D_INFO, req, "status is %d", err);
        }
        RETURN(err);
}

#warning this needs to change after robert fixes eviction handling
static int after_reply(struct ptlrpc_request *req, int *restartp)
{
        unsigned long flags;
        struct obd_import *imp = req->rq_import;
        int rc;
        ENTRY;

        LASSERT (!req->rq_receiving_reply);
        LASSERT (req->rq_replied);

        if (restartp != NULL)
                *restartp = 0;

        /* NB Until this point, the whole of the incoming message,
         * including buflens, status etc is in the sender's byte order. */

#if SWAB_PARANOIA
        /* Clear reply swab mask; this is a new reply in sender's byte order */
        req->rq_rep_swab_mask = 0;
#endif
        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                RETURN(-EPROTO);
        }

        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                RETURN(-EPROTO);
        }

        /* Store transno in reqmsg for replay. */
        req->rq_reqmsg->transno = req->rq_transno = req->rq_repmsg->transno;

        rc = ptlrpc_check_status(req);

        /* Either we've been evicted, or the server has failed for
         * some reason.  Try to reconnect, and if that fails, punt to the
         * upcall. */
        if (rc == -ENOTCONN) {
                if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
                    imp->imp_obd->obd_no_recov || imp->imp_dlm_fake)
                        RETURN(-ENOTCONN);

                rc = ptlrpc_request_handle_eviction(req);
                if (rc)
                        CERROR("can't reconnect to %s@%s: %d\n",
                               imp->imp_target_uuid.uuid,
                               imp->imp_connection->c_remote_uuid.uuid, rc);
                else
                        ptlrpc_wake_delayed(imp);

                if (req->rq_err)
                        RETURN(-EIO);

                if (req->rq_resend) {
                        if (restartp == NULL)
                                LBUG(); /* async resend not supported yet */
                        spin_lock_irqsave (&req->rq_lock, flags);
                        req->rq_resend = 0;
                        spin_unlock_irqrestore (&req->rq_lock, flags);
                        *restartp = 1;
                        lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);
                        DEBUG_REQ(D_HA, req, "resending: ");
                        RETURN(0);
                }

                CERROR("request should be err or resend: %p\n", req);
                LBUG();
        }

        if (req->rq_import->imp_replayable) {
                spin_lock_irqsave(&imp->imp_lock, flags);
                if ((req->rq_replay || req->rq_transno != 0) && rc >= 0)
                        ptlrpc_retain_replayable_request(req, imp);

                if (req->rq_transno > imp->imp_max_transno)
                        imp->imp_max_transno = req->rq_transno;

                /* Replay-enabled imports return commit-status information. */
                if (req->rq_repmsg->last_committed) {
                        if (req->rq_repmsg->last_committed <
                            imp->imp_peer_committed_transno) {
                                CERROR("%s went back in time (transno "LPD64
                                       " was committed, server claims "LPD64
                                       ")!  is shared storage not coherent?\n",
                                       imp->imp_target_uuid.uuid,
                                       imp->imp_peer_committed_transno,
                                       req->rq_repmsg->last_committed);
                        }
                        imp->imp_peer_committed_transno =
                                req->rq_repmsg->last_committed;
                }
                ptlrpc_free_committed(imp);
                spin_unlock_irqrestore(&imp->imp_lock, flags);
        }

        RETURN(rc);
}
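
/* Walk every request in @set, advancing it through the
 * RPC -> BULK -> INTERPRET -> COMPLETE phases and (re)sending where
 * needed.  Returns non-zero once every request has completed. */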
static int check_set(struct ptlrpc_request_set *set)
{
        unsigned long flags;
        struct list_head *tmp;
        ENTRY;

        if (set->set_remaining == 0)
                RETURN(1);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);
                struct obd_import *imp = req->rq_import;
                int rc = 0;

                if (!(req->rq_phase == RQ_PHASE_RPC ||
                      req->rq_phase == RQ_PHASE_BULK ||
                      req->rq_phase == RQ_PHASE_INTERPRET ||
                      req->rq_phase == RQ_PHASE_COMPLETE)) {
                        DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase);
                        LBUG();
                }

                if (req->rq_phase == RQ_PHASE_COMPLETE)
                        continue;

                if (req->rq_phase == RQ_PHASE_INTERPRET)
                        GOTO (interpret, req->rq_status);

                if (req->rq_err) {
                        ptlrpc_unregister_reply(req);
                        if (req->rq_status == 0)
                                req->rq_status = -EIO;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        GOTO (interpret, req->rq_status);
                }

                if (req->rq_intr) {
                        /* NB could be on delayed list */
                        ptlrpc_unregister_reply(req);
                        req->rq_status = -EINTR;
                        req->rq_phase = RQ_PHASE_INTERPRET;

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        GOTO (interpret, req->rq_status);
                }

                if (req->rq_phase == RQ_PHASE_RPC) {
                        int do_restart = 0;
                        if (req->rq_waiting || req->rq_resend) {
                                spin_lock_irqsave(&imp->imp_lock, flags);

                                if (req->rq_level > imp->imp_level) {
                                        spin_unlock_irqrestore(&imp->imp_lock,
                                                               flags);
                                        continue;
                                }

                                list_del(&req->rq_list);
                                list_add_tail(&req->rq_list,
                                              &imp->imp_sending_list);
                                spin_unlock_irqrestore(&imp->imp_lock, flags);

                                req->rq_waiting = 0;
                                if (req->rq_resend) {
                                        lustre_msg_add_flags(req->rq_reqmsg,
                                                             MSG_RESENT);
                                        spin_lock_irqsave(&req->rq_lock, flags);
                                        req->rq_resend = 0;
                                        spin_unlock_irqrestore(&req->rq_lock,
                                                               flags);
                                        ptlrpc_unregister_reply(req);
                                        if (req->rq_bulk != NULL)
                                                ptlrpc_unregister_bulk(req);
                                }

                                rc = ptl_send_rpc(req);
                                if (rc) {
                                        req->rq_status = rc;
                                        req->rq_phase = RQ_PHASE_INTERPRET;
                                        GOTO (interpret, req->rq_status);
                                }
                        }

                        /* Ensure the network callback returned */
                        spin_lock_irqsave (&req->rq_lock, flags);
                        if (!req->rq_replied) {
                                spin_unlock_irqrestore (&req->rq_lock, flags);
                                continue;
                        }
                        spin_unlock_irqrestore (&req->rq_lock, flags);

                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);

                        req->rq_status = after_reply(req, &do_restart);
                        if (do_restart) {
                                spin_lock_irqsave (&req->rq_lock, flags);
                                req->rq_resend = 1; /* ugh */
                                spin_unlock_irqrestore (&req->rq_lock, flags);
                                continue;
                        }

                        /* If there is no bulk associated with this request,
                         * then we're done and should let the interpreter
                         * process the reply.  Similarly if the RPC returned
                         * an error, and therefore the bulk will never arrive.
                         */
                        if (req->rq_bulk == NULL || req->rq_status != 0) {
                                req->rq_phase = RQ_PHASE_INTERPRET;
                                GOTO (interpret, req->rq_status);
                        }

                        req->rq_phase = RQ_PHASE_BULK;
                }

                LASSERT (req->rq_phase == RQ_PHASE_BULK);
                if (!ptlrpc_bulk_complete (req->rq_bulk))
                        continue;

                req->rq_phase = RQ_PHASE_INTERPRET;

        interpret:
                LASSERT (req->rq_phase == RQ_PHASE_INTERPRET);
                LASSERT (!req->rq_receiving_reply);

                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk (req);

                if (req->rq_interpret_reply != NULL) {
                        int (*interpreter)(struct ptlrpc_request *,void *,int) =
                                req->rq_interpret_reply;
                        req->rq_status = interpreter(req, &req->rq_async_args,
                                                     req->rq_status);
                }

                CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:"
                       "opc %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
                       imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
                       req->rq_xid,
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       imp->imp_connection->c_peer.peer_nid,
                       req->rq_reqmsg->opc);

                req->rq_phase = RQ_PHASE_COMPLETE;
                set->set_remaining--;
        }

        RETURN (set->set_remaining == 0);
}
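
/* A request has been on the wire too long: flag it timed out, reclaim
 * its reply buffer, and kick import recovery unless the request is
 * itself part of recovery or recovery is administratively disabled. */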
static int expire_one_request(struct ptlrpc_request *req)
{
        unsigned long flags;
        struct obd_import *imp = req->rq_import;

        DEBUG_REQ(D_ERROR, req, "timeout");

        spin_lock_irqsave (&req->rq_lock, flags);
        req->rq_timedout = 1;
        spin_unlock_irqrestore (&req->rq_lock, flags);

        ptlrpc_unregister_reply (req);

        if (imp == NULL) {
                DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
                RETURN(1);
        }

        /* The DLM server doesn't want recovery run on its imports. */
        if (imp->imp_dlm_fake)
                RETURN(1);

        /* If this request is for recovery or other primordial tasks,
         * don't go back to sleep, and don't start recovery again. */
        if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
            imp->imp_obd->obd_no_recov)
                RETURN(1);

        ptlrpc_fail_import(imp, req->rq_import_generation);
        RETURN(0);
}

static int expired_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;
        time_t now = LTIME_S (CURRENT_TIME);
        ENTRY;

        LASSERT (set != NULL);

        /* A timeout expired; see which reqs it applies to... */
        list_for_each (tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                /* request in-flight? */
                if (!((req->rq_phase == RQ_PHASE_RPC && !req->rq_waiting) ||
                      (req->rq_phase == RQ_PHASE_BULK)))
                        continue;

                if (req->rq_timedout || /* already dealt with */
                    req->rq_sent + req->rq_timeout > now) /* not expired */
                        continue;

                /* deal with this guy */
                expire_one_request (req);
        }

        /* When waiting for a whole set, we always break out of the
         * sleep so we can recalculate the timeout, or enable interrupts
         * iff everyone's timed out. */
        RETURN(1);
}

static void interrupted_set(void *data)
{
        struct ptlrpc_request_set *set = data;
        struct list_head *tmp;
        unsigned long flags;

        LASSERT (set != NULL);
        CERROR("INTERRUPTED SET %p\n", set);

        list_for_each(tmp, &set->set_requests) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                if (req->rq_phase != RQ_PHASE_RPC)
                        continue;

                spin_lock_irqsave (&req->rq_lock, flags);
                req->rq_intr = 1;
                spin_unlock_irqrestore (&req->rq_lock, flags);
        }
}
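
/* Send every new request in @set, then sleep in l_wait_event() until
 * check_set() reports them all complete, recomputing the earliest
 * timeout each time the wait is broken. */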
int ptlrpc_set_wait(struct ptlrpc_request_set *set)
{
        struct list_head *tmp;
        struct obd_import *imp;
        struct ptlrpc_request *req;
        struct l_wait_info lwi;
        unsigned long flags;
        int rc, timeout;
        time_t now, deadline;
        ENTRY;

        LASSERT(!list_empty(&set->set_requests));
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT (req->rq_level == LUSTRE_CONN_FULL);
                LASSERT (req->rq_phase == RQ_PHASE_NEW);
                req->rq_phase = RQ_PHASE_RPC;

                imp = req->rq_import;
                spin_lock_irqsave(&imp->imp_lock, flags);

                if (imp->imp_invalid) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        req->rq_status = -EIO;
                        req->rq_phase = RQ_PHASE_INTERPRET;
                        continue;
                }

                if (req->rq_level > imp->imp_level) {
                        if (req->rq_no_recov || imp->imp_obd->obd_no_recov ||
                            imp->imp_dlm_fake) {
                                spin_unlock_irqrestore(&imp->imp_lock, flags);
                                req->rq_status = -EWOULDBLOCK;
                                req->rq_phase = RQ_PHASE_INTERPRET;
                                continue;
                        }

                        spin_lock (&req->rq_lock);
                        req->rq_waiting = 1;
                        spin_unlock (&req->rq_lock);
                        LASSERT (list_empty (&req->rq_list));
                        // list_del(&req->rq_list);
                        list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        continue;
                }

                /* XXX this is the same as ptlrpc_queue_wait */
                LASSERT(list_empty(&req->rq_list));
                list_add_tail(&req->rq_list, &imp->imp_sending_list);
                req->rq_import_generation = imp->imp_generation;
                spin_unlock_irqrestore(&imp->imp_lock, flags);

                CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc"
                       " %s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
                       imp->imp_obd->obd_uuid.uuid, req->rq_reqmsg->status,
                       req->rq_xid,
                       imp->imp_connection->c_peer.peer_ni->pni_name,
                       imp->imp_connection->c_peer.peer_nid,
                       req->rq_reqmsg->opc);

                rc = ptl_send_rpc(req);
                if (rc) {
                        req->rq_status = rc;
                        req->rq_phase = RQ_PHASE_INTERPRET;
                }
        }

        do {
                now = LTIME_S (CURRENT_TIME);
                timeout = 0;
                list_for_each (tmp, &set->set_requests) {
                        req = list_entry(tmp, struct ptlrpc_request,
                                         rq_set_chain);

                        /* request in-flight? */
                        if (!((req->rq_phase == RQ_PHASE_RPC &&
                               !req->rq_waiting) ||
                              (req->rq_phase == RQ_PHASE_BULK)))
                                continue;

                        if (req->rq_timedout) /* already timed out */
                                continue;

                        deadline = req->rq_sent + req->rq_timeout;
                        if (deadline <= now) /* actually expired already */
                                timeout = 1; /* ASAP */
                        else if (timeout == 0 || timeout > deadline - now)
                                timeout = deadline - now;
                }

                /* wait until all complete, interrupted, or an in-flight
                 * req times out */
                CDEBUG(D_HA, "set %p going to sleep for %d seconds\n",
                       set, timeout);
                lwi = LWI_TIMEOUT_INTR(timeout ? timeout * HZ : 1,
                                       expired_set, interrupted_set, set);
                rc = l_wait_event(set->set_waitq, check_set(set), &lwi);

                LASSERT (rc == 0 || rc == -EINTR || rc == -ETIMEDOUT);

                /* -EINTR => all requests have been flagged rq_intr so next
                 * check completes.
                 * -ETIMEDOUT => someone timed out.  When all reqs have
                 * timed out, signals are enabled allowing completion with
                 * EINTR.
                 * I don't really care if we go once more round the loop in
                 * the error cases -eeb. */
        } while (rc != 0);

        LASSERT (set->set_remaining == 0);

        rc = 0;
        list_for_each(tmp, &set->set_requests) {
                req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);

                LASSERT (req->rq_phase == RQ_PHASE_COMPLETE);
                if (req->rq_status != 0)
                        rc = req->rq_status;
        }

        if (set->set_interpret != NULL) {
                int (*interpreter)(struct ptlrpc_request_set *set,void *,int) =
                        set->set_interpret;
                rc = interpreter (set, &set->set_args, rc);
        }

        RETURN(rc);
}

static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL) {
                EXIT;
                return;
        }

        LASSERT (!request->rq_receiving_reply);

        /* We must take it off the imp_replay_list first.  Otherwise, we'll set
         * request->rq_reqmsg to NULL while osc_close is dereferencing it. */
        if (request->rq_import != NULL) {
                unsigned long flags = 0;
                if (!locked)
                        spin_lock_irqsave(&request->rq_import->imp_lock, flags);
                list_del_init(&request->rq_list);
                if (!locked)
                        spin_unlock_irqrestore(&request->rq_import->imp_lock,
                                               flags);
        }

        if (atomic_read(&request->rq_refcount) != 0) {
                DEBUG_REQ(D_ERROR, request,
                          "freeing request with nonzero refcount");
                LBUG();
        }

        if (request->rq_repmsg != NULL) {
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
        }
        if (request->rq_reqmsg != NULL) {
                OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
                request->rq_reqmsg = NULL;
        }
        if (request->rq_export != NULL) {
                class_export_put(request->rq_export);
                request->rq_export = NULL;
        }
        if (request->rq_import != NULL) {
                class_import_put(request->rq_import);
                request->rq_import = NULL;
        }
        if (request->rq_bulk != NULL)
                ptlrpc_free_bulk(request->rq_bulk);

        ptlrpc_put_connection(request->rq_connection);
        OBD_FREE(request, sizeof(*request));
        EXIT;
}

void ptlrpc_free_req(struct ptlrpc_request *request)
{
        __ptlrpc_free_req(request, 0);
}

static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
        ENTRY;
        if (request == NULL)
                RETURN(1);

        if (request == (void *)(long)(0x5a5a5a5a5a5a5a5a)) {
                CERROR("dereferencing freed request (bug 575)\n");
                LBUG();
        }

        DEBUG_REQ(D_INFO, request, "refcount now %u",
                  atomic_read(&request->rq_refcount) - 1);

        if (atomic_dec_and_test(&request->rq_refcount)) {
                __ptlrpc_free_req(request, locked);
                RETURN(1);
        }

        RETURN(0);
}

void ptlrpc_req_finished(struct ptlrpc_request *request)
{
        __ptlrpc_req_finished(request, 0);
}

static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqmsg, request->rq_reqlen);
        request->rq_reqmsg = NULL;
        request->rq_reqlen = 0;
}

/* Disengage the client's reply buffer from the network
 * NB does _NOT_ unregister any client-side bulk.
 * IDEMPOTENT, but _not_ safe against concurrent callers.
 * The request owner (i.e. the thread doing the I/O) must call...
 */
void ptlrpc_unregister_reply (struct ptlrpc_request *request)
{
        unsigned long flags;
        int rc;
        ENTRY;

        LASSERT (!in_interrupt ());             /* might sleep */

        spin_lock_irqsave (&request->rq_lock, flags);
        if (!request->rq_receiving_reply) {     /* not waiting for a reply */
                spin_unlock_irqrestore (&request->rq_lock, flags);
                EXIT;
                /* NB reply buffer not freed here */
                return;
        }

        LASSERT (!request->rq_replied);         /* callback hasn't completed */
        spin_unlock_irqrestore (&request->rq_lock, flags);

        rc = PtlMDUnlink (request->rq_reply_md_h);
        switch (rc) {
        default:
                LBUG ();

        case PTL_OK:                            /* unlinked before completion */
                LASSERT (request->rq_receiving_reply);
                LASSERT (!request->rq_replied);
                spin_lock_irqsave (&request->rq_lock, flags);
                request->rq_receiving_reply = 0;
                spin_unlock_irqrestore (&request->rq_lock, flags);
                OBD_FREE(request->rq_repmsg, request->rq_replen);
                request->rq_repmsg = NULL;
                EXIT;
                return;

        case PTL_MD_INUSE:                      /* callback in progress */
                for (;;) {
                        /* Network access will complete in finite time but
                         * the timeout lets us CERROR for visibility */
                        struct l_wait_info lwi = LWI_TIMEOUT(10*HZ, NULL, NULL);

                        rc = l_wait_event (request->rq_wait_for_rep,
                                           request->rq_replied, &lwi);
                        LASSERT (rc == 0 || rc == -ETIMEDOUT);
                        if (rc == 0) {
                                spin_lock_irqsave (&request->rq_lock, flags);
                                /* Ensure the callback has completed scheduling
                                 * me and taken its hands off the request */
                                spin_unlock_irqrestore(&request->rq_lock,flags);
                                break;
                        }

                        CERROR ("Unexpectedly long timeout: req %p\n", request);
                }
                /* fall through */

        case PTL_INV_MD:                        /* callback completed */
                LASSERT (!request->rq_receiving_reply);
                LASSERT (request->rq_replied);
                EXIT;
                return;
        }
}

/* caller must hold imp->imp_lock */
void ptlrpc_free_committed(struct obd_import *imp)
{
        struct list_head *tmp, *saved;
        struct ptlrpc_request *req;
        struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
        ENTRY;

        LASSERT(imp != NULL);

        LASSERT(spin_is_locked(&imp->imp_lock));

        CDEBUG(D_HA, "%s: committing for last_committed "LPU64"\n",
               imp->imp_obd->obd_name, imp->imp_peer_committed_transno);

        list_for_each_safe(tmp, saved, &imp->imp_replay_list) {
                req = list_entry(tmp, struct ptlrpc_request, rq_list);

                /* XXX ok to remove when 1357 resolved - rread 05/29/03 */
                LASSERT (req != last_req);
                last_req = req;

                if (req->rq_replay) {
                        DEBUG_REQ(D_HA, req, "keeping (FL_REPLAY)");
                        continue;
                }

                /* not yet committed */
                if (req->rq_transno > imp->imp_peer_committed_transno) {
                        DEBUG_REQ(D_HA, req, "stopping search");
                        break;
                }

                DEBUG_REQ(D_HA, req, "committing (last_committed "LPU64")",
                          imp->imp_peer_committed_transno);
                list_del_init(&req->rq_list);
                __ptlrpc_req_finished(req, 1);
        }

        EXIT;
        return;
}

void ptlrpc_cleanup_client(struct obd_import *imp)
{
        ENTRY;
        EXIT;
        return;
}

void ptlrpc_resend_req(struct ptlrpc_request *req)
{
        unsigned long flags;

        DEBUG_REQ(D_HA, req, "resending");
        req->rq_reqmsg->handle.cookie = 0;
        ptlrpc_put_connection(req->rq_connection);
        req->rq_connection =
                ptlrpc_connection_addref(req->rq_import->imp_connection);
        req->rq_status = -EAGAIN;

        spin_lock_irqsave (&req->rq_lock, flags);
        req->rq_resend = 1;
        req->rq_timedout = 0;
        if (req->rq_set != NULL)
                wake_up (&req->rq_set->set_waitq);
        else
                wake_up(&req->rq_wait_for_rep);
        spin_unlock_irqrestore (&req->rq_lock, flags);
}

/* XXX: this function and rq_status are currently unused */
void ptlrpc_restart_req(struct ptlrpc_request *req)
{
        unsigned long flags;

        DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request");
        req->rq_status = -ERESTARTSYS;

        spin_lock_irqsave (&req->rq_lock, flags);
        req->rq_restart = 1;
        req->rq_timedout = 0;
        if (req->rq_set != NULL)
                wake_up (&req->rq_set->set_waitq);
        else
                wake_up(&req->rq_wait_for_rep);
        spin_unlock_irqrestore (&req->rq_lock, flags);
}

static int expired_request(void *data)
{
        struct ptlrpc_request *req = data;
        ENTRY;

        RETURN(expire_one_request(req));
}

static void interrupted_request(void *data)
{
        unsigned long flags;
        struct ptlrpc_request *req = data;

        DEBUG_REQ(D_HA, req, "request interrupted");
        spin_lock_irqsave (&req->rq_lock, flags);
        req->rq_intr = 1;
        spin_unlock_irqrestore (&req->rq_lock, flags);
}

struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
        ENTRY;
        atomic_inc(&req->rq_refcount);
        RETURN(req);
}

void ptlrpc_retain_replayable_request(struct ptlrpc_request *req,
                                      struct obd_import *imp)
{
        struct list_head *tmp;

        LASSERT(spin_is_locked(&imp->imp_lock));

        LASSERT(imp->imp_replayable);
        /* Balanced in ptlrpc_free_committed, usually. */
        ptlrpc_request_addref(req);
        list_for_each_prev(tmp, &imp->imp_replay_list) {
                struct ptlrpc_request *iter =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                /* We may have duplicate transnos if we create and then
                 * open a file, or for closes retained to match creating
                 * opens, so use req->rq_xid as a secondary key.
                 * (See bugs 684, 685, and 428.)
                 * XXX no longer needed, but all opens need transnos!
                 */
                if (iter->rq_transno > req->rq_transno)
                        continue;

                if (iter->rq_transno == req->rq_transno) {
                        LASSERT(iter->rq_xid != req->rq_xid);
                        if (iter->rq_xid > req->rq_xid)
                                continue;
                }

                list_add(&req->rq_list, &iter->rq_list);
                return;
        }

        list_add_tail(&req->rq_list, &imp->imp_replay_list);
}
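
/* Synchronous send: queue @req, sleep until the reply arrives (or the
 * request times out, is interrupted, or recovery intervenes), handling
 * resend and restart inline.  The caller keeps its reference and must
 * still call ptlrpc_req_finished(). */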
int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
        int rc = 0;
        int brc;
        struct l_wait_info lwi;
        struct obd_import *imp = req->rq_import;
        struct obd_device *obd = imp->imp_obd;
        struct ptlrpc_connection *conn = imp->imp_connection;
        unsigned long flags;
        int do_restart = 0;
        int timeout = 0;
        ENTRY;

        LASSERT (req->rq_set == NULL);
        LASSERT (!req->rq_receiving_reply);

        /* for distributed debugging */
        req->rq_reqmsg->status = current->pid;
        LASSERT(imp->imp_obd != NULL);
        CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:ni:nid:opc "
               "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               imp->imp_obd->obd_uuid.uuid,
               req->rq_reqmsg->status, req->rq_xid,
               conn->c_peer.peer_ni->pni_name, conn->c_peer.peer_nid,
               req->rq_reqmsg->opc);

        /* Mark phase here for a little debug help */
        req->rq_phase = RQ_PHASE_RPC;

restart:
        /*
         * If the import has been invalidated (such as by an OST failure), the
         * request must fail with -EIO.  Recovery requests are allowed to go
         * through, though, so that they have a chance to revalidate the
         * import. */
        spin_lock_irqsave(&imp->imp_lock, flags);
        if (req->rq_import->imp_invalid && req->rq_level == LUSTRE_CONN_FULL) {
                DEBUG_REQ(D_ERROR, req, "IMP_INVALID:");
                spin_unlock_irqrestore(&imp->imp_lock, flags);
                GOTO (out, rc = -EIO);
        }

        if (req->rq_level > imp->imp_level) {
                list_del(&req->rq_list);
                if (req->rq_no_recov || obd->obd_no_recov ||
                    imp->imp_dlm_fake) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO (out, rc = -EWOULDBLOCK);
                }

                list_add_tail(&req->rq_list, &imp->imp_delayed_list);
                spin_unlock_irqrestore(&imp->imp_lock, flags);

                DEBUG_REQ(D_HA, req, "\"%s\" waiting for recovery: (%d > %d)",
                          current->comm, req->rq_level, imp->imp_level);
                lwi = LWI_INTR(NULL, NULL);
                rc = l_wait_event(req->rq_wait_for_rep,
                                  (req->rq_level <= imp->imp_level ||
                                   req->rq_err),
                                  &lwi);
                DEBUG_REQ(D_HA, req, "\"%s\" awake: (%d > %d)",
                          current->comm, req->rq_level, imp->imp_level);

                spin_lock_irqsave(&imp->imp_lock, flags);
                list_del_init(&req->rq_list);

                if (req->rq_err)
                        rc = -EIO;

                if (rc) {
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO (out, rc);
                }

                CERROR("process %d resumed\n", current->pid);
        }

        /* XXX this is the same as ptlrpc_set_wait */
        LASSERT(list_empty(&req->rq_list));
        list_add_tail(&req->rq_list, &imp->imp_sending_list);
        req->rq_import_generation = imp->imp_generation;
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        rc = ptl_send_rpc(req);
        if (rc) {
                /* The DLM's fake imports want to avoid all forms of
                 * recovery. */
                if (imp->imp_dlm_fake) {
                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc);
                }

                DEBUG_REQ(D_ERROR, req, "send failed (%d); recovering", rc);

                ptlrpc_fail_import(imp, req->rq_import_generation);

                /* If we've been told to not wait, we're done. */
                if (req->rq_level < LUSTRE_CONN_FULL || req->rq_no_recov ||
                    obd->obd_no_recov) {
                        spin_lock_irqsave(&imp->imp_lock, flags);
                        list_del_init(&req->rq_list);
                        spin_unlock_irqrestore(&imp->imp_lock, flags);
                        GOTO(out, rc);
                }

                /* If we errored, allow the user to interrupt immediately */
                timeout = 1;
        } else {
                timeout = req->rq_timeout * HZ;
                DEBUG_REQ(D_NET, req, "-- sleeping");
        }

#ifdef __KERNEL__
        lwi = LWI_TIMEOUT_INTR(timeout, expired_request, interrupted_request,
                               req);
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
#else
        {
                extern int reply_in_callback(ptl_event_t *ev);
                ptl_event_t reply_ev;

                PtlEQWait(req->rq_connection->c_peer.peer_ni->pni_reply_in_eq_h,
                          &reply_ev);
                reply_in_callback(&reply_ev);

                LASSERT (reply_ev.mem_desc.user_ptr == (void *)req);
                // ptlrpc_check_reply(req);
                // not required now it only tests
        }
#endif

        DEBUG_REQ(D_NET, req, "-- done sleeping");

        CDEBUG(D_RPCTRACE, "Completed RPC pname:cluuid:pid:xid:ni:nid:opc "
               "%s:%s:%d:"LPU64":%s:"LPX64":%d\n", current->comm,
               imp->imp_obd->obd_uuid.uuid,
               req->rq_reqmsg->status, req->rq_xid,
               conn->c_peer.peer_ni->pni_name, conn->c_peer.peer_nid,
               req->rq_reqmsg->opc);

        spin_lock_irqsave(&imp->imp_lock, flags);
        list_del_init(&req->rq_list);
        spin_unlock_irqrestore(&imp->imp_lock, flags);

        /* If the reply was received normally, this just grabs the spinlock
         * (ensuring the reply callback has returned), sees that
         * req->rq_receiving_reply is clear and returns. */
        ptlrpc_unregister_reply (req);

        if (req->rq_err)
                GOTO(out, rc = -EIO);

        /* Resend if we need to, unless we were interrupted. */
        if (req->rq_resend && !req->rq_intr) {
                /* ...unless we were specifically told otherwise. */
                if (req->rq_no_resend) {
                        spin_lock_irqsave (&req->rq_lock, flags);
                        req->rq_no_resend = 0;
                        spin_unlock_irqrestore (&req->rq_lock, flags);
                        GOTO(out, rc = -ETIMEDOUT);
                }
                spin_lock_irqsave (&req->rq_lock, flags);
                req->rq_resend = 0;
                spin_unlock_irqrestore (&req->rq_lock, flags);
                lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT);

                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk (req);

                DEBUG_REQ(D_HA, req, "resending: ");
                goto restart;
        }

        if (req->rq_intr) {
                /* Should only be interrupted if we timed out. */
                if (!req->rq_timedout)
                        DEBUG_REQ(D_ERROR, req,
                                  "rq_intr set but rq_timedout not");
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_timedout) {                 /* non-recoverable timeout */
                GOTO(out, rc = -ETIMEDOUT);
        }

        if (!req->rq_replied) {
                /* How can this be? -eeb */
                DEBUG_REQ(D_ERROR, req, "!rq_replied: ");
                LBUG();
                GOTO(out, rc = req->rq_status);
        }

        rc = after_reply (req, &do_restart);
        /* NB may return +ve success rc */
        if (do_restart) {
                if (req->rq_bulk != NULL)
                        ptlrpc_unregister_bulk (req);
                DEBUG_REQ(D_HA, req, "resending: ");
                goto restart;
        }

 out:
        if (req->rq_bulk != NULL) {
                if (rc >= 0) {                  /* success so far */
                        lwi = LWI_TIMEOUT(timeout, NULL, NULL);
                        brc = l_wait_event(req->rq_wait_for_rep,
                                           ptlrpc_bulk_complete(req->rq_bulk),
                                           &lwi);
                        if (brc != 0) {
                                LASSERT (brc == -ETIMEDOUT);
                                CERROR ("Timed out waiting for bulk\n");
                                rc = brc;
                        }
                }
                if (rc < 0) {
                        /* MDS blocks for put ACKs before replying */
                        /* OSC sets rq_no_resend for the time being */
                        LASSERT (req->rq_no_resend);
                        ptlrpc_unregister_bulk (req);
                }
        }

        LASSERT (!req->rq_receiving_reply);
        req->rq_phase = RQ_PHASE_INTERPRET;
        RETURN(rc);
}
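
/* Resend a retained request during recovery.  Runs at
 * LUSTRE_CONN_RECOVD level; the server must return the same transno it
 * originally assigned (asserted below). */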
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
        int rc = 0, old_level, old_status = 0;
        // struct ptlrpc_client *cli = req->rq_import->imp_client;
        struct l_wait_info lwi;
        ENTRY;

        /* I don't touch rq_phase here, so the debug log can show what
         * state it was left in */

        /* Not handling automatic bulk replay yet (or ever?) */
        LASSERT (req->rq_bulk == NULL);

        DEBUG_REQ(D_NET, req, "about to replay");

        /* Update request's state, since we might have a new connection. */
        ptlrpc_put_connection(req->rq_connection);
        req->rq_connection =
                ptlrpc_connection_addref(req->rq_import->imp_connection);

        /* temporarily set request to RECOVD level (reset at out:) */
        old_level = req->rq_level;
        if (req->rq_replied)
                old_status = req->rq_repmsg->status;
        req->rq_level = LUSTRE_CONN_RECOVD;
        rc = ptl_send_rpc(req);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                // up(&cli->cli_rpc_sem);
                GOTO(out, rc = -rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        lwi = LWI_INTR(NULL, NULL); /* XXX needs timeout, nested recovery */
        l_wait_event(req->rq_wait_for_rep, ptlrpc_check_reply(req), &lwi);
        CDEBUG(D_OTHER, "-- done\n");

        // up(&cli->cli_rpc_sem);

        /* If the reply was received normally, this just grabs the spinlock
         * (ensuring the reply callback has returned), sees that
         * req->rq_receiving_reply is clear and returns. */
        ptlrpc_unregister_reply (req);

        if (!req->rq_replied) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                /* ...that's because signals aren't all masked in
                 * l_wait_event() -eeb */
                GOTO(out, rc = -EINTR);
        }

#if SWAB_PARANOIA
        /* Clear reply swab mask; this is a new reply in sender's byte order */
        req->rq_rep_swab_mask = 0;
#endif
        rc = lustre_unpack_msg(req->rq_repmsg, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc = -EPROTO);
        }

#if 0
        /* FIXME: Enable when BlueArc makes new release */
        if (req->rq_repmsg->type != PTL_RPC_MSG_REPLY &&
            req->rq_repmsg->type != PTL_RPC_MSG_ERR) {
                CERROR("invalid packet type received (type=%u)\n",
                       req->rq_repmsg->type);
                GOTO(out, rc = -EPROTO);
        }
#endif

        /* The transno had better not change over replay. */
        LASSERT(req->rq_reqmsg->transno == req->rq_repmsg->transno);

        CDEBUG(D_NET, "got rep "LPD64"\n", req->rq_xid);

        /* let the callback do fixups, possibly including in the request */
        if (req->rq_replay_cb)
                req->rq_replay_cb(req);

        if (req->rq_replied && req->rq_repmsg->status != old_status) {
                DEBUG_REQ(D_HA, req, "status %d, old was %d",
                          req->rq_repmsg->status, old_status);
        }

 out:
        req->rq_level = old_level;
        RETURN(rc);
}

void ptlrpc_abort_inflight(struct obd_import *imp)
{
        unsigned long flags;
        struct list_head *tmp, *n;
        ENTRY;

        /* Make sure that no new requests get processed for this import.
         * ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
         * this flag and then putting requests on sending_list or delayed_list.
         */
        spin_lock_irqsave(&imp->imp_lock, flags);
        if (!imp->imp_replayable)
                /* on b_devel, I moved this line to
                   ptlrpc_set_import_active because I thought it made
                   more sense there and possibly not all callers of
                   this function expect this.  I'll leave it here until
                   I can figure out if it's correct or not. - rread 5/12/03 */
                imp->imp_invalid = 1;

        /* XXX locking?  Maybe we should remove each request with the list
         * locked?  Also, how do we know if the requests on the list are
         * being freed at this time?
         */
        list_for_each_safe(tmp, n, &imp->imp_sending_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "inflight");

                spin_lock (&req->rq_lock);
                req->rq_err = 1;
                if (req->rq_set != NULL)
                        wake_up(&req->rq_set->set_waitq);
                else
                        wake_up(&req->rq_wait_for_rep);
                spin_unlock (&req->rq_lock);
        }

        list_for_each_safe(tmp, n, &imp->imp_delayed_list) {
                struct ptlrpc_request *req =
                        list_entry(tmp, struct ptlrpc_request, rq_list);

                DEBUG_REQ(D_HA, req, "aborting waiting req");

                spin_lock (&req->rq_lock);
                req->rq_err = 1;
                if (req->rq_set != NULL)
                        wake_up(&req->rq_set->set_waitq);
                else
                        wake_up(&req->rq_wait_for_rep);
                spin_unlock (&req->rq_lock);
        }

        /* Last chance to free reqs left on the replay list, but we
         * will still leak reqs that haven't committed. */
        if (imp->imp_replayable)
                ptlrpc_free_committed(imp);

        spin_unlock_irqrestore(&imp->imp_lock, flags);

        EXIT;
}

static __u64 ptlrpc_last_xid = 0;
static spinlock_t ptlrpc_last_xid_lock = SPIN_LOCK_UNLOCKED;

__u64 ptlrpc_next_xid(void)
{
        __u64 tmp;

        spin_lock(&ptlrpc_last_xid_lock);
        tmp = ++ptlrpc_last_xid;
        spin_unlock(&ptlrpc_last_xid_lock);
        return tmp;
}