/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define EXPORT_SYMTAB

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/obd_support.h>
#include <linux/obd_class.h>
#include <linux/lustre_net.h>

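/* Initialize a client handle: reset the XID, generation and epoch
 * counters, record the request and reply portals, and cap the number
 * of concurrent RPCs at 32 via cli_rpc_sem. */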
void ptlrpc_init_client(int dev, int req_portal, int rep_portal,
                        struct ptlrpc_client *cl)
{
        memset(cl, 0, sizeof(*cl));
        spin_lock_init(&cl->cli_lock);
        cl->cli_xid = 1;
        cl->cli_generation = 1;
        cl->cli_epoch = 1;
        cl->cli_bootcount = 0;
        cl->cli_obd = NULL;
        cl->cli_request_portal = req_portal;
        cl->cli_reply_portal = rep_portal;
        INIT_LIST_HEAD(&cl->cli_sending_head);
        INIT_LIST_HEAD(&cl->cli_sent_head);
        sema_init(&cl->cli_rpc_sem, 32);
}

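/* Start a new connection epoch and resolve the server UUID to a peer
 * address.  Returns 0 on success or the kportal_uuid_to_peer() error
 * if the peer cannot be found. */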
int ptlrpc_connect_client(int dev, char *uuid, struct ptlrpc_client *cl)
{
        int err;

        cl->cli_epoch++;
        err = kportal_uuid_to_peer(uuid, &cl->cli_server);
        if (err != 0)
                CERROR("cannot find peer %s!\n", uuid);

        return err;
}

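/* Allocate a bulk descriptor bound to the given peer and initialize
 * its wait queue.  Returns NULL on allocation failure. */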
struct ptlrpc_bulk_desc *ptlrpc_prep_bulk(struct lustre_peer *peer)
{
        struct ptlrpc_bulk_desc *bulk;

        OBD_ALLOC(bulk, sizeof(*bulk));
        if (bulk != NULL) {
                memcpy(&bulk->b_peer, peer, sizeof(*peer));
                init_waitqueue_head(&bulk->b_waitq);
        }

        return bulk;
}

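/* Allocate a request, assign it the next XID under cli_lock, and pack
 * the caller's buffers into an on-wire message with the header fields
 * in network byte order. */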
struct ptlrpc_request *ptlrpc_prep_req(struct ptlrpc_client *cl, int opcode,
                                       int count, int *lengths, char **bufs)
{
        struct ptlrpc_request *request;
        int rc;
        ENTRY;

        OBD_ALLOC(request, sizeof(*request));
        if (!request) {
                CERROR("request allocation out of memory\n");
                RETURN(NULL);
        }

        spin_lock(&cl->cli_lock);
        request->rq_xid = cl->cli_xid++;
        spin_unlock(&cl->cli_lock);

        rc = lustre_pack_msg(count, lengths, bufs,
                             &request->rq_reqlen, &request->rq_reqbuf);
        if (rc) {
                CERROR("cannot pack request %d\n", rc);
                /* don't leak the request struct on packing failure */
                OBD_FREE(request, sizeof(*request));
                RETURN(NULL);
        }
        request->rq_type = PTL_RPC_REQUEST;
        request->rq_reqmsg = (struct lustre_msg *)request->rq_reqbuf;
        request->rq_reqmsg->opc = HTON__u32(opcode);
        request->rq_reqmsg->xid = HTON__u32(request->rq_xid);
        request->rq_reqmsg->type = HTON__u32(request->rq_type);

        RETURN(request);
}

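/* Free a request and its reply buffer, if one was received.  Safe to
 * call with a NULL request. */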
void ptlrpc_free_req(struct ptlrpc_request *request)
{
        if (request == NULL)
                return;

        if (request->rq_repbuf != NULL)
                OBD_FREE(request->rq_repbuf, request->rq_replen);
        OBD_FREE(request, sizeof(*request));
}

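/* Wakeup condition for ptlrpc_queue_wait(): returns nonzero once a
 * reply buffer has arrived or a fatal signal (SIGKILL, SIGTERM or
 * SIGINT) is pending, recording the reason in rq_flags. */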
static int ptlrpc_check_reply(struct ptlrpc_request *req)
{
        int rc = 0;

        if (req->rq_repbuf != NULL) {
                req->rq_flags = PTL_RPC_REPLY;
                GOTO(out, rc = 1);
        }

        if (sigismember(&(current->pending.signal), SIGKILL) ||
            sigismember(&(current->pending.signal), SIGTERM) ||
            sigismember(&(current->pending.signal), SIGINT)) {
                req->rq_flags = PTL_RPC_INTR;
                GOTO(out, rc = 1);
        }

 out:
        return rc;
}

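/* Fold the transport error and the status carried in the reply
 * message into a single return code; 0 only if both are clean. */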
int ptlrpc_check_status(struct ptlrpc_request *req, int err)
{
        ENTRY;

        if (err != 0) {
                CERROR("err is %d\n", err);
                RETURN(err);
        }

        if (req == NULL) {
                CERROR("req == NULL\n");
                RETURN(-ENOMEM);
        }

        if (req->rq_repmsg == NULL) {
                CERROR("req->rq_repmsg == NULL\n");
                RETURN(-ENOMEM);
        }

        if (req->rq_repmsg->status != 0) {
                CERROR("req->rq_repmsg->status is %d\n",
                       req->rq_repmsg->status);
                /* XXX: translate this error from net to host */
                RETURN(req->rq_repmsg->status);
        }

        RETURN(0);
}

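/* Release the packed request buffer once it is no longer needed. */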
static void ptlrpc_cleanup_request_buf(struct ptlrpc_request *request)
{
        OBD_FREE(request->rq_reqbuf, request->rq_reqlen);
        request->rq_reqbuf = NULL;
        request->rq_reqlen = 0;
}

/* Abort this request and clean up any resources associated with it. */
static int ptlrpc_abort(struct ptlrpc_request *request)
{
        /* First remove the ME for the reply; in theory, this means
         * that we can tear down the buffer safely. */
        PtlMEUnlink(request->rq_reply_me_h);
        OBD_FREE(request->rq_reply_md.start, request->rq_replen);
        request->rq_repbuf = NULL;
        request->rq_replen = 0;
        return 0;
}

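/* Send a request and sleep interruptibly until the reply arrives (or
 * a fatal signal is delivered), then unpack the reply.  Returns 0 on
 * success, -EINTR if interrupted, or a send/unpack error. */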
int ptlrpc_queue_wait(struct ptlrpc_client *cl, struct ptlrpc_request *req)
{
        int rc = 0;
        ENTRY;

        init_waitqueue_head(&req->rq_wait_for_rep);

        req->rq_client = cl;
        req->rq_req_portal = cl->cli_request_portal;
        req->rq_reply_portal = cl->cli_reply_portal;
        rc = ptl_send_rpc(req, cl);
        if (rc) {
                CERROR("error %d, opcode %d\n", rc, req->rq_reqmsg->opc);
                ptlrpc_cleanup_request_buf(req);
                up(&cl->cli_rpc_sem);
                RETURN(-rc);
        }

        CDEBUG(D_OTHER, "-- sleeping\n");
        wait_event_interruptible(req->rq_wait_for_rep, ptlrpc_check_reply(req));
        CDEBUG(D_OTHER, "-- done\n");
        ptlrpc_cleanup_request_buf(req);
        up(&cl->cli_rpc_sem);
        if (req->rq_flags == PTL_RPC_INTR) {
                /* Clean up the dangling reply buffers */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        if (req->rq_flags != PTL_RPC_REPLY) {
                CERROR("Unknown reason for wakeup\n");
                /* XXX Phil - I end up here when I kill obdctl */
                ptlrpc_abort(req);
                GOTO(out, rc = -EINTR);
        }

        rc = lustre_unpack_msg(req->rq_repbuf, req->rq_replen);
        if (rc) {
                CERROR("unpack_rep failed: %d\n", rc);
                GOTO(out, rc);
        }
        /* only point rq_repmsg at the buffer once it unpacked cleanly */
        req->rq_repmsg = (struct lustre_msg *)req->rq_repbuf;
        CDEBUG(D_NET, "got rep %d\n", req->rq_repmsg->xid);

        if (req->rq_repmsg->status == 0)
                CDEBUG(D_NET, "--> buf %p len %d status %d\n", req->rq_repbuf,
                       req->rq_replen, req->rq_repmsg->status);

        EXIT;
 out:
        return rc;
}
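
/*
 * Caller-side usage sketch (illustrative only): the portal numbers,
 * opcode, UUID and request body below are hypothetical placeholders,
 * not definitions from this file.
 *
 *      struct ptlrpc_client cl;
 *      struct ptlrpc_request *req;
 *      int lengths[1] = { 128 };
 *      char *bufs[1] = { some_body };
 *      int rc;
 *
 *      ptlrpc_init_client(0, EXAMPLE_REQ_PORTAL, EXAMPLE_REP_PORTAL, &cl);
 *      rc = ptlrpc_connect_client(0, "example-server-uuid", &cl);
 *      if (rc == 0) {
 *              req = ptlrpc_prep_req(&cl, EXAMPLE_OPC, 1, lengths, bufs);
 *              if (req != NULL) {
 *                      rc = ptlrpc_queue_wait(&cl, req);
 *                      rc = ptlrpc_check_status(req, rc);
 *                      ptlrpc_free_req(req);
 *              }
 *      }
 */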