/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 *  Copyright (C) 2002 Cluster File Systems, Inc.
 *
 *   This file is part of Lustre, http://www.lustre.org.
 *
 *   Lustre is free software; you can redistribute it and/or
 *   modify it under the terms of version 2 of the GNU General Public
 *   License as published by the Free Software Foundation.
 *
 *   Lustre is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with Lustre; if not, write to the Free Software
 *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#define DEBUG_SUBSYSTEM S_RPC

#include <linux/lustre_net.h>

extern ptl_handle_eq_t request_out_eq, reply_in_eq, reply_out_eq,
        bulk_source_eq, bulk_sink_eq;
static ptl_process_id_t local_id = {PTL_NID_ANY, PTL_PID_ANY};

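/* Poll for completion of an outgoing bulk transfer: returns 1 once the
 * descriptor has been sent, or if a pending SIGKILL/SIGINT should
 * interrupt the wait (PTL_RPC_FL_INTR is set in that case); returns 0
 * while no event has arrived yet. */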
int ptlrpc_check_bulk_sent(struct ptlrpc_bulk_desc *bulk)
{
        ENTRY;

        if (bulk->b_flags & PTL_BULK_FL_SENT)
                RETURN(1);

        if (sigismember(&(current->pending.signal), SIGKILL) ||
            sigismember(&(current->pending.signal), SIGINT)) {
                bulk->b_flags |= PTL_RPC_FL_INTR;
                RETURN(1);
        }

        CDEBUG(D_NET, "no event yet\n");
        RETURN(0);
}

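/* Bind the outgoing request or reply buffer of 'request' as a memory
 * descriptor and PtlPut() it to 'portal' on the peer named by 'conn',
 * using the request's transaction ID (rq_xid) as the match bits. */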
static int ptl_send_buf(struct ptlrpc_request *request,
                        struct ptlrpc_connection *conn, int portal)
{
        int rc;
        ptl_process_id_t remote_id;
        ptl_handle_md_t md_h;
        ptl_ack_req_t ack;

        request->rq_req_md.user_ptr = request;

        switch (request->rq_type) {
        case PTL_RPC_TYPE_REQUEST:
                request->rq_req_md.start = request->rq_reqmsg;
                request->rq_req_md.length = request->rq_reqlen;
                request->rq_req_md.eventq = request_out_eq;
                request->rq_req_md.threshold = 1;
                ack = PTL_NOACK_REQ;
                break;
        case PTL_RPC_TYPE_REPLY:
                request->rq_req_md.start = request->rq_repmsg;
                request->rq_req_md.length = request->rq_replen;
                request->rq_req_md.eventq = reply_out_eq;
                request->rq_req_md.threshold = 1;
                ack = PTL_NOACK_REQ;
                break;
        default:
                LBUG();
                return -1; /* notreached */
        }
        request->rq_req_md.options = PTL_MD_OP_PUT;
        request->rq_req_md.user_ptr = request;

        rc = PtlMDBind(conn->c_peer.peer_ni, request->rq_req_md, &md_h);
        //CERROR("MDBind (outgoing req/rep/bulk): %Lu\n", (__u64)md_h);
        if (rc != 0) {
                CERROR("PtlMDBind failed: %d\n", rc);
                LBUG();
                return rc;
        }

        remote_id.nid = conn->c_peer.peer_nid;
        remote_id.pid = 0;

        CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %Ld\n",
               request->rq_req_md.length, portal, request->rq_xid);

        rc = PtlPut(md_h, ack, remote_id, portal, 0, request->rq_xid,
                    0, 0);
        if (rc != PTL_OK) {
                CERROR("PtlPut(%Lu, %d, %Ld) failed: %d\n", remote_id.nid,
                       portal, request->rq_xid, rc);
                PtlMDUnlink(md_h);
        }

        return rc;
}

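/* Push the source side of a bulk transfer: for every page on the
 * descriptor's page list, bind an MD and PtlPut() it (with an ACK
 * requested) to the bulk portal, matched on the page's xid. */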
int ptlrpc_send_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;
        struct list_head *tmp, *next;
        ptl_process_id_t remote_id;
        ENTRY;

        list_for_each_safe(tmp, next, &desc->b_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);

                bulk->b_md.start = bulk->b_buf;
                bulk->b_md.length = bulk->b_buflen;
                bulk->b_md.eventq = bulk_source_eq;
                bulk->b_md.threshold = 2; /* SENT and ACK */
                bulk->b_md.options = PTL_MD_OP_PUT;
                bulk->b_md.user_ptr = bulk;

                rc = PtlMDBind(desc->b_connection->c_peer.peer_ni, bulk->b_md,
                               &bulk->b_md_h);
                if (rc != 0) {
                        CERROR("PtlMDBind failed: %d\n", rc);
                        LBUG();
                        RETURN(rc);
                }

                remote_id.nid = desc->b_connection->c_peer.peer_nid;
                remote_id.pid = 0;

                CDEBUG(D_NET, "Sending %d bytes to portal %d, xid %d\n",
                       bulk->b_md.length, desc->b_portal, bulk->b_xid);

                rc = PtlPut(bulk->b_md_h, PTL_ACK_REQ, remote_id,
                            desc->b_portal, 0, bulk->b_xid, 0, 0);
                if (rc != PTL_OK) {
                        CERROR("PtlPut(%Lu, %d, %d) failed: %d\n",
                               remote_id.nid, desc->b_portal, bulk->b_xid, rc);
                        PtlMDUnlink(bulk->b_md_h);
                        LBUG();
                        RETURN(rc);
                }
        }

        RETURN(0);
}

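/* Set up the sink side of a bulk transfer: attach a match entry and a
 * receive MD on the bulk portal for every page in the descriptor, so
 * the peer's PtlPut() lands in each page's buffer.  On failure,
 * anything already attached is torn down via ptlrpc_abort_bulk(). */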
int ptlrpc_register_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;
        int rc;
        ENTRY;

        list_for_each_safe(tmp, next, &desc->b_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);

                rc = PtlMEAttach(desc->b_connection->c_peer.peer_ni,
                                 desc->b_portal, local_id, bulk->b_xid, 0,
                                 PTL_UNLINK, PTL_INS_AFTER, &bulk->b_me_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMEAttach failed: %d\n", rc);
                        LBUG();
                        GOTO(cleanup, rc);
                }

                bulk->b_md.start = bulk->b_buf;
                bulk->b_md.length = bulk->b_buflen;
                bulk->b_md.threshold = 1;
                bulk->b_md.options = PTL_MD_OP_PUT;
                bulk->b_md.user_ptr = bulk;
                bulk->b_md.eventq = bulk_sink_eq;

                rc = PtlMDAttach(bulk->b_me_h, bulk->b_md, PTL_UNLINK,
                                 &bulk->b_md_h);
                if (rc != PTL_OK) {
                        CERROR("PtlMDAttach failed: %d\n", rc);
                        LBUG();
                        GOTO(cleanup, rc);
                }

                CDEBUG(D_NET, "Setup bulk sink buffer: %u bytes, xid %u, "
                       "portal %u\n", bulk->b_buflen, bulk->b_xid,
                       desc->b_portal);
        }

        RETURN(0);

 cleanup:
        ptlrpc_abort_bulk(desc);

        return rc;
}

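/* Tear down a (possibly partially) registered bulk descriptor by
 * unlinking the MD and ME attached for each of its pages. */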
int ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc)
{
        struct list_head *tmp, *next;

        list_for_each_safe(tmp, next, &desc->b_page_list) {
                struct ptlrpc_bulk_page *bulk;
                bulk = list_entry(tmp, struct ptlrpc_bulk_page, b_link);

                /* This should be safe: these handles are initialized to be
                 * invalid in ptlrpc_prep_bulk_page() */
                PtlMDUnlink(bulk->b_md_h);
                PtlMEUnlink(bulk->b_me_h);
        }

        return 0;
}

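/* Send the reply for a handled request: mark the request as a reply,
 * fill in the status, and push rq_repmsg to the service's reply portal
 * via ptl_send_buf(). */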
int ptlrpc_reply(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        /* FIXME: we need to increment the count of handled events */
        req->rq_type = PTL_RPC_TYPE_REPLY;
        //req->rq_repmsg->conn = req->rq_connection->c_remote_conn;
        //req->rq_repmsg->token = req->rq_connection->c_remote_token;
        req->rq_repmsg->status = HTON__u32(req->rq_status);
        req->rq_reqmsg->type = HTON__u32(req->rq_type);
        return ptl_send_buf(req, req->rq_connection, svc->srv_rep_portal);
}

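/* Send an error reply: pack a minimal reply message, mark it as
 * PTL_RPC_MSG_ERR, and send it with ptlrpc_reply(). */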
int ptlrpc_error(struct ptlrpc_service *svc, struct ptlrpc_request *req)
{
        int rc;
        ENTRY;

        if (req->rq_repmsg) {
                CERROR("req already has repmsg\n");
                LBUG();
        }

        rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
        if (rc)
                RETURN(rc);

        req->rq_repmsg->type = HTON__u32(PTL_RPC_MSG_ERR);

        rc = ptlrpc_reply(svc, req);
        RETURN(rc);
}


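/* Send a prepared request: allocate the reply buffer, attach an ME/MD
 * on the client's reply portal to catch the reply (matched on rq_xid),
 * then ship the request itself to the client's request portal. */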
int ptl_send_rpc(struct ptlrpc_request *request)
{
        int rc;
        char *repbuf;

        ENTRY;

        if (NTOH__u32(request->rq_reqmsg->type) != PTL_RPC_MSG_REQUEST) {
                CERROR("wrong packet type sent %d\n",
                       NTOH__u32(request->rq_reqmsg->type));
                LBUG();
                RETURN(EINVAL);
        }
        if (request->rq_replen == 0) {
                CERROR("request->rq_replen is 0!\n");
                RETURN(EINVAL);
        }

        /* request->rq_repmsg is set only when the reply comes in, in
         * client_packet_callback() */
        if (request->rq_reply_md.start)
                OBD_FREE(request->rq_reply_md.start, request->rq_replen);

        OBD_ALLOC(repbuf, request->rq_replen);
        if (!repbuf) {
                LBUG();
                RETURN(ENOMEM);
        }

        down(&request->rq_client->cli_rpc_sem);

        rc = PtlMEAttach(request->rq_connection->c_peer.peer_ni,
                         request->rq_client->cli_reply_portal,
                         local_id, request->rq_xid, 0, PTL_UNLINK,
                         PTL_INS_AFTER, &request->rq_reply_me_h);
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup, rc);
        }

        request->rq_type = PTL_RPC_TYPE_REQUEST;
        request->rq_reply_md.start = repbuf;
        request->rq_reply_md.length = request->rq_replen;
        request->rq_reply_md.threshold = 1;
        request->rq_reply_md.options = PTL_MD_OP_PUT;
        request->rq_reply_md.user_ptr = request;
        request->rq_reply_md.eventq = reply_in_eq;

        rc = PtlMDAttach(request->rq_reply_me_h, request->rq_reply_md,
                         PTL_UNLINK, &request->rq_reply_md_h);
        if (rc != PTL_OK) {
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
                GOTO(cleanup2, rc);
        }

        CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %Lu, portal %u\n",
               request->rq_replen, request->rq_xid,
               request->rq_client->cli_reply_portal);

        rc = ptl_send_buf(request, request->rq_connection,
                          request->rq_client->cli_request_portal);
        RETURN(rc);

 cleanup2:
        PtlMEUnlink(request->rq_reply_me_h);
 cleanup:
        OBD_FREE(repbuf, request->rq_replen);
        /* the reply buffer is gone; clear the stale pointer so a later
         * resend does not try to free it a second time */
        request->rq_reply_md.start = NULL;
        up(&request->rq_client->cli_rpc_sem);

        return rc;
}

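/* (Re)link service buffer 'i': attach a match entry on the service's
 * request portal and attach the corresponding ring buffer as an
 * auto-unlinking MD so that incoming requests can land in it. */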
void ptlrpc_link_svc_me(struct ptlrpc_service *service, int i)
{
        int rc;
        ptl_md_t dummy;
        ptl_handle_md_t md_h;

        /* Attach the leading ME on which we build the ring */
        rc = PtlMEAttach(service->srv_self.peer_ni, service->srv_req_portal,
                         local_id, 0, ~0, PTL_RETAIN, PTL_INS_BEFORE,
                         &(service->srv_me_h[i]));
        if (rc != PTL_OK) {
                CERROR("PtlMEAttach failed: %d\n", rc);
                LBUG();
        }

        if (service->srv_ref_count[i])
                LBUG();

        dummy.start         = service->srv_buf[i];
        dummy.length        = service->srv_buf_size;
        dummy.max_offset    = service->srv_buf_size;
        dummy.threshold     = PTL_MD_THRESH_INF;
        dummy.options       = PTL_MD_OP_PUT | PTL_MD_AUTO_UNLINK;
        dummy.user_ptr      = service;
        dummy.eventq        = service->srv_eq_h;

        rc = PtlMDAttach(service->srv_me_h[i], dummy, PTL_UNLINK, &md_h);
        if (rc != PTL_OK) {
                /* cleanup */
                CERROR("PtlMDAttach failed: %d\n", rc);
                LBUG();
        }
}

/* ptl_handled_rpc() should be called by the sleeping process once
 * it finishes processing an event.  This ensures the ref count is
 * decremented and that the rpc ring buffer cycles properly.
 */
int ptl_handled_rpc(struct ptlrpc_service *service, void *start)
{
        int index;

        spin_lock(&service->srv_lock);
        for (index = 0; index < service->srv_ring_length; index++)
                if (service->srv_buf[index] == start)
                        break;

        if (index == service->srv_ring_length)
                LBUG();

        CDEBUG(D_INFO, "MD index=%d Ref Count=%d\n", index,
               service->srv_ref_count[index]);
        service->srv_ref_count[index]--;

        if (service->srv_ref_count[index] < 0)
                LBUG();

        if (service->srv_ref_count[index] == 0 &&
            !ptl_is_valid_handle(&(service->srv_me_h[index]))) {
                CDEBUG(D_NET, "relinking %d\n", index);
                ptlrpc_link_svc_me(service, index);
        }

        spin_unlock(&service->srv_lock);
        return 0;
}