LU-1028 osc: fix grant checking on the osc side
fs/lustre-release.git: lustre/osc/osc_request.c
/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
 * vim:expandtab:shiftwidth=8:tabstop=8:
 *
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2011 Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC

#include <libcfs/libcfs.h>

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
#include <obd_cksum.h>
#include <obd_ost.h>
#include <obd_lov.h>

#ifdef  __CYGWIN__
# include <ctype.h>
#endif

#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
#include <lustre_debug.h>
#include <lustre_param.h>
#include "osc_internal.h"

static void osc_release_ppga(struct brw_page **ppga, obd_count count);
static int brw_interpret(const struct lu_env *env,
                         struct ptlrpc_request *req, void *data, int rc);
static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli,
                            int ptlrpc);
int osc_cleanup(struct obd_device *obd);

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
{
        int lmm_size;
        ENTRY;

        lmm_size = sizeof(**lmmp);
        if (!lmmp)
                RETURN(lmm_size);

        if (*lmmp && !lsm) {
                OBD_FREE(*lmmp, lmm_size);
                *lmmp = NULL;
                RETURN(0);
        }

        if (!*lmmp) {
                OBD_ALLOC(*lmmp, lmm_size);
                if (!*lmmp)
                        RETURN(-ENOMEM);
        }

        if (lsm) {
                LASSERT(lsm->lsm_object_id);
                LASSERT_SEQ_IS_MDT(lsm->lsm_object_seq);
                (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
                (*lmmp)->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq);
        }

        RETURN(lmm_size);
}
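
/*
 * A summary of the packmd calling convention implemented above (derived
 * from the code, not new behaviour).  Callers may invoke osc_packmd() in
 * three ways:
 *
 *   lmm_size = obd_packmd(exp, NULL, lsm);   - query the buffer size only
 *   lmm_size = obd_packmd(exp, &lmm, lsm);   - allocate (if needed) and pack
 *   obd_packmd(exp, &lmm, NULL);             - free a previously packed lmm
 *
 * osc_unpackmd() below mirrors the same three modes for lsmp/lmm.
 */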

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        int lsm_size;
        struct obd_import *imp = class_exp2cliimp(exp);
        ENTRY;

        if (lmm != NULL) {
                if (lmm_bytes < sizeof(*lmm)) {
                        CERROR("lov_mds_md too small: %d, need %d\n",
                               lmm_bytes, (int)sizeof(*lmm));
                        RETURN(-EINVAL);
                }
                /* XXX LOV_MAGIC etc check? */

                if (lmm->lmm_object_id == 0) {
                        CERROR("lov_mds_md: zero lmm_object_id\n");
                        RETURN(-EINVAL);
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (lsmp == NULL)
                RETURN(lsm_size);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                OBD_FREE(*lsmp, lsm_size);
                *lsmp = NULL;
                RETURN(0);
        }

        if (*lsmp == NULL) {
                OBD_ALLOC(*lsmp, lsm_size);
                if (*lsmp == NULL)
                        RETURN(-ENOMEM);
                OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                if ((*lsmp)->lsm_oinfo[0] == NULL) {
                        OBD_FREE(*lsmp, lsm_size);
                        RETURN(-ENOMEM);
                }
                loi_init((*lsmp)->lsm_oinfo[0]);
        }

        if (lmm != NULL) {
                /* XXX zero *lsmp? */
                (*lsmp)->lsm_object_id = le64_to_cpu(lmm->lmm_object_id);
                (*lsmp)->lsm_object_seq = le64_to_cpu(lmm->lmm_object_seq);
                LASSERT((*lsmp)->lsm_object_id);
                LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq);
        }

        if (imp != NULL &&
            (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
                (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
        else
                (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

        RETURN(lsm_size);
}

static inline void osc_pack_capa(struct ptlrpc_request *req,
                                 struct ost_body *body, void *capa)
{
        struct obd_capa *oc = (struct obd_capa *)capa;
        struct lustre_capa *c;

        if (!capa)
                return;

        c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
        LASSERT(c);
        capa_cpy(c, oc);
        body->oa.o_valid |= OBD_MD_FLOSSCAPA;
        DEBUG_CAPA(D_SEC, c, "pack");
}

static inline void osc_pack_req_body(struct ptlrpc_request *req,
                                     struct obd_info *oinfo)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);
}

static inline void osc_set_capa_size(struct ptlrpc_request *req,
                                     const struct req_msg_field *field,
                                     struct obd_capa *oc)
{
        if (oc == NULL)
                req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
        else
                /* it is already calculated as sizeof struct obd_capa */
                ;
}

static int osc_getattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);

                /* This should really be sent by the OST */
                aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
                aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CDEBUG(D_INFO, "can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oi->oi_oa->o_valid = 0;
        }
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}

static int osc_getattr(struct obd_export *exp, struct obd_info *oinfo)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        /* This should really be sent by the OST */
        oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
 out:
        ptlrpc_req_finished(req);
        return rc;
}

static int osc_setattr(struct obd_export *exp, struct obd_info *oinfo,
                       struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_setattr_args *sa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(sa->sa_oa, &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
                           struct obd_trans_info *oti,
                           obd_enqueue_update_f upcall, void *cookie,
                           struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        } else {
                req->rq_interpret_reply =
                        (ptlrpc_interpterer_t)osc_setattr_interpret;

                CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
                sa = ptlrpc_req_async_args(req);
                sa->sa_oa = oinfo->oi_oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                if (rqset == PTLRPCD_SET)
                        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
                else
                        ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}
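
/*
 * Dispatch summary for osc_setattr_async_base() above (a restatement of
 * the code, not new behaviour): rqset == NULL fires the request through
 * ptlrpcd without waiting for or interpreting the reply; rqset ==
 * PTLRPCD_SET also hands it to ptlrpcd, but with the interpret callback
 * armed so the upcall runs on completion; any other rqset queues the
 * request on the caller's set, leaving the caller in control of when it
 * is sent and waited on.
 */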

static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct obd_trans_info *oti,
                             struct ptlrpc_request_set *rqset)
{
        return osc_setattr_async_base(exp, oinfo, oti,
                                      oinfo->oi_cb_up, oinfo, rqset);
}

int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct lov_stripe_md  *lsm;
        int                    rc;
        ENTRY;

        LASSERT(oa);
        LASSERT(ea);

        lsm = *ea;
        if (!lsm) {
                rc = obd_alloc_memmd(exp, &lsm);
                if (rc < 0)
                        RETURN(rc);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        ptlrpc_request_set_replen(req);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_DELORPHAN) {
                DEBUG_REQ(D_HA, req,
                          "delorphan from OST integration");
                /* Don't resend the delorphan req */
                req->rq_no_resend = req->rq_no_delay = 1;
        }

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        lustre_get_wire_obdo(oa, &body->oa);

        /* This should really be sent by the OST */
        oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way.
         */
        lsm->lsm_object_id = oa->o_id;
        lsm->lsm_object_seq = oa->o_seq;
        *ea = lsm;

        if (oti != NULL) {
                oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);

                if (oa->o_valid & OBD_MD_FLCOOKIE) {
                        if (!oti->oti_logcookies)
                                oti_alloc_cookies(oti, 1);
                        *oti->oti_logcookies = oa->o_lcookie;
                }
        }

        CDEBUG(D_HA, "transno: "LPD64"\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        if (rc && !*ea)
                obd_free_memmd(exp, &lsm);
        RETURN(rc);
}

int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
                   obd_enqueue_update_f upcall, void *cookie,
                   struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        struct ost_body         *body;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(req);
        sa->sa_oa     = oinfo->oi_oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;
        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

static int osc_punch(struct obd_export *exp, struct obd_info *oinfo,
                     struct obd_trans_info *oti,
                     struct ptlrpc_request_set *rqset)
{
        oinfo->oi_oa->o_size   = oinfo->oi_policy.l_extent.start;
        oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
        oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        return osc_punch_base(exp, oinfo,
                              oinfo->oi_cb_up, oinfo, rqset);
}

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req,
                              void *arg, int rc)
{
        struct osc_async_args *aa = arg;
        struct ost_body *body;
        ENTRY;

        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *aa->aa_oi->oi_oa = body->oa;
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

static int osc_sync(struct obd_export *exp, struct obd_info *oinfo,
                    obd_size start, obd_size end,
                    struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        if (!oinfo->oi_oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}

/* Find and cancel locally granted locks matched by @mode in the resource
 * identified by @oa. Matching locks are added to the @cancels list. Returns
 * the number of locks added to the list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   cfs_list_t *cancels,
                                   ldlm_mode_t mode, int lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (res == NULL)
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        cfs_atomic_dec(&cli->cl_destroy_in_flight);
        cfs_waitq_signal(&cli->cl_destroy_waitq);
        return 0;
}

static int osc_can_send_destroy(struct client_obd *cli)
{
        if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                cfs_waitq_signal(&cli->cl_destroy_waitq);
        }
        return 0;
}
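
/*
 * A note on the inc/dec pair above (explanatory, based on the code as
 * written): the increment optimistically reserves a slot.  If the limit
 * was exceeded, the decrement returns the slot; but since another thread
 * may have decremented in between, the second comparison rechecks the
 * counter and signals if a slot actually became free, so no wakeup is
 * lost.  Example interleaving with cl_max_rpcs_in_flight = 1:
 *
 *   T1: inc -> 1, sends destroy        T2: inc -> 2, over limit
 *   T1: interpret: dec -> 1, signal    T2: dec -> 0 (< 1), signals again
 *
 * The redundant second signal is harmless; missing one would stall a
 * waiter in osc_destroy() below.
 */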

/* Destroy requests can always be async on the client, and we don't even
 * really care about the return code, since the client cannot do anything
 * at all about a destroy failure.
 * When the MDS is unlinking a filename, it saves the file objects into a
 * recovery llog, and these object records are cancelled when the OST reports
 * they were destroyed and sync'd to disk (i.e. transaction committed).
 * If the client dies, or the OST is down when the object should be destroyed,
 * the records are not cancelled, and when the OST reconnects to the MDS next,
 * it will retrieve the llog unlink logs and then send the log cancellation
 * cookies to the MDS after committing destroy transactions. */
static int osc_destroy(struct obd_export *exp, struct obdo *oa,
                       struct lov_stripe_md *ea, struct obd_trans_info *oti,
                       struct obd_export *md_export, void *capa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        CFS_LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
                oa->o_lcookie = *oti->oti_logcookies;
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        osc_pack_capa(req, body, (struct obd_capa *)capa);
        ptlrpc_request_set_replen(req);

        /* don't throttle destroy RPCs for the MDT */
        if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) {
                req->rq_interpret_reply = osc_destroy_interpret;
                if (!osc_can_send_destroy(cli)) {
                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
                                                          NULL);

                        /*
                         * Wait until the number of on-going destroy RPCs drops
                         * below max_rpcs_in_flight.
                         */
                        l_wait_event_exclusive(cli->cl_destroy_waitq,
                                               osc_can_send_destroy(cli), &lwi);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        RETURN(0);
}
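
/*
 * The unlink recovery flow described in the comment above, laid out step
 * by step (a restatement for clarity, not new behaviour):
 *
 *   1. The MDS unlinks a name and records the file's objects in a
 *      recovery llog.
 *   2. An OST_DESTROY is sent per object (this function), carrying the
 *      llog cookie in o_lcookie when OBD_MD_FLCOOKIE is set.
 *   3. When the OST reports the destroy as committed to disk, the
 *      matching llog record is cancelled.
 *   4. If the client died or the OST was down, the record survives and
 *      the unlink llog is replayed when the OST next reconnects.
 */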

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        if (cli->cl_dirty - cli->cl_dirty_transit > cli->cl_dirty_max) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else if (cfs_atomic_read(&obd_dirty_pages) -
                   cfs_atomic_read(&obd_dirty_transit_pages) >
                   obd_max_dirty_pages + 1) {
                /* The cfs_atomic_read() and the cfs_atomic_inc() are not
                 * covered by a lock, thus they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
                CERROR("dirty %d - %d > system dirty_max %d\n",
                       cfs_atomic_read(&obd_dirty_pages),
                       cfs_atomic_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else {
                long max_in_flight = (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT) *
                                     (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
        }
        oa->o_grant = cli->cl_avail_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}
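
/*
 * Worked example for o_undirty above (hypothetical values, illustration
 * only): with cl_max_pages_per_rpc = 256, CFS_PAGE_SHIFT = 12 (4 KiB
 * pages) and cl_max_rpcs_in_flight = 8, max_in_flight = (256 << 12) * 9 =
 * 9 MiB.  If cl_dirty_max is 32 MiB, the client announces o_undirty =
 * 32 MiB, i.e. it asks the OST to keep enough grant available to cover
 * the larger of its dirty cache limit and a full pipeline of in-flight
 * RPCs.
 */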

static void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant =
                cfs_time_shift(cli->cl_grant_shrink_interval);
        CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
{
        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
        cfs_atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += CFS_PAGE_SIZE;
        cli->cl_avail_grant -= CFS_PAGE_SIZE;
        pga->flag |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
               CFS_PAGE_SIZE, pga, pga->pg);
        LASSERT(cli->cl_avail_grant >= 0);
        osc_update_next_shrink(cli);
}

/* The companion to osc_consume_write_grant, called when a brw has completed.
 * Must be called with the loi lock held. */
static void osc_release_write_grant(struct client_obd *cli,
                                    struct brw_page *pga, int sent)
{
        int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
        ENTRY;

        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                EXIT;
                return;
        }

        pga->flag &= ~OBD_BRW_FROM_GRANT;
        cfs_atomic_dec(&obd_dirty_pages);
        cli->cl_dirty -= CFS_PAGE_SIZE;
        if (pga->flag & OBD_BRW_NOCACHE) {
                pga->flag &= ~OBD_BRW_NOCACHE;
                cfs_atomic_dec(&obd_dirty_transit_pages);
                cli->cl_dirty_transit -= CFS_PAGE_SIZE;
        }
        if (!sent) {
                /* Reclaim grant from truncated pages. This is used to solve
                 * the write-truncate problem, where all grant would otherwise
                 * be gone (to lost_grant). For a vfs write this problem can
                 * easily be solved by a sync write; however, that is not an
                 * option for page_mkwrite() because grant has to be allocated
                 * before a page becomes dirty. */
                if (cli->cl_avail_grant < PTLRPC_MAX_BRW_SIZE)
                        cli->cl_avail_grant += CFS_PAGE_SIZE;
                else
                        cli->cl_lost_grant += CFS_PAGE_SIZE;
                CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
                       cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
        } else if (CFS_PAGE_SIZE != blocksize && pga->count != CFS_PAGE_SIZE) {
                /* For short writes we shouldn't count parts of pages that
                 * span a whole block on the OST side, or our accounting goes
                 * wrong.  Should match the code in filter_grant_check. */
                int offset = pga->off & ~CFS_PAGE_MASK;
                int count = pga->count + (offset & (blocksize - 1));
                int end = (offset + pga->count) & (blocksize - 1);
                if (end)
                        count += blocksize - end;

                cli->cl_lost_grant += CFS_PAGE_SIZE - count;
                CDEBUG(D_CACHE, "lost %lu grant: %lu avail: %lu dirty: %lu\n",
                       CFS_PAGE_SIZE - count, cli->cl_lost_grant,
                       cli->cl_avail_grant, cli->cl_dirty);
        }

        EXIT;
}
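
/*
 * Worked example for the short-write branch above (hypothetical sizes,
 * illustration only): with CFS_PAGE_SIZE = 4096 and an OST blocksize of
 * 1024, a write of count = 1024 at offset 512 within the page rounds out
 * to the enclosing 1 KiB blocks: count = 1024 + 512 = 1536, end =
 * (512 + 1024) & 1023 = 512, so count += 512, giving 2048.  The OST
 * charges grant for 2048 bytes of blocks while the client consumed a
 * full 4096-byte page of grant, so the remaining 2048 bytes move to
 * cl_lost_grant to keep both sides' accounting in step.
 */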

static unsigned long rpcs_in_flight(struct client_obd *cli)
{
        return cli->cl_r_in_flight + cli->cl_w_in_flight;
}

/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
        cfs_list_t *l, *tmp;
        struct osc_cache_waiter *ocw;

        ENTRY;
        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                /* if we can't dirty more, we must wait until some is written */
                if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
                    (cfs_atomic_read(&obd_dirty_pages) + 1 >
                     obd_max_dirty_pages)) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
                               "osc max %ld, sys max %d\n", cli->cl_dirty,
                               cli->cl_dirty_max, obd_max_dirty_pages);
                        return;
                }

                /* if still dirty cache but no grant wait for pending RPCs that
                 * may yet return us some grant before doing sync writes */
                if (cli->cl_w_in_flight && cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
                               cli->cl_w_in_flight);
                        return;
                }

                ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
                cfs_list_del_init(&ocw->ocw_entry);
                if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        /* no more RPCs in flight to return grant, do sync IO */
                        ocw->ocw_rc = -EDQUOT;
                        CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
                } else {
                        osc_consume_write_grant(cli,
                                                &ocw->ocw_oap->oap_brw_page);
                }

                CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld\n",
                       ocw, ocw->ocw_oap, cli->cl_avail_grant);

                cfs_waitq_signal(&ocw->ocw_waitq);
        }

        EXIT;
}

static void __osc_update_grant(struct client_obd *cli, obd_size grant)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
                              void *key, obd_count vallen, void *val,
                              struct ptlrpc_request_set *set);

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *aa, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBDO_FREE(oa);
        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        long target = (cli->cl_max_rpcs_in_flight + 1) *
                      cli->cl_max_pages_per_rpc;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target)
                target = cli->cl_max_pages_per_rpc;
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target);
}
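
/*
 * Illustration of the two-stage shrink above (hypothetical numbers): with
 * cl_max_rpcs_in_flight = 8 and cl_max_pages_per_rpc = 256, the first
 * shrink pulls a large grant down to the 9-RPC working set; once the
 * grant is already at or below that, the next shrink drops it to a single
 * RPC's worth, never piecemeal below what the RPC pipeline can use.
 */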

int osc_shrink_grant_to_target(struct client_obd *cli, long target)
{
        int    rc = 0;
        struct ost_body     *body;
        ENTRY;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target < cli->cl_max_pages_per_rpc)
                target = cli->cl_max_pages_per_rpc;

        if (target >= cli->cl_avail_grant) {
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        body->oa.o_grant = cli->cl_avail_grant - target;
        cli->cl_avail_grant = target;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
        OBD_FREE_PTR(body);
        RETURN(rc);
}

#define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE
static int osc_should_shrink_grant(struct client_obd *client)
{
        cfs_time_t time = cfs_time_current();
        cfs_time_t next_shrink = client->cl_next_shrink_grant;

        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_GRANT_SHRINK) == 0)
                return 0;

        if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > GRANT_SHRINK_LIMIT)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
{
        struct client_obd *client;

        cfs_list_for_each_entry(client, &item->ti_obd_list,
                                cl_grant_shrink_list) {
                if (osc_should_shrink_grant(client))
                        osc_shrink_grant(client);
        }
        return 0;
}

static int osc_add_shrink_grant(struct client_obd *client)
{
        int rc;

        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
                                       TIMEOUT_GRANT,
                                       osc_grant_shrink_grant_cb, NULL,
                                       &client->cl_grant_shrink_list);
        if (rc) {
                CERROR("add grant client %s error %d\n",
                       client->cl_import->imp_obd->obd_name, rc);
                return rc;
        }
        CDEBUG(D_CACHE, "add grant client %s\n",
               client->cl_import->imp_obd->obd_name);
        osc_update_next_shrink(client);
        return 0;
}

static int osc_del_shrink_grant(struct client_obd *client)
{
        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
                                         TIMEOUT_GRANT);
}

static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we've
         * been evicted, it's the new avail_grant amount, and cl_dirty will
         * drop to 0 as in-flight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted but imp_state has
         * already left the EVICTED state, then cl_dirty must be 0 already.
         */
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
                cli->cl_avail_grant = ocd->ocd_grant;
        else
                cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;

        if (cli->cl_avail_grant < 0) {
                CWARN("%s: available grant < 0, the OSS is probably not running"
                      " with the patch from bug20278 (%ld)\n",
                      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
                /* workaround for 1.6 servers which do not have
                 * the patch from bug20278 */
                cli->cl_avail_grant = ocd->ocd_grant;
        }

        client_obd_list_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
               cli->cl_import->imp_obd->obd_name,
               cli->cl_avail_grant, cli->cl_lost_grant);

        if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
            cfs_list_empty(&cli->cl_grant_shrink_list))
                osc_add_shrink_grant(cli);
}
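
/*
 * Grant arithmetic example for osc_init_grant (hypothetical numbers): if
 * the server reports ocd_grant = 2 MiB while the client still holds
 * cl_dirty = 512 KiB, the normal path sets cl_avail_grant to 1.5 MiB,
 * since the dirty pages are already consuming the rest.  After an
 * eviction the dirty pages will be discarded as their RPCs fail, so the
 * full 2 MiB becomes available again.
 */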

/* We assume that the reason this OSC got a short read is that it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = cfs_kmap(pga[i]->pg) +
                                (pga[i]->off & ~CFS_PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        cfs_kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                cfs_kunmap(pga[i]->pg);
                i++;
        }
}
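
/*
 * Example of the zeroing above (hypothetical sizes): a 3-page read of
 * 3 x 4096 bytes that comes back with nob_read = 5000 leaves page 0
 * untouched (fully read), zeroes bytes 904..4095 of page 1 (EOF landed
 * inside it), and zeroes page 2 entirely, so the caller sees well-defined
 * zeros past the end of the stripe data.
 */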

static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return(-EPROTO);
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0)
                        return(remote_rcs[i]);

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);
                        return(-EPROTO);
                }
        }

        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return(-EPROTO);
        }

        return (0);
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT| OBD_BRW_NOCACHE|
                                  OBD_BRW_SYNC|OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at http://bugs.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}
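
/*
 * The merge rule above in one line (a restatement of the code, not new
 * behaviour): two brw pages coalesce into one remote niobuf only when
 * their flags are identical and the second begins exactly where the first
 * ends, e.g. (off 0, count 4096) followed by (off 4096, count 4096).
 * Pages whose flags differ only in the "known safe" bits still refuse to
 * merge, but do so silently; differences outside that set draw the CWARN.
 */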

static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                                   struct brw_page **pga, int opc,
                                   cksum_type_t cksum_type)
{
        __u32 cksum;
        int i = 0;

        LASSERT(pg_count > 0);
        cksum = init_checksum(cksum_type);
        while (nob > 0 && pg_count > 0) {
                unsigned char *ptr = cfs_kmap(pga[i]->pg);
                int off = pga[i]->off & ~CFS_PAGE_MASK;
                int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        memcpy(ptr + off, "bad1", min(4, nob));
                cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
                cfs_kunmap(pga[i]->pg);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
                               off, cksum);

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        return fini_checksum(cksum, cksum_type);
}

static int osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page **pga,
                                struct ptlrpc_request **reqp,
                                struct obd_capa *ocapa, int reserve,
                                int resend)
{
        struct ptlrpc_request   *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body         *body;
        struct obd_ioobj        *ioobj;
        struct niobuf_remote    *niobuf;
        int niocount, i, requested_nob, opc, rc;
        struct osc_brw_async_args *aa;
        struct req_capsule      *pill;
        struct brw_page *pg_prev;

        ENTRY;
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                cli->cl_import->imp_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;
        }

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));
        osc_set_capa_size(req, &RMF_CAPA1, ocapa);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (opc == OST_WRITE)
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_GET_SOURCE, OST_BULK_PORTAL);
        else
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_PUT_SINK, OST_BULK_PORTAL);

        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

        lustre_set_wire_obdo(&body->oa, oa);

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        osc_pack_capa(req, body, ocapa);
        LASSERT(page_count > 0);
        pg_prev = pga[0];
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                int poff = pg->off & ~CFS_PAGE_MASK;

                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of page array */
                LASSERTF(page_count == 1 ||
                         (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
                               poff == 0 && pg->count == CFS_PAGE_SIZE)   &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: "LPU64", count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
#ifdef __linux__
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         i, page_count,
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
#else
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u\n", i, page_count);
#endif
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));

                ptlrpc_prep_bulk_page(desc, pg->pg, poff, pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->len += pg->count;
                } else {
                        niobuf->offset = pg->off;
                        niobuf->len    = pg->count;
                        niobuf->flags  = pg->flag;
                }
                pg_prev = pg;
        }

        LASSERTF((void *)(niobuf - niocount) ==
                req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
                "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
                &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
        if (resend) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;
                }
                body->oa.o_flags |= OBD_FL_RECOV_RESEND;
        }

        if (osc_should_shrink_grant(cli))
                osc_shrink_grant_local(cli, &body->oa);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (unlikely(cli->cl_checksum) &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs */
                        cksum_type_t cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                                oa->o_flags &= OBD_FL_LOCAL_MASK;
                                body->oa.o_flags = 0;
                        }
                        body->oa.o_flags |= cksum_type_pack(cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                                                             page_count, pga,
                                                             OST_WRITE,
                                                             cksum_type);
                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                               body->oa.o_cksum);
                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= cksum_type_pack(cksum_type);
                } else {
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238 */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;
                }
                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                     sizeof(__u32) * niocount);
        } else {
                if (unlikely(cli->cl_checksum) &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                }
        }
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;
        aa->aa_resends = 0;
        aa->aa_ppga = pga;
        aa->aa_cli = cli;
        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
        if (ocapa && reserve)
                aa->aa_ocapa = capa_get(ocapa);

        *reqp = req;
        RETURN(0);
1480
1481  out:
1482         ptlrpc_req_finished(req);
1483         RETURN(rc);
1484 }
1485
1486 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1487                                 __u32 client_cksum, __u32 server_cksum, int nob,
1488                                 obd_count page_count, struct brw_page **pga,
1489                                 cksum_type_t client_cksum_type)
1490 {
1491         __u32 new_cksum;
1492         char *msg;
1493         cksum_type_t cksum_type;
1494
1495         if (server_cksum == client_cksum) {
1496                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1497                 return 0;
1498         }
1499
1500         cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1501                                        oa->o_flags : 0);
1502         new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1503                                       cksum_type);
1504
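        /* three checksums are now in hand: what the client computed when the
         * pages were sent (client_cksum), what the server computed on receipt
         * (server_cksum), and what the data hashes to right now (new_cksum);
         * the pairwise comparisons below localize where the corruption
         * happened */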
1505         if (cksum_type != client_cksum_type)
1506                 msg = "the server did not use the checksum type specified in "
1507                       "the original request - likely a protocol problem";
1508         else if (new_cksum == server_cksum)
1509                 msg = "changed on the client after we checksummed it - "
1510                       "likely false positive due to mmap IO (bug 11742)";
1511         else if (new_cksum == client_cksum)
1512                 msg = "changed in transit before arrival at OST";
1513         else
1514                 msg = "changed in transit AND doesn't match the original - "
1515                       "likely false positive due to mmap IO (bug 11742)";
1516
1517         LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
1518                            " object "LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n",
1519                            msg, libcfs_nid2str(peer->nid),
1520                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1521                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1522                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1523                            oa->o_id,
1524                            oa->o_valid & OBD_MD_FLGROUP ? oa->o_seq : (__u64)0,
1525                            pga[0]->off,
1526                            pga[page_count-1]->off + pga[page_count-1]->count - 1);
1527         CERROR("original client csum %x (type %x), server csum %x (type %x), "
1528                "client csum now %x\n", client_cksum, client_cksum_type,
1529                server_cksum, cksum_type, new_cksum);
1530         return 1;
1531 }
1532
1533 /* Note rc enters this function as number of bytes transferred */
1534 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1535 {
1536         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1537         const lnet_process_id_t *peer =
1538                         &req->rq_import->imp_connection->c_peer;
1539         struct client_obd *cli = aa->aa_cli;
1540         struct ost_body *body;
1541         __u32 client_cksum = 0;
1542         ENTRY;
1543
1544         if (rc < 0 && rc != -EDQUOT) {
1545                 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
1546                 RETURN(rc);
1547         }
1548
1549         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1550         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1551         if (body == NULL) {
1552                 DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
1553                 RETURN(-EPROTO);
1554         }
1555
1556         /* set/clear over quota flag for a uid/gid */
1557         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1558             body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
1559                 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
1560
1561                 CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
1562                        body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
1563                        body->oa.o_flags);
1564                 osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
1565         }
1566
1567         osc_update_grant(cli, body);
1568
1569         if (rc < 0)
1570                 RETURN(rc);
1571
1572         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1573                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1574
1575         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1576                 if (rc > 0) {
1577                         CERROR("Unexpected +ve rc %d\n", rc);
1578                         RETURN(-EPROTO);
1579                 }
1580                 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1581
1582                 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1583                         RETURN(-EAGAIN);
1584
1585                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1586                     check_write_checksum(&body->oa, peer, client_cksum,
1587                                          body->oa.o_cksum, aa->aa_requested_nob,
1588                                          aa->aa_page_count, aa->aa_ppga,
1589                                          cksum_type_unpack(aa->aa_oa->o_flags)))
1590                         RETURN(-EAGAIN);
1591
1592                 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
1593                                      aa->aa_page_count, aa->aa_ppga);
1594                 GOTO(out, rc);
1595         }
1596
1597         /* The rest of this function executes only for OST_READs */
1598
1599         /* if unwrap_bulk failed, return -EAGAIN to retry */
1600         rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1601         if (rc < 0)
1602                 GOTO(out, rc = -EAGAIN);
1603
1604         if (rc > aa->aa_requested_nob) {
1605                 CERROR("Unexpected rc %d (%d requested)\n", rc,
1606                        aa->aa_requested_nob);
1607                 RETURN(-EPROTO);
1608         }
1609
1610         if (rc != req->rq_bulk->bd_nob_transferred) {
1611                 CERROR("Unexpected rc %d (%d transferred)\n",
1612                        rc, req->rq_bulk->bd_nob_transferred);
1613                 RETURN(-EPROTO);
1614         }
1615
1616         if (rc < aa->aa_requested_nob)
1617                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1618
1619         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1620                 static int cksum_counter;
1621                 __u32      server_cksum = body->oa.o_cksum;
1622                 char      *via;
1623                 char      *router;
1624                 cksum_type_t cksum_type;
1625
1626                 cksum_type = cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
1627                                                body->oa.o_flags : 0);
1628                 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1629                                                  aa->aa_ppga, OST_READ,
1630                                                  cksum_type);
1631
1632                 if (peer->nid == req->rq_bulk->bd_sender) {
1633                         via = router = "";
1634                 } else {
1635                         via = " via ";
1636                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
1637                 }
1638
1639                 if (server_cksum == ~0 && rc > 0) {
1640                         CERROR("Protocol error: server %s set the 'checksum' "
1641                                "bit, but didn't send a checksum.  Not fatal, "
1642                                "but please notify on http://bugs.whamcloud.com/\n",
1643                                libcfs_nid2str(peer->nid));
1644                 } else if (server_cksum != client_cksum) {
1645                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1646                                            "%s%s%s inode "DFID" object "
1647                                            LPU64"/"LPU64" extent "
1648                                            "["LPU64"-"LPU64"]\n",
1649                                            req->rq_import->imp_obd->obd_name,
1650                                            libcfs_nid2str(peer->nid),
1651                                            via, router,
1652                                            body->oa.o_valid & OBD_MD_FLFID ?
1653                                                 body->oa.o_parent_seq : (__u64)0,
1654                                            body->oa.o_valid & OBD_MD_FLFID ?
1655                                                 body->oa.o_parent_oid : 0,
1656                                            body->oa.o_valid & OBD_MD_FLFID ?
1657                                                 body->oa.o_parent_ver : 0,
1658                                            body->oa.o_id,
1659                                            body->oa.o_valid & OBD_MD_FLGROUP ?
1660                                                 body->oa.o_seq : (__u64)0,
1661                                            aa->aa_ppga[0]->off,
1662                                            aa->aa_ppga[aa->aa_page_count-1]->off +
1663                                            aa->aa_ppga[aa->aa_page_count-1]->count -
1664                                                                         1);
1665                         CERROR("client %x, server %x, cksum_type %x\n",
1666                                client_cksum, server_cksum, cksum_type);
1667                         cksum_counter = 0;
1668                         aa->aa_oa->o_cksum = client_cksum;
1669                         rc = -EAGAIN;
1670                 } else {
1671                         cksum_counter++;
1672                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1673                         rc = 0;
1674                 }
1675         } else if (unlikely(client_cksum)) {
1676                 static int cksum_missed;
1677
1678                 cksum_missed++;
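                /* (x & -x) == x only when x is a power of two, so this
                 * logs on the 1st, 2nd, 4th, 8th, ... miss instead of
                 * flooding the console on every reply */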
1679                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1680                         CERROR("Checksum %u requested from %s but not sent\n",
1681                                cksum_missed, libcfs_nid2str(peer->nid));
1682         } else {
1683                 rc = 0;
1684         }
1685 out:
1686         if (rc >= 0)
1687                 lustre_get_wire_obdo(aa->aa_oa, &body->oa);
1688
1689         RETURN(rc);
1690 }
1691
1692 static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1693                             struct lov_stripe_md *lsm,
1694                             obd_count page_count, struct brw_page **pga,
1695                             struct obd_capa *ocapa)
1696 {
1697         struct ptlrpc_request *req;
1698         int                    rc;
1699         cfs_waitq_t            waitq;
1700         int                    resends = 0;
1701         struct l_wait_info     lwi;
1702
1703         ENTRY;
1704
1705         cfs_waitq_init(&waitq);
1706
1707 restart_bulk:
1708         rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
1709                                   page_count, pga, &req, ocapa, 0, resends);
1710         if (rc != 0)
1711                 return (rc);
1712
1713         rc = ptlrpc_queue_wait(req);
1714
1715         if (rc == -ETIMEDOUT && req->rq_resend) {
1716                 DEBUG_REQ(D_HA, req, "BULK TIMEOUT");
1717                 ptlrpc_req_finished(req);
1718                 goto restart_bulk;
1719         }
1720
1721         rc = osc_brw_fini_request(req, rc);
1722
1723         ptlrpc_req_finished(req);
1724         if (osc_recoverable_error(rc)) {
1725                 resends++;
1726                 if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
1727                         CERROR("too many resend retries, returning error\n");
1728                         RETURN(-EIO);
1729                 }
1730
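                /* linear back-off: the wait queue below is never signalled,
                 * so l_wait_event() amounts to an interruptible sleep of
                 * 'resends' seconds before the bulk is retried */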
1731                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
1732                 l_wait_event(waitq, 0, &lwi);
1733
1734                 goto restart_bulk;
1735         }
1736
1737         RETURN(rc);
1738 }
1739
1740 int osc_brw_redo_request(struct ptlrpc_request *request,
1741                          struct osc_brw_async_args *aa)
1742 {
1743         struct ptlrpc_request *new_req;
1744         struct ptlrpc_request_set *set = request->rq_set;
1745         struct osc_brw_async_args *new_aa;
1746         struct osc_async_page *oap;
1747         int rc = 0;
1748         ENTRY;
1749
1750         if (!client_should_resend(aa->aa_resends, aa->aa_cli)) {
1751                 CERROR("too many resend retries, returning error\n");
1752                 RETURN(-EIO);
1753         }
1754
1755         DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
1756
1757         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1758                                         OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
1759                                   aa->aa_cli, aa->aa_oa,
1760                                   NULL /* lsm unused by osc currently */,
1761                                   aa->aa_page_count, aa->aa_ppga,
1762                                   &new_req, aa->aa_ocapa, 0, 1);
1763         if (rc)
1764                 RETURN(rc);
1765
1766         client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
1767
1768         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1769                 if (oap->oap_request != NULL) {
1770                         LASSERTF(request == oap->oap_request,
1771                                  "request %p != oap_request %p\n",
1772                                  request, oap->oap_request);
1773                         if (oap->oap_interrupted) {
1774                                 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1775                                 ptlrpc_req_finished(new_req);
1776                                 RETURN(-EINTR);
1777                         }
1778                 }
1779         }
1780         /* New request takes over pga and oaps from old request.
1781          * Note that copying a list_head doesn't work, need to move it... */
1782         aa->aa_resends++;
1783         new_req->rq_interpret_reply = request->rq_interpret_reply;
1784         new_req->rq_async_args = request->rq_async_args;
1785         new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
1786
1787         new_aa = ptlrpc_req_async_args(new_req);
1788
1789         CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
1790         cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
1791         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1792
1793         cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1794                 if (oap->oap_request) {
1795                         ptlrpc_req_finished(oap->oap_request);
1796                         oap->oap_request = ptlrpc_request_addref(new_req);
1797                 }
1798         }
1799
1800         new_aa->aa_ocapa = aa->aa_ocapa;
1801         aa->aa_ocapa = NULL;
1802
1803         /* using ptlrpc_set_add_req() here is safe because interpret
1804          * functions run in check_set context.  The only path by which a
1805          * different thread can reach this request and return -EINTR is
1806          * protected by cl_loi_list_lock */
1807         ptlrpc_set_add_req(set, new_req);
1808
1809         client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1810
1811         DEBUG_REQ(D_INFO, new_req, "new request");
1812         RETURN(0);
1813 }
1814
1815 /*
1816  * ugh, we want disk allocation on the target to happen in offset order.  we'll
1817  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
1818  * fine for our small page arrays and doesn't require allocation.  it's an
1819  * insertion sort that swaps elements that are strides apart, shrinking the
1820  * stride down until it's 1 and the array is sorted.
1821  */
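/*
 * A quick trace: for num = 8 the gap sequence grows 1, 4, 13 and stops,
 * so the do/while below sorts with gaps 13/3 = 4 and then 4/3 = 1; the
 * final gap-1 pass is a plain insertion sort over an almost-sorted array,
 * which is where shellsort gets its speed.
 */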
1822 static void sort_brw_pages(struct brw_page **array, int num)
1823 {
1824         int stride, i, j;
1825         struct brw_page *tmp;
1826
1827         if (num == 1)
1828                 return;
1829         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1830                 ;
1831
1832         do {
1833                 stride /= 3;
1834                 for (i = stride ; i < num ; i++) {
1835                         tmp = array[i];
1836                         j = i;
1837                         while (j >= stride && array[j - stride]->off > tmp->off) {
1838                                 array[j] = array[j - stride];
1839                                 j -= stride;
1840                         }
1841                         array[j] = tmp;
1842                 }
1843         } while (stride > 1);
1844 }
1845
1846 static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
1847 {
1848         int count = 1;
1849         int offset;
1850         int i = 0;
1851
1852         LASSERT(pages > 0);
1853         offset = pg[i]->off & ~CFS_PAGE_MASK;
1854
1855         for (;;) {
1856                 pages--;
1857                 if (pages == 0)         /* that's all */
1858                         return count;
1859
1860                 if (offset + pg[i]->count < CFS_PAGE_SIZE)
1861                         return count;   /* doesn't end on page boundary */
1862
1863                 i++;
1864                 offset = pg[i]->off & ~CFS_PAGE_MASK;
1865                 if (offset != 0)        /* doesn't start on page boundary */
1866                         return count;
1867
1868                 count++;
1869         }
1870 }
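/*
 * Worked example (4 KiB pages): for sorted pages covering [0,4096)
 * [4096,8192) [8192,10240) [12288,16384), the first three form an
 * unfragmented run (each page is full except the last, which merely ends
 * early), so max_unfragmented_pages() returns 3 and the caller caps that
 * BRW at three pages.
 */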
1871
1872 static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
1873 {
1874         struct brw_page **ppga;
1875         int i;
1876
1877         OBD_ALLOC(ppga, sizeof(*ppga) * count);
1878         if (ppga == NULL)
1879                 return NULL;
1880
1881         for (i = 0; i < count; i++)
1882                 ppga[i] = pga + i;
1883         return ppga;
1884 }
1885
1886 static void osc_release_ppga(struct brw_page **ppga, obd_count count)
1887 {
1888         LASSERT(ppga != NULL);
1889         OBD_FREE(ppga, sizeof(*ppga) * count);
1890 }
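/*
 * osc_brw() below uses the two helpers above in a build/sort/split/release
 * pattern.  A minimal sketch of the lifecycle (hypothetical caller, error
 * handling elided):
 *
 *      struct brw_page **ppga = osc_build_ppga(pga, count);
 *
 *      sort_brw_pages(ppga, count);
 *      ... hand slices of ppga to osc_brw_internal() ...
 *      osc_release_ppga(ppga, count);
 *
 * Only the array of pointers is sorted and freed; the brw_page structs
 * themselves never move.
 */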
1891
1892 static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
1893                    obd_count page_count, struct brw_page *pga,
1894                    struct obd_trans_info *oti)
1895 {
1896         struct obdo *saved_oa = NULL;
1897         struct brw_page **ppga, **orig;
1898         struct obd_import *imp = class_exp2cliimp(exp);
1899         struct client_obd *cli;
1900         int rc, page_count_orig;
1901         ENTRY;
1902
1903         LASSERT((imp != NULL) && (imp->imp_obd != NULL));
1904         cli = &imp->imp_obd->u.cli;
1905
1906         if (cmd & OBD_BRW_CHECK) {
1907                 /* The caller just wants to know if there's a chance that this
1908                  * I/O can succeed */
1909
1910                 if (imp->imp_invalid)
1911                         RETURN(-EIO);
1912                 RETURN(0);
1913         }
1914
1915         /* test_brw with a failed create can trip this, maybe others. */
1916         LASSERT(cli->cl_max_pages_per_rpc);
1917
1918         rc = 0;
1919
1920         orig = ppga = osc_build_ppga(pga, page_count);
1921         if (ppga == NULL)
1922                 RETURN(-ENOMEM);
1923         page_count_orig = page_count;
1924
1925         sort_brw_pages(ppga, page_count);
1926         while (page_count) {
1927                 obd_count pages_per_brw;
1928
1929                 if (page_count > cli->cl_max_pages_per_rpc)
1930                         pages_per_brw = cli->cl_max_pages_per_rpc;
1931                 else
1932                         pages_per_brw = page_count;
1933
1934                 pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
1935
1936                 if (saved_oa != NULL) {
1937                         /* restore previously saved oa */
1938                         *oinfo->oi_oa = *saved_oa;
1939                 } else if (page_count > pages_per_brw) {
1940                         /* save a copy of oa (brw will clobber it) */
1941                         OBDO_ALLOC(saved_oa);
1942                         if (saved_oa == NULL)
1943                                 GOTO(out, rc = -ENOMEM);
1944                         *saved_oa = *oinfo->oi_oa;
1945                 }
1946
1947                 rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
1948                                       pages_per_brw, ppga, oinfo->oi_capa);
1949
1950                 if (rc != 0)
1951                         break;
1952
1953                 page_count -= pages_per_brw;
1954                 ppga += pages_per_brw;
1955         }
1956
1957 out:
1958         osc_release_ppga(orig, page_count_orig);
1959
1960         if (saved_oa != NULL)
1961                 OBDO_FREE(saved_oa);
1962
1963         RETURN(rc);
1964 }
1965
1966 /* The companion to osc_enter_cache(), called when @oap is no longer part of
1967  * the dirty accounting.  Writeback completes or truncate happens before
1968  * writing starts.  Must be called with the loi lock held. */
1969 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1970                            int sent)
1971 {
1972         osc_release_write_grant(cli, &oap->oap_brw_page, sent);
1973 }
1974
1975
1976 /* This maintains the lists of pending pages to read/write for a given object
1977  * (lop).  This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
1978  * to quickly find objects that are ready to send an RPC. */
1979 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1980                          int cmd)
1981 {
1982         ENTRY;
1983
1984         if (lop->lop_num_pending == 0)
1985                 RETURN(0);
1986
1987         /* if we have an invalid import we want to drain the queued pages
1988          * by forcing them through rpcs that immediately fail and complete
1989          * the pages.  recovery relies on this to empty the queued pages
1990          * before canceling the locks and evicting down the llite pages */
1991         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
1992                 RETURN(1);
1993
1994         /* stream rpcs in queue order as long as there is an urgent page
1995          * queued.  this is our cheap solution for good batching in the case
1996          * where writepage marks some random page in the middle of the file
1997          * as urgent because of, say, memory pressure */
1998         if (!cfs_list_empty(&lop->lop_urgent)) {
1999                 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
2000                 RETURN(1);
2001         }
2002
2003         if (cmd & OBD_BRW_WRITE) {
2004                 /* trigger a write rpc stream as long as there are dirtiers
2005                  * waiting for space.  as they're waiting, they're not going to
2006                  * create more pages to coalesce with what's waiting.. */
2007                 if (!cfs_list_empty(&cli->cl_cache_waiters)) {
2008                         CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
2009                         RETURN(1);
2010                 }
2011         }
2012         if (lop->lop_num_pending >= cli->cl_max_pages_per_rpc)
2013                 RETURN(1);
2014
2015         RETURN(0);
2016 }
2017
2018 static int lop_makes_hprpc(struct loi_oap_pages *lop)
2019 {
2020         struct osc_async_page *oap;
2021         ENTRY;
2022
2023         if (cfs_list_empty(&lop->lop_urgent))
2024                 RETURN(0);
2025
2026         oap = cfs_list_entry(lop->lop_urgent.next,
2027                              struct osc_async_page, oap_urgent_item);
2028
2029         if (oap->oap_async_flags & ASYNC_HP) {
2030                 CDEBUG(D_CACHE, "hp request forcing RPC\n");
2031                 RETURN(1);
2032         }
2033
2034         RETURN(0);
2035 }
2036
2037 static void on_list(cfs_list_t *item, cfs_list_t *list,
2038                     int should_be_on)
2039 {
2040         if (cfs_list_empty(item) && should_be_on)
2041                 cfs_list_add_tail(item, list);
2042         else if (!cfs_list_empty(item) && !should_be_on)
2043                 cfs_list_del_init(item);
2044 }
2045
2046 /* maintain the loi's cli list membership invariants so that osc_send_oap_rpc
2047  * can find pages to build into rpcs quickly */
2048 void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
2049 {
2050         if (lop_makes_hprpc(&loi->loi_write_lop) ||
2051             lop_makes_hprpc(&loi->loi_read_lop)) {
2052                 /* HP rpc */
2053                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
2054                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
2055         } else {
2056                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
2057                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
2058                         lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
2059                         lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
2060         }
2061
2062         on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
2063                 loi->loi_write_lop.lop_num_pending);
2064
2065         on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
2066                 loi->loi_read_lop.lop_num_pending);
2067 }
2068
2069 static void lop_update_pending(struct client_obd *cli,
2070                                struct loi_oap_pages *lop, int cmd, int delta)
2071 {
2072         lop->lop_num_pending += delta;
2073         if (cmd & OBD_BRW_WRITE)
2074                 cli->cl_pending_w_pages += delta;
2075         else
2076                 cli->cl_pending_r_pages += delta;
2077 }
2078
2079 /**
2080  * this is called when a sync waiter receives an interruption.  Its job is to
2081  * get the caller woken as soon as possible.  If its page hasn't been put in an
2082  * rpc yet it can dequeue immediately.  Otherwise it has to mark the rpc as
2083  * desiring interruption which will forcefully complete the rpc once the rpc
2084  * has timed out.
2085  */
2086 int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
2087 {
2088         struct loi_oap_pages *lop;
2089         struct lov_oinfo *loi;
2090         int rc = -EBUSY;
2091         ENTRY;
2092
2093         LASSERT(!oap->oap_interrupted);
2094         oap->oap_interrupted = 1;
2095
2096         /* ok, it's been put in an rpc. only one oap gets a request reference */
2097         if (oap->oap_request != NULL) {
2098                 ptlrpc_mark_interrupted(oap->oap_request);
2099                 ptlrpcd_wake(oap->oap_request);
2100                 ptlrpc_req_finished(oap->oap_request);
2101                 oap->oap_request = NULL;
2102         }
2103
2104         /*
2105          * page completion may be called only if ->cpo_prep() method was
2106          * executed by osc_io_submit(), which also adds the page to the
2107          * pending list
2108          */
2108         if (!cfs_list_empty(&oap->oap_pending_item)) {
2109                 cfs_list_del_init(&oap->oap_pending_item);
2110                 cfs_list_del_init(&oap->oap_urgent_item);
2111
2112                 loi = oap->oap_loi;
2113                 lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
2114                         &loi->loi_write_lop : &loi->loi_read_lop;
2115                 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
2116                 loi_list_maint(oap->oap_cli, oap->oap_loi);
2117                 rc = oap->oap_caller_ops->ap_completion(env,
2118                                           oap->oap_caller_data,
2119                                           oap->oap_cmd, NULL, -EINTR);
2120         }
2121
2122         RETURN(rc);
2123 }
2124
2125 /* this is trying to propagate async writeback errors back up to the
2126  * application.  When an async write fails we record the error code for
2127  * later in case the app does an fsync.  As long as errors persist we force
2128  * future rpcs to be sync so that the app can get a sync error and break
2129  * the cycle of queueing pages for which writeback will fail. */
2130 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
2131                            int rc)
2132 {
2133         if (rc) {
2134                 if (!ar->ar_rc)
2135                         ar->ar_rc = rc;
2136
2137                 ar->ar_force_sync = 1;
2138                 ar->ar_min_xid = ptlrpc_sample_next_xid();
2139                 return;
2140
2141         }
2142
2143         if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
2144                 ar->ar_force_sync = 0;
2145 }
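/*
 * Example timeline: a write with xid 100 fails, so ar_rc latches the error,
 * ar_force_sync is set and ar_min_xid is set to a sample of the next xid to
 * be handed out (say 101).  Replies for requests already in flight
 * (xid <= 100) can't clear the flag; only a successful write issued after
 * the failure was recorded (xid >= 101) turns force_sync back off.
 */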
2146
2147 void osc_oap_to_pending(struct osc_async_page *oap)
2148 {
2149         struct loi_oap_pages *lop;
2150
2151         if (oap->oap_cmd & OBD_BRW_WRITE)
2152                 lop = &oap->oap_loi->loi_write_lop;
2153         else
2154                 lop = &oap->oap_loi->loi_read_lop;
2155
2156         if (oap->oap_async_flags & ASYNC_HP)
2157                 cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2158         else if (oap->oap_async_flags & ASYNC_URGENT)
2159                 cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
2160         cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2161         lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
2162 }
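/*
 * Note the asymmetry above: ASYNC_HP pages are pushed onto the head of the
 * urgent list while plain ASYNC_URGENT pages queue at its tail, so
 * lop_makes_hprpc() only ever has to look at the first entry.
 */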
2163
2164 /* this must be called holding the loi list lock to give coverage to exit_cache,
2165  * async_flag maintenance, and oap_request */
2166 static void osc_ap_completion(const struct lu_env *env,
2167                               struct client_obd *cli, struct obdo *oa,
2168                               struct osc_async_page *oap, int sent, int rc)
2169 {
2170         __u64 xid = 0;
2171
2172         ENTRY;
2173         if (oap->oap_request != NULL) {
2174                 xid = ptlrpc_req_xid(oap->oap_request);
2175                 ptlrpc_req_finished(oap->oap_request);
2176                 oap->oap_request = NULL;
2177         }
2178
2179         cfs_spin_lock(&oap->oap_lock);
2180         oap->oap_async_flags = 0;
2181         cfs_spin_unlock(&oap->oap_lock);
2182         oap->oap_interrupted = 0;
2183
2184         if (oap->oap_cmd & OBD_BRW_WRITE) {
2185                 osc_process_ar(&cli->cl_ar, xid, rc);
2186                 osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);
2187         }
2188
2189         if (rc == 0 && oa != NULL) {
2190                 if (oa->o_valid & OBD_MD_FLBLOCKS)
2191                         oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
2192                 if (oa->o_valid & OBD_MD_FLMTIME)
2193                         oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
2194                 if (oa->o_valid & OBD_MD_FLATIME)
2195                         oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
2196                 if (oa->o_valid & OBD_MD_FLCTIME)
2197                         oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
2198         }
2199
2200         rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
2201                                                 oap->oap_cmd, oa, rc);
2202
2203         /* cl_page_completion() drops PG_locked, so a new I/O on the page
2204          * could start, but OSC calls it under lock and thus we can add oap
2205          * back to pending safely */
2206         if (rc)
2207                 /* upper layer wants to leave the page on pending queue */
2208                 osc_oap_to_pending(oap);
2209         else
2210                 osc_exit_cache(cli, oap, sent);
2211         EXIT;
2212 }
2213
2214 static int brw_queue_work(const struct lu_env *env, void *data)
2215 {
2216         struct client_obd *cli = data;
2217
2218         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
2219
2220         client_obd_list_lock(&cli->cl_loi_list_lock);
2221         osc_check_rpcs0(env, cli, 1);
2222         client_obd_list_unlock(&cli->cl_loi_list_lock);
2223         RETURN(0);
2224 }
2225
2226 static int brw_interpret(const struct lu_env *env,
2227                          struct ptlrpc_request *req, void *data, int rc)
2228 {
2229         struct osc_brw_async_args *aa = data;
2230         struct client_obd *cli;
2231         int async;
2232         ENTRY;
2233
2234         rc = osc_brw_fini_request(req, rc);
2235         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2236         if (osc_recoverable_error(rc)) {
2237                 rc = osc_brw_redo_request(req, aa);
2238                 if (rc == 0)
2239                         RETURN(0);
2240         }
2241
2242         if (aa->aa_ocapa) {
2243                 capa_put(aa->aa_ocapa);
2244                 aa->aa_ocapa = NULL;
2245         }
2246
2247         cli = aa->aa_cli;
2248         client_obd_list_lock(&cli->cl_loi_list_lock);
2249
2250         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2251          * is called so we know whether to go to sync BRWs or wait for more
2252          * RPCs to complete */
2253         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2254                 cli->cl_w_in_flight--;
2255         else
2256                 cli->cl_r_in_flight--;
2257
2258         async = cfs_list_empty(&aa->aa_oaps);
2259         if (!async) { /* from osc_send_oap_rpc() */
2260                 struct osc_async_page *oap, *tmp;
2261                 /* the caller may re-use the oap after the completion call so
2262                  * we need to clean it up a little */
2263                 cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
2264                                              oap_rpc_item) {
2265                         cfs_list_del_init(&oap->oap_rpc_item);
2266                         osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
2267                 }
2268                 OBDO_FREE(aa->aa_oa);
2269         } else { /* from async_internal() */
2270                 obd_count i;
2271                 for (i = 0; i < aa->aa_page_count; i++)
2272                         osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
2273         }
2274         osc_wake_cache_waiters(cli);
2275         osc_check_rpcs0(env, cli, 1);
2276         client_obd_list_unlock(&cli->cl_loi_list_lock);
2277
2278         if (!async)
2279                 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
2280                                   req->rq_bulk->bd_nob_transferred);
2281         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2282         ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
2283
2284         RETURN(rc);
2285 }
2286
2287 static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
2288                                             struct client_obd *cli,
2289                                             cfs_list_t *rpc_list,
2290                                             int page_count, int cmd)
2291 {
2292         struct ptlrpc_request *req;
2293         struct brw_page **pga = NULL;
2294         struct osc_brw_async_args *aa;
2295         struct obdo *oa = NULL;
2296         const struct obd_async_page_ops *ops = NULL;
2297         struct osc_async_page *oap;
2298         struct osc_async_page *tmp;
2299         struct cl_req *clerq = NULL;
2300         enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2301         struct ldlm_lock *lock = NULL;
2302         struct cl_req_attr crattr;
2303         int i, rc, mpflag = 0;
2304
2305         ENTRY;
2306         LASSERT(!cfs_list_empty(rpc_list));
2307
2308         if (cmd & OBD_BRW_MEMALLOC)
2309                 mpflag = cfs_memory_pressure_get_and_set();
2310
2311         memset(&crattr, 0, sizeof crattr);
2312         OBD_ALLOC(pga, sizeof(*pga) * page_count);
2313         if (pga == NULL)
2314                 GOTO(out, req = ERR_PTR(-ENOMEM));
2315
2316         OBDO_ALLOC(oa);
2317         if (oa == NULL)
2318                 GOTO(out, req = ERR_PTR(-ENOMEM));
2319
2320         i = 0;
2321         cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
2322                 struct cl_page *page = osc_oap2cl_page(oap);
2323                 if (ops == NULL) {
2324                         ops = oap->oap_caller_ops;
2325
2326                         clerq = cl_req_alloc(env, page, crt,
2327                                              1 /* only 1-object rpcs for
2328                                                 * now */);
2329                         if (IS_ERR(clerq))
2330                                 GOTO(out, req = (void *)clerq);
2331                         lock = oap->oap_ldlm_lock;
2332                 }
2333                 pga[i] = &oap->oap_brw_page;
2334                 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2335                 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
2336                        pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
2337                 i++;
2338                 cl_req_page_add(env, clerq, page);
2339         }
2340
2341         /* always get the data for the obdo for the rpc */
2342         LASSERT(ops != NULL);
2343         crattr.cra_oa = oa;
2344         crattr.cra_capa = NULL;
2345         cl_req_attr_set(env, clerq, &crattr, ~0ULL);
2346         if (lock) {
2347                 oa->o_handle = lock->l_remote_handle;
2348                 oa->o_valid |= OBD_MD_FLHANDLE;
2349         }
2350
2351         rc = cl_req_prep(env, clerq);
2352         if (rc != 0) {
2353                 CERROR("cl_req_prep failed: %d\n", rc);
2354                 GOTO(out, req = ERR_PTR(rc));
2355         }
2356
2357         sort_brw_pages(pga, page_count);
2358         rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
2359                                   pga, &req, crattr.cra_capa, 1, 0);
2360         if (rc != 0) {
2361                 CERROR("prep_req failed: %d\n", rc);
2362                 GOTO(out, req = ERR_PTR(rc));
2363         }
2364
2365         if (cmd & OBD_BRW_MEMALLOC)
2366                 req->rq_memalloc = 1;
2367
2368         /* Need to update the timestamps after the request is built in case
2369          * we race with setattr (locally or in queue at OST).  If OST gets
2370          * later setattr before earlier BRW (as determined by the request xid),
2371          * the OST will not use BRW timestamps.  Sadly, there is no obvious
2372          * way to do this in a single call.  bug 10150 */
2373         cl_req_attr_set(env, clerq, &crattr,
2374                         OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
2375
2376         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2377         aa = ptlrpc_req_async_args(req);
2378         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
2379         cfs_list_splice(rpc_list, &aa->aa_oaps);
2380         CFS_INIT_LIST_HEAD(rpc_list);
2381         aa->aa_clerq = clerq;
2382 out:
2383         if (cmd & OBD_BRW_MEMALLOC)
2384                 cfs_memory_pressure_restore(mpflag);
2385
2386         capa_put(crattr.cra_capa);
2387         if (IS_ERR(req)) {
2388                 if (oa)
2389                         OBDO_FREE(oa);
2390                 if (pga)
2391                         OBD_FREE(pga, sizeof(*pga) * page_count);
2392                 /* this should happen rarely and is pretty bad; it makes
2393                  * the pending list not follow the dirty order */
2394                 client_obd_list_lock(&cli->cl_loi_list_lock);
2395                 cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
2396                         cfs_list_del_init(&oap->oap_rpc_item);
2397
2398                         /* queued sync pages can be torn down while the pages
2399                          * were between the pending list and the rpc */
2400                         if (oap->oap_interrupted) {
2401                                 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
2402                                 osc_ap_completion(env, cli, NULL, oap, 0,
2403                                                   oap->oap_count);
2404                                 continue;
2405                         }
2406                         osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req));
2407                 }
2408                 if (clerq && !IS_ERR(clerq))
2409                         cl_req_completion(env, clerq, PTR_ERR(req));
2410         }
2411         RETURN(req);
2412 }
2413
2414 /**
2415  * prepare pages for ASYNC io and put pages in send queue.
2416  *
2417  * \param cmd OBD_BRW_* macros
2418  * \param lop pending pages
2419  *
2420  * \return zero if no page was added to the send queue.
2421  * \return 1 if pages were successfully added to the send queue.
2422  * \return negative on errors.
2423  */
2424 static int
2425 osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
2426                  struct lov_oinfo *loi, int cmd,
2427                  struct loi_oap_pages *lop, pdl_policy_t pol)
2428 {
2429         struct ptlrpc_request *req;
2430         obd_count page_count = 0;
2431         struct osc_async_page *oap = NULL, *tmp;
2432         struct osc_brw_async_args *aa;
2433         const struct obd_async_page_ops *ops;
2434         CFS_LIST_HEAD(rpc_list);
2435         int srvlock = 0, mem_tight = 0;
2436         struct cl_object *clob = NULL;
2437         obd_off starting_offset = OBD_OBJECT_EOF;
2438         unsigned int ending_offset;
2439         int starting_page_off = 0;
2440         ENTRY;
2441
2442         /* ASYNC_HP pages first. At present, when a lock covering the pages
2443          * is about to be canceled, the pages it covers will be sent out
2444          * with ASYNC_HP. We have to send them out as soon as possible. */
2445         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
2446                 if (oap->oap_async_flags & ASYNC_HP)
2447                         cfs_list_move(&oap->oap_pending_item, &rpc_list);
2448                 else if (!(oap->oap_brw_flags & OBD_BRW_SYNC))
2449                         /* only do this for writeback pages. */
2450                         cfs_list_move_tail(&oap->oap_pending_item, &rpc_list);
2451                 if (++page_count >= cli->cl_max_pages_per_rpc)
2452                         break;
2453         }
2454         cfs_list_splice_init(&rpc_list, &lop->lop_pending);
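        /* cfs_list_splice_init() put the pages selected above at the head
         * of lop_pending, so the scan below picks them up first */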
2455         page_count = 0;
2456
2457         /* first we find the pages we're allowed to work with */
2458         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
2459                                      oap_pending_item) {
2460                 ops = oap->oap_caller_ops;
2461
2462                 LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
2463                          "magic 0x%x\n", oap, oap->oap_magic);
2464
2465                 if (clob == NULL) {
2466                         /* pin object in memory, so that completion call-backs
2467                          * can be safely called under client_obd_list lock. */
2468                         clob = osc_oap2cl_page(oap)->cp_obj;
2469                         cl_object_get(clob);
2470                 }
2471
2472                 if (page_count != 0 &&
2473                     srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
2474                         CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
2475                                " oap %p, page %p, srvlock %u\n",
2476                                oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
2477                         break;
2478                 }
2479
2480                 /* If there is a gap at the start of this page, it can't merge
2481                  * with any previous page, so we'll hand the network a
2482                  * "fragmented" page array that it can't transfer in 1 RDMA */
2483                 if (oap->oap_obj_off < starting_offset) {
2484                         if (starting_page_off != 0)
2485                                 break;
2486
2487                         starting_page_off = oap->oap_page_off;
2488                         starting_offset = oap->oap_obj_off + starting_page_off;
2489                 } else if (oap->oap_page_off != 0)
2490                         break;
2491
2492                 /* in llite being 'ready' equates to the page being locked
2493                  * until completion unlocks it.  commit_write submits a page
2494                  * as not ready because its unlock will happen unconditionally
2495                  * as the call returns.  if we race with commit_write giving
2496                  * us that page we don't want to create a hole in the page
2497                  * stream, so we stop and leave the rpc to be fired by
2498                  * another dirtier or kupdated interval (the not ready page
2499                  * will still be on the dirty list).  we could call in
2500                  * at the end of ll_file_write to process the queue again. */
2501                 if (!(oap->oap_async_flags & ASYNC_READY)) {
2502                         int rc = ops->ap_make_ready(env, oap->oap_caller_data,
2503                                                     cmd);
2504                         if (rc < 0)
2505                                 CDEBUG(D_INODE, "oap %p page %p returned %d "
2506                                                 "instead of ready\n", oap,
2507                                                 oap->oap_page, rc);
2508                         switch (rc) {
2509                         case -EAGAIN:
2510                                 /* llite is telling us that the page is still
2511                                  * in commit_write and that we should try
2512                                  * and put it in an rpc again later.  we
2513                                  * break out of the loop so we don't create
2514                                  * a hole in the sequence of pages in the rpc
2515                                  * stream.*/
2516                                 oap = NULL;
2517                                 break;
2518                         case -EINTR:
2519                                 /* the io isn't needed.. tell the checks
2520                                  * below to complete the rpc with EINTR */
2521                                 cfs_spin_lock(&oap->oap_lock);
2522                                 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
2523                                 cfs_spin_unlock(&oap->oap_lock);
2524                                 oap->oap_count = -EINTR;
2525                                 break;
2526                         case 0:
2527                                 cfs_spin_lock(&oap->oap_lock);
2528                                 oap->oap_async_flags |= ASYNC_READY;
2529                                 cfs_spin_unlock(&oap->oap_lock);
2530                                 break;
2531                         default:
2532                                 LASSERTF(0, "oap %p page %p returned %d "
2533                                             "from make_ready\n", oap,
2534                                             oap->oap_page, rc);
2535                                 break;
2536                         }
2537                 }
2538                 if (oap == NULL)
2539                         break;
2540
2541                 /* take the page out of our book-keeping */
2542                 cfs_list_del_init(&oap->oap_pending_item);
2543                 lop_update_pending(cli, lop, cmd, -1);
2544                 cfs_list_del_init(&oap->oap_urgent_item);
2545
2546                 /* ask the caller for the size of the io as the rpc leaves. */
2547                 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
2548                         oap->oap_count =
2549                                 ops->ap_refresh_count(env, oap->oap_caller_data,
2550                                                       cmd);
2551                         LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE);
2552                 }
2553                 if (oap->oap_count <= 0) {
2554                         CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
2555                                oap->oap_count);
2556                         osc_ap_completion(env, cli, NULL,
2557                                           oap, 0, oap->oap_count);
2558                         continue;
2559                 }
2560
2561                 /* now put the page back in our accounting */
2562                 cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
2563                 if (page_count++ == 0)
2564                         srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
2565
2566                 if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
2567                         mem_tight = 1;
2568
2569                 /* End on a PTLRPC_MAX_BRW_SIZE boundary.  We want full-sized
2570                  * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
2571                  * have the same alignment as the initial writes that allocated
2572                  * extents on the server. */
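                /* e.g. with a 1 MiB PTLRPC_MAX_BRW_SIZE the mask test below
                 * fires exactly when the bytes queued so far end on a 1 MiB
                 * file boundary: the size is a power of two, so
                 * (x & (size - 1)) == 0 iff size divides x */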
2573                 ending_offset = oap->oap_obj_off + oap->oap_page_off +
2574                                 oap->oap_count;
2575                 if (!(ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)))
2576                         break;
2577
2578                 if (page_count >= cli->cl_max_pages_per_rpc)
2579                         break;
2580
2581                 /* If there is a gap at the end of this page, it can't merge
2582                  * with any subsequent pages, so we'll hand the network a
2583                  * "fragmented" page array that it can't transfer in 1 RDMA */
2584                 if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
2585                         break;
2586         }
2587
2588         loi_list_maint(cli, loi);
2589
2590         client_obd_list_unlock(&cli->cl_loi_list_lock);
2591
2592         if (clob != NULL)
2593                 cl_object_put(env, clob);
2594
2595         if (page_count == 0) {
2596                 client_obd_list_lock(&cli->cl_loi_list_lock);
2597                 RETURN(0);
2598         }
2599
2600         req = osc_build_req(env, cli, &rpc_list, page_count,
2601                             mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
2602         if (IS_ERR(req)) {
2603                 LASSERT(cfs_list_empty(&rpc_list));
2604                 loi_list_maint(cli, loi);
2605                 RETURN(PTR_ERR(req));
2606         }
2607
2608         aa = ptlrpc_req_async_args(req);
2609
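        /* for the offset histograms below, keep only the offset within one
         * max-BRW-size window; a fully aligned RPC therefore lands in the
         * first bucket, while misaligned ones show how far in they started */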
2610         starting_offset &= PTLRPC_MAX_BRW_SIZE - 1;
2611         if (cmd == OBD_BRW_READ) {
2612                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2613                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2614                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2615                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2616         } else {
2617                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2618                 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
2619                                  cli->cl_w_in_flight);
2620                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2621                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2622         }
2623
2624         client_obd_list_lock(&cli->cl_loi_list_lock);
2625
2626         if (cmd == OBD_BRW_READ)
2627                 cli->cl_r_in_flight++;
2628         else
2629                 cli->cl_w_in_flight++;
2630
2631         /* queued sync pages can be torn down while the pages
2632          * were between the pending list and the rpc */
2633         tmp = NULL;
2634         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2635                 /* only one oap gets a request reference */
2636                 if (tmp == NULL)
2637                         tmp = oap;
2638                 if (oap->oap_interrupted && !req->rq_intr) {
2639                         CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2640                                oap, req);
2641                         ptlrpc_mark_interrupted(req);
2642                 }
2643         }
2644         if (tmp != NULL)
2645                 tmp->oap_request = ptlrpc_request_addref(req);
2646
2647         DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2648                   page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2649
2650         req->rq_interpret_reply = brw_interpret;
2651
2652         /* XXX: Maybe the caller can check the RPC bulk descriptor to see which
2653          *      CPU/NUMA node the majority of pages were allocated on, and try
2654          *      to assign the async RPC to the CPU core (PDL_POLICY_PREFERRED)
2655          *      to reduce cross-CPU memory traffic.
2656          *
2657          *      But on the other hand, we expect that multiple ptlrpcd threads
2658          *      and the initial write sponsor can run in parallel, especially
2659          *      when data checksumming is enabled, a CPU-bound operation that a
2660          *      single ptlrpcd thread cannot process in time. So more ptlrpcd
2661          *      threads sharing the BRW load (with PDL_POLICY_ROUND) seems better.
2662          */
2663         ptlrpcd_add_req(req, pol, -1);
2664         RETURN(1);
2665 }
2666
2667 #define LOI_DEBUG(LOI, STR, args...)                                     \
2668         CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR,           \
2669                !cfs_list_empty(&(LOI)->loi_ready_item) ||                \
2670                !cfs_list_empty(&(LOI)->loi_hp_ready_item),               \
2671                (LOI)->loi_write_lop.lop_num_pending,                     \
2672                !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent),        \
2673                (LOI)->loi_read_lop.lop_num_pending,                      \
2674                !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent),         \
2675                args)                                                     \
2676
2677 /* This is called by osc_check_rpcs() to find which objects have pages that
2678  * we could be sending.  These lists are maintained by lop_makes_rpc(). */
2679 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
2680 {
2681         ENTRY;
2682
2683         /* First return objects that have blocked locks so that they
2684          * will be flushed quickly and other clients can get the lock,
2685          * then objects which have pages ready to be stuffed into RPCs */
2686         if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
2687                 RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
2688                                       struct lov_oinfo, loi_hp_ready_item));
2689         if (!cfs_list_empty(&cli->cl_loi_ready_list))
2690                 RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
2691                                       struct lov_oinfo, loi_ready_item));
2692
2693         /* then if we have cache waiters, return all objects with queued
2694          * writes.  This is especially important when many small files
2695          * have filled up the cache but have not been fired into rpcs because
2696          * they don't pass the nr_pending/object threshold */
2697         if (!cfs_list_empty(&cli->cl_cache_waiters) &&
2698             !cfs_list_empty(&cli->cl_loi_write_list))
2699                 RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2700                                       struct lov_oinfo, loi_write_item));
2701
2702         /* then return all queued objects when we have an invalid import
2703          * so that they get flushed */
2704         if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2705                 if (!cfs_list_empty(&cli->cl_loi_write_list))
2706                         RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2707                                               struct lov_oinfo,
2708                                               loi_write_item));
2709                 if (!cfs_list_empty(&cli->cl_loi_read_list))
2710                         RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
2711                                               struct lov_oinfo, loi_read_item));
2712         }
2713         RETURN(NULL);
2714 }
2715
2716 static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
2717 {
2718         struct osc_async_page *oap;
2719         int hprpc = 0;
2720
2721         if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
2722                 oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
2723                                      struct osc_async_page, oap_urgent_item);
2724                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2725         }
2726
2727         if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
2728                 oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
2729                                      struct osc_async_page, oap_urgent_item);
2730                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2731         }
2732
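        /* Note: a queued high-priority (ASYNC_HP) page earns one extra RPC
         * slot, e.g. with cl_max_rpcs_in_flight == 8 and hprpc == 1 a 9th
         * RPC may still be started for this object. */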
2733         return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
2734 }
2735
2736 /* called with the loi list lock held */
2737 static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli,
                                 int ptlrpc)
2738 {
2739         struct lov_oinfo *loi;
2740         int rc = 0, race_counter = 0;
2741         pdl_policy_t pol;
2742         ENTRY;
2743
2744         pol = ptlrpc ? PDL_POLICY_SAME : PDL_POLICY_ROUND;
2745
2746         while ((loi = osc_next_loi(cli)) != NULL) {
2747                 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
2748
2749                 if (osc_max_rpc_in_flight(cli, loi))
2750                         break;
2751
2752                 /* attempt some read/write balancing by alternating between
2753                  * reads and writes in an object.  The makes_rpc checks here
2754                  * would be redundant if we were getting read/write work items
2755          * instead of objects.  We don't want send_oap_rpc to drain a
2756          * partial read pending queue when we're given this object to
2757          * do write io on while there are cache waiters */
2758                 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
2759                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
2760                                               &loi->loi_write_lop, pol);
2761                         if (rc < 0) {
2762                                 CERROR("Write request failed with %d\n", rc);
2763
2764                                 /* osc_send_oap_rpc failed, mostly because of
2765                                  * memory pressure.
2766                                  *
2767                                  * We can't break out here, because if:
2768                                  *  - a page was submitted by osc_io_submit,
2769                                  *    so the page is locked;
2770                                  *  - no request is in flight;
2771                                  *  - no subsequent request will be sent;
2772                                  * then the system ends up live-locked,
2773                                  * because there is no further chance to
2774                                  * call osc_io_unplug() and
2775                                  * osc_check_rpcs(). pdflush can't help
2776                                  * either, because it might block grabbing
2777                                  * the page lock as mentioned above.
2778                                  *
2779                                  * Anyway, continue to drain pages. */
2780                                 /* break; */
2781                         }
2782
2783                         if (rc > 0)
2784                                 race_counter = 0;
2785                         else if (rc == 0)
2786                                 race_counter++;
2787                 }
2788                 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
2789                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
2790                                               &loi->loi_read_lop, pol);
2791                         if (rc < 0)
2792                                 CERROR("Read request failed with %d\n", rc);
2793
2794                         if (rc > 0)
2795                                 race_counter = 0;
2796                         else if (rc == 0)
2797                                 race_counter++;
2798                 }
2799
2800                 /* attempt some inter-object balancing by issuing rpcs
2801                  * for each object in turn */
2802                 if (!cfs_list_empty(&loi->loi_hp_ready_item))
2803                         cfs_list_del_init(&loi->loi_hp_ready_item);
2804                 if (!cfs_list_empty(&loi->loi_ready_item))
2805                         cfs_list_del_init(&loi->loi_ready_item);
2806                 if (!cfs_list_empty(&loi->loi_write_item))
2807                         cfs_list_del_init(&loi->loi_write_item);
2808                 if (!cfs_list_empty(&loi->loi_read_item))
2809                         cfs_list_del_init(&loi->loi_read_item);
2810
2811                 loi_list_maint(cli, loi);
2812
2813                 /* send_oap_rpc returns 0 when make_ready tells it to
2814                  * back off.  llite's make_ready does this when it tries
2815                  * to lock a page queued for write that is already locked.
2816                  * We want to try sending rpcs from many objects, but we
2817                  * don't want to spin, repeatedly failing with 0.  */
2818                 if (race_counter == 10)
2819                         break;
2820         }
2821 }
2822
2823 void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2824 {
2825         osc_check_rpcs0(env, cli, 0);
2826 }
2827
2828 /**
2829  * Non-blocking version of osc_enter_cache() that consumes grant only when it
2830  * is available.
2831  */
2832 int osc_enter_cache_try(const struct lu_env *env,
2833                         struct client_obd *cli, struct lov_oinfo *loi,
2834                         struct osc_async_page *oap, int transient)
2835 {
2836         int has_grant;
2837
2838         has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE;
2839         if (has_grant) {
2840                 osc_consume_write_grant(cli, &oap->oap_brw_page);
2841                 if (transient) {
2842                         cli->cl_dirty_transit += CFS_PAGE_SIZE;
2843                         cfs_atomic_inc(&obd_dirty_transit_pages);
2844                         oap->oap_brw_flags |= OBD_BRW_NOCACHE;
2845                 }
2846         }
2847         return has_grant;
2848 }
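
/* Worked example (illustrative, assuming 4 KiB pages): with cl_avail_grant
 * of 1 MiB the check above admits 256 dirty pages, osc_consume_write_grant()
 * charging CFS_PAGE_SIZE of grant for each; the 257th caller sees
 * has_grant == 0 and must fall back to osc_enter_cache() below to wait for
 * grant to come back with completed BRW RPCs. */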
2849
2850 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
2851  * grant or cache space. */
2852 static int osc_enter_cache(const struct lu_env *env,
2853                            struct client_obd *cli, struct lov_oinfo *loi,
2854                            struct osc_async_page *oap)
2855 {
2856         struct osc_cache_waiter ocw;
2857         struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
2858         int rc = -EDQUOT;
2859         ENTRY;
2860
2861         CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
2862                "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
2863                cli->cl_dirty_max, obd_max_dirty_pages,
2864                cli->cl_lost_grant, cli->cl_avail_grant);
2865
2866         /* force the caller to try sync io.  this can jump the list
2867          * of queued writes and create a discontiguous rpc stream */
2868         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
2869             cli->cl_dirty_max < CFS_PAGE_SIZE     ||
2870             cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
2871                 RETURN(-EDQUOT);
2872
2873         /* Hopefully normal case - cache space and write credits available */
2874         if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
2875             cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
2876             osc_enter_cache_try(env, cli, loi, oap, 0))
2877                 RETURN(0);
2878
2879         /* We can get here for two reasons: too many dirty pages in cache, or
2880          * run out of grants. In both cases we should write dirty pages out.
2881          * Adding a cache waiter will trigger urgent write-out no matter what
2882          * RPC size will be.
2883          * The exit condition is no available grant and no dirty pages cached,
2884          * which really means there is no space on the OST. */
2885         cfs_waitq_init(&ocw.ocw_waitq);
2886         ocw.ocw_oap = oap;
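        /* Waiter protocol: add ourselves to cl_cache_waiters, kick
         * osc_check_rpcs() to push dirty pages out, drop the list lock and
         * sleep until osc_wake_cache_waiters() removes us from the list (or
         * a signal arrives), then retake the lock and re-evaluate. */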
2887         while (cli->cl_dirty > 0) {
2888                 cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
2889                 ocw.ocw_rc = 0;
2890
2891                 loi_list_maint(cli, loi);
2892                 osc_check_rpcs(env, cli);
2893                 client_obd_list_unlock(&cli->cl_loi_list_lock);
2894
2895                 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
2896                        cli->cl_import->imp_obd->obd_name, &ocw, oap);
2897
2898                 rc = l_wait_event(ocw.ocw_waitq,
                                       cfs_list_empty(&ocw.ocw_entry), &lwi);
2899
2900                 client_obd_list_lock(&cli->cl_loi_list_lock);
2901                 cfs_list_del_init(&ocw.ocw_entry);
2902                 if (rc < 0)
2903                         break;
2904
2905                 rc = ocw.ocw_rc;
2906                 if (rc != -EDQUOT)
2907                         break;
2908         }
2909
2910         RETURN(rc);
2911 }
2912
2913
2914 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
2915                         struct lov_oinfo *loi, cfs_page_t *page,
2916                         obd_off offset, const struct obd_async_page_ops *ops,
2917                         void *data, void **res, int nocache,
2918                         struct lustre_handle *lockh)
2919 {
2920         struct osc_async_page *oap;
2921
2922         ENTRY;
2923
2924         if (!page)
2925                 return cfs_size_round(sizeof(*oap));
2926
2927         oap = *res;
2928         oap->oap_magic = OAP_MAGIC;
2929         oap->oap_cli = &exp->exp_obd->u.cli;
2930         oap->oap_loi = loi;
2931
2932         oap->oap_caller_ops = ops;
2933         oap->oap_caller_data = data;
2934
2935         oap->oap_page = page;
2936         oap->oap_obj_off = offset;
2937         if (!client_is_remote(exp) &&
2938             cfs_capable(CFS_CAP_SYS_RESOURCE))
2939                 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2940
2941         LASSERT(!(offset & ~CFS_PAGE_MASK));
2942
2943         CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
2944         CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
2945         CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
2946         CFS_INIT_LIST_HEAD(&oap->oap_page_list);
2947
2948         cfs_spin_lock_init(&oap->oap_lock);
2949         CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
2950         RETURN(0);
2951 }
2952
2953 int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
2954                        struct lov_stripe_md *lsm, struct lov_oinfo *loi,
2955                        struct osc_async_page *oap, int cmd, int off,
2956                        int count, obd_flag brw_flags, enum async_flags async_flags)
2957 {
2958         struct client_obd *cli = &exp->exp_obd->u.cli;
2959         int rc = 0;
2960         ENTRY;
2961
2962         if (oap->oap_magic != OAP_MAGIC)
2963                 RETURN(-EINVAL);
2964
2965         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2966                 RETURN(-EIO);
2967
2968         if (!cfs_list_empty(&oap->oap_pending_item) ||
2969             !cfs_list_empty(&oap->oap_urgent_item) ||
2970             !cfs_list_empty(&oap->oap_rpc_item))
2971                 RETURN(-EBUSY);
2972
2973         /* check if the file's owner/group is over quota */
2974         if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) {
2975                 struct cl_object *obj;
2976                 struct cl_attr    attr; /* XXX put attr into thread info */
2977                 unsigned int qid[MAXQUOTAS];
2978
2979                 obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj);
2980
2981                 cl_object_attr_lock(obj);
2982                 rc = cl_object_attr_get(env, obj, &attr);
2983                 cl_object_attr_unlock(obj);
2984
2985                 qid[USRQUOTA] = attr.cat_uid;
2986                 qid[GRPQUOTA] = attr.cat_gid;
2987                 if (rc == 0 &&
2988                     osc_quota_chkdq(cli, qid) == NO_QUOTA)
2989                         rc = -EDQUOT;
2990                 if (rc)
2991                         RETURN(rc);
2992         }
2993
2994         if (loi == NULL)
2995                 loi = lsm->lsm_oinfo[0];
2996
2997         client_obd_list_lock(&cli->cl_loi_list_lock);
2998
2999         LASSERT(off + count <= CFS_PAGE_SIZE);
3000         oap->oap_cmd = cmd;
3001         oap->oap_page_off = off;
3002         oap->oap_count = count;
3003         oap->oap_brw_flags = brw_flags;
3004         /* Give a hint to OST that requests are coming from kswapd - bug19529 */
3005         if (cfs_memory_pressure_get())
3006                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
3007         cfs_spin_lock(&oap->oap_lock);
3008         oap->oap_async_flags = async_flags;
3009         cfs_spin_unlock(&oap->oap_lock);
3010
3011         if (cmd & OBD_BRW_WRITE) {
3012                 rc = osc_enter_cache(env, cli, loi, oap);
3013                 if (rc) {
3014                         client_obd_list_unlock(&cli->cl_loi_list_lock);
3015                         RETURN(rc);
3016                 }
3017         }
3018
3019         LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
3020                   cmd);
3021
3022         osc_oap_to_pending(oap);
3023         loi_list_maint(cli, loi);
3024         if (!osc_max_rpc_in_flight(cli, loi) &&
3025             lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
3026                 LASSERT(cli->cl_writeback_work != NULL);
3027                 rc = ptlrpcd_queue_work(cli->cl_writeback_work);
3028
3029                 CDEBUG(D_CACHE, "Queued writeback work for client obd %p/%d.\n",
3030                        cli, rc);
3031         }
3032         client_obd_list_unlock(&cli->cl_loi_list_lock);
3033
3034         RETURN(0);
3035 }
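
/* Illustrative caller sketch (not built, hence the #if 0): one way client
 * code might drive the async page API above.  The oap storage must be
 * preallocated by the caller (osc_prep_async_page() reports the size needed
 * when called with a NULL page), and "my_ops" is a hypothetical
 * obd_async_page_ops table -- both are assumptions, not part of this file. */
#if 0
static int my_queue_one_page(const struct lu_env *env, struct obd_export *exp,
                             struct lov_stripe_md *lsm, cfs_page_t *page,
                             obd_off offset, struct osc_async_page *oap)
{
        int rc;

        /* attach OSC bookkeeping to the preallocated oap */
        rc = osc_prep_async_page(exp, lsm, NULL, page, offset, &my_ops, NULL,
                                 (void **)&oap, 0, NULL);
        if (rc != 0)
                return rc;

        /* queue a full-page write; grant and dirty accounting happen
         * inside osc_enter_cache() */
        rc = osc_queue_async_io(env, exp, lsm, NULL, oap, OBD_BRW_WRITE,
                                0, CFS_PAGE_SIZE, 0, ASYNC_READY);
        if (rc != 0)
                osc_teardown_async_page(exp, lsm, NULL, oap);
        return rc;
}
#endif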
3036
3037 /* aka (~was & now & flag), but this is more clear :) */
3038 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
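/* e.g. SETTING(0x1, 0x3, 0x2) == 1: the 0x2 bit was clear in "was" and is
 * set in "now"; SETTING(0x3, 0x3, 0x2) == 0 since the bit was already set. */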
3039
3040 int osc_set_async_flags_base(struct client_obd *cli,
3041                              struct lov_oinfo *loi, struct osc_async_page *oap,
3042                              obd_flag async_flags)
3043 {
3044         struct loi_oap_pages *lop;
3045         int flags = 0;
3046         ENTRY;
3047
3048         LASSERT(!cfs_list_empty(&oap->oap_pending_item));
3049
3050         if (oap->oap_cmd & OBD_BRW_WRITE) {
3051                 lop = &loi->loi_write_lop;
3052         } else {
3053                 lop = &loi->loi_read_lop;
3054         }
3055
3056         if ((oap->oap_async_flags & async_flags) == async_flags)
3057                 RETURN(0);
3058
3059         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
3060                 flags |= ASYNC_READY;
3061
3062         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
3063             cfs_list_empty(&oap->oap_rpc_item)) {
3064                 if (oap->oap_async_flags & ASYNC_HP)
3065                         cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
3066                 else
3067                         cfs_list_add_tail(&oap->oap_urgent_item,
3068                                           &lop->lop_urgent);
3069                 flags |= ASYNC_URGENT;
3070                 loi_list_maint(cli, loi);
3071         }
3072         cfs_spin_lock(&oap->oap_lock);
3073         oap->oap_async_flags |= flags;
3074         cfs_spin_unlock(&oap->oap_lock);
3075
3076         LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
3077                         oap->oap_async_flags);
3078         RETURN(0);
3079 }
3080
3081 int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
3082                             struct lov_oinfo *loi, struct osc_async_page *oap)
3083 {
3084         struct client_obd *cli = &exp->exp_obd->u.cli;
3085         struct loi_oap_pages *lop;
3086         int rc = 0;
3087         ENTRY;
3088
3089         if (oap->oap_magic != OAP_MAGIC)
3090                 RETURN(-EINVAL);
3091
3092         if (loi == NULL)
3093                 loi = lsm->lsm_oinfo[0];
3094
3095         if (oap->oap_cmd & OBD_BRW_WRITE) {
3096                 lop = &loi->loi_write_lop;
3097         } else {
3098                 lop = &loi->loi_read_lop;
3099         }
3100
3101         client_obd_list_lock(&cli->cl_loi_list_lock);
3102
3103         if (!cfs_list_empty(&oap->oap_rpc_item))
3104                 GOTO(out, rc = -EBUSY);
3105
3106         osc_exit_cache(cli, oap, 0);
3107         osc_wake_cache_waiters(cli);
3108
3109         if (!cfs_list_empty(&oap->oap_urgent_item)) {
3110                 cfs_list_del_init(&oap->oap_urgent_item);
3111                 cfs_spin_lock(&oap->oap_lock);
3112                 oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
3113                 cfs_spin_unlock(&oap->oap_lock);
3114         }
3115         if (!cfs_list_empty(&oap->oap_pending_item)) {
3116                 cfs_list_del_init(&oap->oap_pending_item);
3117                 lop_update_pending(cli, lop, oap->oap_cmd, -1);
3118         }
3119         loi_list_maint(cli, loi);
3120         LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
3121 out:
3122         client_obd_list_unlock(&cli->cl_loi_list_lock);
3123         RETURN(rc);
3124 }
3125
3126 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
3127                                         struct ldlm_enqueue_info *einfo)
3128 {
3129         void *data = einfo->ei_cbdata;
3130         int set = 0;
3131
3132         LASSERT(lock != NULL);
3133         LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
3134         LASSERT(lock->l_resource->lr_type == einfo->ei_type);
3135         LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
3136         LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
3137
3138         lock_res_and_lock(lock);
3139         cfs_spin_lock(&osc_ast_guard);
3140
3141         if (lock->l_ast_data == NULL)
3142                 lock->l_ast_data = data;
3143         if (lock->l_ast_data == data)
3144                 set = 1;
3145
3146         cfs_spin_unlock(&osc_ast_guard);
3147         unlock_res_and_lock(lock);
3148
3149         return set;
3150 }
3151
3152 static int osc_set_data_with_check(struct lustre_handle *lockh,
3153                                    struct ldlm_enqueue_info *einfo)
3154 {
3155         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3156         int set = 0;
3157
3158         if (lock != NULL) {
3159                 set = osc_set_lock_data_with_check(lock, einfo);
3160                 LDLM_LOCK_PUT(lock);
3161         } else
3162                 CERROR("lockh %p, data %p - client evicted?\n",
3163                        lockh, einfo->ei_cbdata);
3164         return set;
3165 }
3166
3167 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3168                              ldlm_iterator_t replace, void *data)
3169 {
3170         struct ldlm_res_id res_id;
3171         struct obd_device *obd = class_exp2obd(exp);
3172
3173         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3174         ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3175         return 0;
3176 }
3177
3178 /* find any ldlm lock of the inode in osc
3179  * return 0    if none was found
3180  *        1    if one was found
3181  *      < 0    on error */
3182 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3183                            ldlm_iterator_t replace, void *data)
3184 {
3185         struct ldlm_res_id res_id;
3186         struct obd_device *obd = class_exp2obd(exp);
3187         int rc = 0;
3188
3189         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3190         rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3191         if (rc == LDLM_ITER_STOP)
3192                 return(1);
3193         if (rc == LDLM_ITER_CONTINUE)
3194                 return(0);
3195         return(rc);
3196 }
3197
3198 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
3199                             obd_enqueue_update_f upcall, void *cookie,
3200                             int *flags, int agl, int rc)
3201 {
3202         int intent = *flags & LDLM_FL_HAS_INTENT;
3203         ENTRY;
3204
3205         if (intent) {
3206                 /* The request was created before ldlm_cli_enqueue call. */
3207                 if (rc == ELDLM_LOCK_ABORTED) {
3208                         struct ldlm_reply *rep;
3209                         rep = req_capsule_server_get(&req->rq_pill,
3210                                                      &RMF_DLM_REP);
3211
3212                         LASSERT(rep != NULL);
3213                         if (rep->lock_policy_res1)
3214                                 rc = rep->lock_policy_res1;
3215                 }
3216         }
3217
3218         if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
3219             (rc == 0)) {
3220                 *flags |= LDLM_FL_LVB_READY;
3221                 CDEBUG(D_INODE, "got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
3222                        lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
3223         }
3224
3225         /* Call the update callback. */
3226         rc = (*upcall)(cookie, rc);
3227         RETURN(rc);
3228 }
3229
3230 static int osc_enqueue_interpret(const struct lu_env *env,
3231                                  struct ptlrpc_request *req,
3232                                  struct osc_enqueue_args *aa, int rc)
3233 {
3234         struct ldlm_lock *lock;
3235         struct lustre_handle handle;
3236         __u32 mode;
3237         struct ost_lvb *lvb;
3238         __u32 lvb_len;
3239         int *flags = aa->oa_flags;
3240
3241         /* Make a local copy of a lock handle and a mode, because aa->oa_*
3242          * might be freed anytime after lock upcall has been called. */
3243         lustre_handle_copy(&handle, aa->oa_lockh);
3244         mode = aa->oa_ei->ei_mode;
3245
3246         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
3247          * be valid. */
3248         lock = ldlm_handle2lock(&handle);
3249
3250         /* Take an additional reference so that a blocking AST that
3251          * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
3252          * to arrive after an upcall has been executed by
3253          * osc_enqueue_fini(). */
3254         ldlm_lock_addref(&handle, mode);
3255
3256         /* Let the CP AST grant the lock first. */
3257         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
3258
3259         if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
3260                 lvb = NULL;
3261                 lvb_len = 0;
3262         } else {
3263                 lvb = aa->oa_lvb;
3264                 lvb_len = sizeof(*aa->oa_lvb);
3265         }
3266
3267         /* Complete obtaining the lock procedure. */
3268         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
3269                                    mode, flags, lvb, lvb_len, &handle, rc);
3270         /* Complete osc stuff. */
3271         rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
3272                               flags, aa->oa_agl, rc);
3273
3274         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
3275
3276         /* Release the lock for async request. */
3277         if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
3278                 /*
3279                  * Releases a reference taken by ldlm_cli_enqueue(), if it is
3280                  * not already released by
3281                  * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
3282                  */
3283                 ldlm_lock_decref(&handle, mode);
3284
3285         LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
3286                  aa->oa_lockh, req, aa);
3287         ldlm_lock_decref(&handle, mode);
3288         LDLM_LOCK_PUT(lock);
3289         return rc;
3290 }
3291
3292 void osc_update_enqueue(struct lustre_handle *lov_lockhp,
3293                         struct lov_oinfo *loi, int flags,
3294                         struct ost_lvb *lvb, __u32 mode, int rc)
3295 {
3296         struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp);
3297
3298         if (rc == ELDLM_OK) {
3299                 __u64 tmp;
3300
3301                 LASSERT(lock != NULL);
3302                 loi->loi_lvb = *lvb;
3303                 tmp = loi->loi_lvb.lvb_size;
3304                 /* Extend KMS up to the end of this lock and no further
3305                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
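                /* e.g. an lvb_size of 10000 under a lock on [0, 4095] caps
                 * the new kms at 4096; under a lock reaching to OBD_OBJECT_EOF
                 * the kms would be set to 10000 itself. */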
3306                 if (tmp > lock->l_policy_data.l_extent.end)
3307                         tmp = lock->l_policy_data.l_extent.end + 1;
3308                 if (tmp >= loi->loi_kms) {
3309                         LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64
3310                                    ", kms="LPU64, loi->loi_lvb.lvb_size, tmp);
3311                         loi_kms_set(loi, tmp);
3312                 } else {
3313                         LDLM_DEBUG(lock, "lock acquired, setting rss="
3314                                    LPU64"; leaving kms="LPU64", end="LPU64,
3315                                    loi->loi_lvb.lvb_size, loi->loi_kms,
3316                                    lock->l_policy_data.l_extent.end);
3317                 }
3318                 ldlm_lock_allow_match(lock);
3319         } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) {
3320                 LASSERT(lock != NULL);
3321                 loi->loi_lvb = *lvb;
3322                 ldlm_lock_allow_match(lock);
3323                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
3324                        " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms);
3325                 rc = ELDLM_OK;
3326         }
3327
3328         if (lock != NULL) {
3329                 if (rc != ELDLM_OK)
3330                         ldlm_lock_fail_match(lock);
3331
3332                 LDLM_LOCK_PUT(lock);
3333         }
3334 }
3335 EXPORT_SYMBOL(osc_update_enqueue);
3336
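/* Sentinel, not a real set: callers pass PTLRPCD_SET to ask osc_enqueue_base()
 * to hand the request to a ptlrpcd daemon (see the rqset == PTLRPCD_SET branch
 * below) instead of adding it to a private request set. */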
3337 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
3338
3339 /* When enqueuing asynchronously, locks are not ordered; we can obtain a lock
3340  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
3341  * other synchronous requests; however, keeping some locks while trying to
3342  * obtain others may take a considerable amount of time in case of OST failure,
3343  * and when other sync requests do not get a released lock from a client, the
3344  * client is excluded from the cluster -- such scenarios make life difficult,
3345  * so release locks just after they are obtained. */
3346 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3347                      int *flags, ldlm_policy_data_t *policy,
3348                      struct ost_lvb *lvb, int kms_valid,
3349                      obd_enqueue_update_f upcall, void *cookie,
3350                      struct ldlm_enqueue_info *einfo,
3351                      struct lustre_handle *lockh,
3352                      struct ptlrpc_request_set *rqset, int async, int agl)
3353 {
3354         struct obd_device *obd = exp->exp_obd;
3355         struct ptlrpc_request *req = NULL;
3356         int intent = *flags & LDLM_FL_HAS_INTENT;
3357         int match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
3358         ldlm_mode_t mode;
3359         int rc;
3360         ENTRY;
3361
3362         /* Filesystem lock extents are extended to page boundaries so that
3363          * dealing with the page cache is a little smoother.  */
3364         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3365         policy->l_extent.end |= ~CFS_PAGE_MASK;
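        /* e.g. with 4 KiB pages a requested extent of [5000, 6000] is widened
         * by the two statements above to [4096, 8191]: the start is rounded
         * down and the end rounded up to page boundaries. */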
3366
3367         /*
3368          * kms is not valid when either object is completely fresh (so that no
3369          * locks are cached), or object was evicted. In the latter case cached
3370          * lock cannot be used, because it would prime inode state with
3371          * potentially stale LVB.
3372          */
3373         if (!kms_valid)
3374                 goto no_match;
3375
3376         /* Next, search for already existing extent locks that will cover us */
3377         /* If we're trying to read, we also search for an existing PW lock.  The
3378          * VFS and page cache already protect us locally, so lots of readers/
3379          * writers can share a single PW lock.
3380          *
3381          * There are problems with conversion deadlocks, so instead of
3382          * converting a read lock to a write lock, we'll just enqueue a new
3383          * one.
3384          *
3385          * At some point we should cancel the read lock instead of making them
3386          * send us a blocking callback, but there are problems with canceling
3387          * locks out from other users right now, too. */
3388         mode = einfo->ei_mode;
3389         if (einfo->ei_mode == LCK_PR)
3390                 mode |= LCK_PW;
3391         mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
3392                                einfo->ei_type, policy, mode, lockh, 0);
3393         if (mode) {
3394                 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
3395
3396                 if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
3397                         /* For AGL, if the enqueue RPC was sent but the lock
3398                          * was not granted, skip processing this stripe.
3399                          * Return -ECANCELED to tell the caller. */
3400                         ldlm_lock_decref(lockh, mode);
3401                         LDLM_LOCK_PUT(matched);
3402                         RETURN(-ECANCELED);
3403                 } else if (osc_set_lock_data_with_check(matched, einfo)) {
3404                         *flags |= LDLM_FL_LVB_READY;
3405                         /* addref the lock only for non-async requests, and
3406                          * only if a PW lock is matched whereas we asked for PR. */
3407                         if (!rqset && einfo->ei_mode != mode)
3408                                 ldlm_lock_addref(lockh, LCK_PR);
3409                         if (intent) {
3410                                 /* I would like to be able to ASSERT here that
3411                                  * rss <= kms, but I can't, for reasons which
3412                                  * are explained in lov_enqueue() */
3413                         }
3414
3415                         /* We already have a lock, and it's referenced */
3416                         (*upcall)(cookie, ELDLM_OK);
3417
3418                         if (einfo->ei_mode != mode)
3419                                 ldlm_lock_decref(lockh, LCK_PW);
3420                         else if (rqset)
3421                                 /* For async requests, decref the lock. */
3422                                 ldlm_lock_decref(lockh, einfo->ei_mode);
3423                         LDLM_LOCK_PUT(matched);
3424                         RETURN(ELDLM_OK);
3425                 } else {
3426                         ldlm_lock_decref(lockh, mode);
3427                         LDLM_LOCK_PUT(matched);
3428                 }
3429         }
3430
3431  no_match:
3432         if (intent) {
3433                 CFS_LIST_HEAD(cancels);
3434                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3435                                            &RQF_LDLM_ENQUEUE_LVB);
3436                 if (req == NULL)
3437                         RETURN(-ENOMEM);
3438
3439                 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
3440                 if (rc) {
3441                         ptlrpc_request_free(req);
3442                         RETURN(rc);
3443                 }
3444
3445                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
3446                                      sizeof(*lvb));
3447                 ptlrpc_request_set_replen(req);
3448         }
3449
3450         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3451         *flags &= ~LDLM_FL_BLOCK_GRANTED;
3452
3453         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
3454                               sizeof(*lvb), lockh, async);
3455         if (rqset) {
3456                 if (!rc) {
3457                         struct osc_enqueue_args *aa;
3458                         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3459                         aa = ptlrpc_req_async_args(req);
3460                         aa->oa_ei = einfo;
3461                         aa->oa_exp = exp;
3462                         aa->oa_flags  = flags;
3463                         aa->oa_upcall = upcall;
3464                         aa->oa_cookie = cookie;
3465                         aa->oa_lvb    = lvb;
3466                         aa->oa_lockh  = lockh;
3467                         aa->oa_agl    = !!agl;
3468
3469                         req->rq_interpret_reply =
3470                                 (ptlrpc_interpterer_t)osc_enqueue_interpret;
3471                         if (rqset == PTLRPCD_SET)
3472                                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
3473                         else
3474                                 ptlrpc_set_add_req(rqset, req);
3475                 } else if (intent) {
3476                         ptlrpc_req_finished(req);
3477                 }
3478                 RETURN(rc);
3479         }
3480
3481         rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
3482         if (intent)
3483                 ptlrpc_req_finished(req);
3484
3485         RETURN(rc);
3486 }
3487
3488 static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
3489                        struct ldlm_enqueue_info *einfo,
3490                        struct ptlrpc_request_set *rqset)
3491 {
3492         struct ldlm_res_id res_id;
3493         int rc;
3494         ENTRY;
3495
3496         osc_build_res_name(oinfo->oi_md->lsm_object_id,
3497                            oinfo->oi_md->lsm_object_seq, &res_id);
3498
3499         rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
3500                               &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
3501                               oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
3502                               oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
3503                               rqset, rqset != NULL, 0);
3504         RETURN(rc);
3505 }
3506
3507 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3508                    __u32 type, ldlm_policy_data_t *policy, __u32 mode,
3509                    int *flags, void *data, struct lustre_handle *lockh,
3510                    int unref)
3511 {
3512         struct obd_device *obd = exp->exp_obd;
3513         int lflags = *flags;
3514         ldlm_mode_t rc;
3515         ENTRY;
3516
3517         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3518                 RETURN(-EIO);
3519
3520         /* Filesystem lock extents are extended to page boundaries so that
3521          * dealing with the page cache is a little smoother */
3522         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3523         policy->l_extent.end |= ~CFS_PAGE_MASK;
3524
3525         /* Next, search for already existing extent locks that will cover us */
3526         /* If we're trying to read, we also search for an existing PW lock.  The
3527          * VFS and page cache already protect us locally, so lots of readers/
3528          * writers can share a single PW lock. */
3529         rc = mode;
3530         if (mode == LCK_PR)
3531                 rc |= LCK_PW;
3532         rc = ldlm_lock_match(obd->obd_namespace, lflags,
3533                              res_id, type, policy, rc, lockh, unref);
3534         if (rc) {
3535                 if (data != NULL) {
3536                         if (!osc_set_data_with_check(lockh, data)) {
3537                                 if (!(lflags & LDLM_FL_TEST_LOCK))
3538                                         ldlm_lock_decref(lockh, rc);
3539                                 RETURN(0);
3540                         }
3541                 }
3542                 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
3543                         ldlm_lock_addref(lockh, LCK_PR);
3544                         ldlm_lock_decref(lockh, LCK_PW);
3545                 }
3546                 RETURN(rc);
3547         }
3548         RETURN(rc);
3549 }
3550
3551 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
3552 {
3553         ENTRY;
3554
3555         if (unlikely(mode == LCK_GROUP))
3556                 ldlm_lock_decref_and_cancel(lockh, mode);
3557         else
3558                 ldlm_lock_decref(lockh, mode);
3559
3560         RETURN(0);
3561 }
3562
3563 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
3564                       __u32 mode, struct lustre_handle *lockh)
3565 {
3566         ENTRY;
3567         RETURN(osc_cancel_base(lockh, mode));
3568 }
3569
3570 static int osc_cancel_unused(struct obd_export *exp,
3571                              struct lov_stripe_md *lsm,
3572                              ldlm_cancel_flags_t flags,
3573                              void *opaque)
3574 {
3575         struct obd_device *obd = class_exp2obd(exp);
3576         struct ldlm_res_id res_id, *resp = NULL;
3577
3578         if (lsm != NULL) {
3579                 resp = osc_build_res_name(lsm->lsm_object_id,
3580                                           lsm->lsm_object_seq, &res_id);
3581         }
3582
3583         return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
3584 }
3585
3586 static int osc_statfs_interpret(const struct lu_env *env,
3587                                 struct ptlrpc_request *req,
3588                                 struct osc_async_args *aa, int rc)
3589 {
3590         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
3591         struct obd_statfs *msfs;
3592         __u64 used;
3593         ENTRY;
3594
3595         if (rc == -EBADR)
3596                 /* The request has in fact never been sent
3597                  * due to issues at a higher level (LOV).
3598                  * Exit immediately since the caller is
3599                  * aware of the problem and takes care
3600                  * of the cleanup */
3601                  RETURN(rc);
3602
3603         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3604             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3605                 GOTO(out, rc = 0);
3606
3607         if (rc != 0)
3608                 GOTO(out, rc);
3609
3610         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3611         if (msfs == NULL) {
3612                 GOTO(out, rc = -EPROTO);
3613         }
3614
3615         /* Reinitialize the RDONLY and DEGRADED flags at the client
3616          * on each statfs, so they don't stay set permanently. */
3617         cfs_spin_lock(&cli->cl_oscc.oscc_lock);
3618
3619         if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
3620                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
3621         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED))
3622                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED;
3623
3624         if (unlikely(msfs->os_state & OS_STATE_READONLY))
3625                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY;
3626         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY))
3627                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY;
3628
3629         /* Add a bit of hysteresis so this flag isn't continually flapping,
3630          * and ensure that new files don't get extremely fragmented due to
3631          * only a small amount of available space in the filesystem.
3632          * We want to set the NOSPC flag when there is less than ~0.1% free
3633          * and clear it when there is at least ~0.2% free space, so:
3634          *                   avail < ~0.1% max          max = avail + used
3635          *            1025 * avail < avail + used       used = blocks - free
3636          *            1024 * avail < used
3637          *            1024 * avail < blocks - free
3638          *                   avail < ((blocks - free) >> 10)
3639          *
3640          * On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want
3641          * to lose that amount of space, so in those cases we report no space
3642          * left if there is less than 1 GB left.                        */
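        /* Illustrative numbers (in statfs blocks): with os_blocks - os_bfree
         * == 1048576 used blocks, "used" below becomes 1024 blocks after the
         * >> 10, i.e. ~0.1% of what is consumed; NOSPC is set once os_bavail
         * drops below that (or below 32 free inodes) and is only cleared
         * again when os_ffree > 64 and os_bavail exceeds twice "used". */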
3643         used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30);
3644         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) &&
3645                      ((msfs->os_ffree < 32) || (msfs->os_bavail < used))))
3646                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC;
3647         else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3648                           (msfs->os_ffree > 64) &&
3649                           (msfs->os_bavail > (used << 1)))) {
3650                 cli->cl_oscc.oscc_flags &= ~(OSCC_FLAG_NOSPC |
3651                                              OSCC_FLAG_NOSPC_BLK);
3652         }
3653
3654         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3655                      (msfs->os_bavail < used)))
3656                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC_BLK;
3657
3658         cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
3659
3660         *aa->aa_oi->oi_osfs = *msfs;
3661 out:
3662         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3663         RETURN(rc);
3664 }
3665
3666 static int osc_statfs_async(struct obd_device *obd, struct obd_info *oinfo,
3667                             __u64 max_age, struct ptlrpc_request_set *rqset)
3668 {
3669         struct ptlrpc_request *req;
3670         struct osc_async_args *aa;
3671         int                    rc;
3672         ENTRY;
3673
3674         /* We could possibly pass max_age in the request (as an absolute
3675          * timestamp or a "seconds.usec ago") so the target can avoid doing
3676          * extra calls into the filesystem if that isn't necessary (e.g.
3677          * during mount that would help a bit).  Having relative timestamps
3678          * is not so great if request processing is slow, while absolute
3679          * timestamps are not ideal because they need time synchronization. */
3680         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3681         if (req == NULL)
3682                 RETURN(-ENOMEM);
3683
3684         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3685         if (rc) {
3686                 ptlrpc_request_free(req);
3687                 RETURN(rc);
3688         }
3689         ptlrpc_request_set_replen(req);
3690         req->rq_request_portal = OST_CREATE_PORTAL;
3691         ptlrpc_at_set_req_timeout(req);
3692
3693         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3694                 /* procfs requests should not wait on stats, to avoid deadlock */
3695                 req->rq_no_resend = 1;
3696                 req->rq_no_delay = 1;
3697         }
3698
3699         req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
3700         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3701         aa = ptlrpc_req_async_args(req);
3702         aa->aa_oi = oinfo;
3703
3704         ptlrpc_set_add_req(rqset, req);
3705         RETURN(0);
3706 }
3707
3708 static int osc_statfs(struct obd_device *obd, struct obd_statfs *osfs,
3709                       __u64 max_age, __u32 flags)
3710 {
3711         struct obd_statfs     *msfs;
3712         struct ptlrpc_request *req;
3713         struct obd_import     *imp = NULL;
3714         int rc;
3715         ENTRY;
3716
3717         /* Since the request might also come from lprocfs, we need to
3718          * sync this with client_disconnect_export (bug 15684) */
3719         cfs_down_read(&obd->u.cli.cl_sem);
3720         if (obd->u.cli.cl_import)
3721                 imp = class_import_get(obd->u.cli.cl_import);
3722         cfs_up_read(&obd->u.cli.cl_sem);
3723         if (!imp)
3724                 RETURN(-ENODEV);
3725
3726         /* We could possibly pass max_age in the request (as an absolute
3727          * timestamp or a "seconds.usec ago") so the target can avoid doing
3728          * extra calls into the filesystem if that isn't necessary (e.g.
3729          * during mount that would help a bit).  Having relative timestamps
3730          * is not so great if request processing is slow, while absolute
3731          * timestamps are not ideal because they need time synchronization. */
3732         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3733
3734         class_import_put(imp);
3735
3736         if (req == NULL)
3737                 RETURN(-ENOMEM);
3738
3739         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3740         if (rc) {
3741                 ptlrpc_request_free(req);
3742                 RETURN(rc);
3743         }
3744         ptlrpc_request_set_replen(req);
3745         req->rq_request_portal = OST_CREATE_PORTAL;
3746         ptlrpc_at_set_req_timeout(req);
3747
3748         if (flags & OBD_STATFS_NODELAY) {
3749                 /* procfs requests should not wait on stats, to avoid deadlock */
3750                 req->rq_no_resend = 1;
3751                 req->rq_no_delay = 1;
3752         }
3753
3754         rc = ptlrpc_queue_wait(req);
3755         if (rc)
3756                 GOTO(out, rc);
3757
3758         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3759         if (msfs == NULL) {
3760                 GOTO(out, rc = -EPROTO);
3761         }
3762
3763         *osfs = *msfs;
3764
3765         EXIT;
3766  out:
3767         ptlrpc_req_finished(req);
3768         return rc;
3769 }
3770
3771 /* Retrieve object striping information.
3772  *
3773  * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
3774  * the maximum number of OST indices which will fit in the user buffer.
3775  * lmm_magic must be LOV_USER_MAGIC_V1 or _V3 (we only use 1 slot here).
3776  */
3777 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
3778 {
3779         /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
3780         struct lov_user_md_v3 lum, *lumk;
3781         struct lov_user_ost_data_v1 *lmm_objects;
3782         int rc = 0, lum_size;
3783         ENTRY;
3784
3785         if (!lsm)
3786                 RETURN(-ENODATA);
3787
3788         /* we only need the header part from user space to get lmm_magic and
3789          * lmm_stripe_count (the header part is common to v1 and v3) */
3790         lum_size = sizeof(struct lov_user_md_v1);
3791         if (cfs_copy_from_user(&lum, lump, lum_size))
3792                 RETURN(-EFAULT);
3793
3794         if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
3795             (lum.lmm_magic != LOV_USER_MAGIC_V3))
3796                 RETURN(-EINVAL);
3797
3798         /* lov_user_md_vX and lov_mds_md_vX must have the same size */
3799         LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
3800         LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
3801         LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
3802
3803         /* we can use lov_mds_md_size() to compute lum_size
3804          * because lov_user_md_vX and lov_mds_md_vX have the same size */
3805         if (lum.lmm_stripe_count > 0) {
3806                 lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
3807                 OBD_ALLOC(lumk, lum_size);
3808                 if (!lumk)
3809                         RETURN(-ENOMEM);
3810
3811                 if (lum.lmm_magic == LOV_USER_MAGIC_V1)
3812                         lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
3813                 else
3814                         lmm_objects = &(lumk->lmm_objects[0]);
3815                 lmm_objects->l_object_id = lsm->lsm_object_id;
3816         } else {
3817                 lum_size = lov_mds_md_size(0, lum.lmm_magic);
3818                 lumk = &lum;
3819         }
3820
3821         lumk->lmm_object_id = lsm->lsm_object_id;
3822         lumk->lmm_object_seq = lsm->lsm_object_seq;
3823         lumk->lmm_stripe_count = 1;
3824
3825         if (cfs_copy_to_user(lump, lumk, lum_size))
3826                 rc = -EFAULT;
3827
3828         if (lumk != &lum)
3829                 OBD_FREE(lumk, lum_size);
3830
3831         RETURN(rc);
3832 }
3833
3834
3835 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3836                          void *karg, void *uarg)
3837 {
3838         struct obd_device *obd = exp->exp_obd;
3839         struct obd_ioctl_data *data = karg;
3840         int err = 0;
3841         ENTRY;
3842
3843         if (!cfs_try_module_get(THIS_MODULE)) {
3844                 CERROR("Can't get module. Is it alive?\n");
3845                 return -EINVAL;
3846         }
3847         switch (cmd) {
3848         case OBD_IOC_LOV_GET_CONFIG: {
3849                 char *buf;
3850                 struct lov_desc *desc;
3851                 struct obd_uuid uuid;
3852
3853                 buf = NULL;
3854                 len = 0;
3855                 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
3856                         GOTO(out, err = -EINVAL);
3857
3858                 data = (struct obd_ioctl_data *)buf;
3859
3860                 if (sizeof(*desc) > data->ioc_inllen1) {
3861                         obd_ioctl_freedata(buf, len);
3862                         GOTO(out, err = -EINVAL);
3863                 }
3864
3865                 if (data->ioc_inllen2 < sizeof(uuid)) {
3866                         obd_ioctl_freedata(buf, len);
3867                         GOTO(out, err = -EINVAL);
3868                 }
3869
3870                 desc = (struct lov_desc *)data->ioc_inlbuf1;
3871                 desc->ld_tgt_count = 1;
3872                 desc->ld_active_tgt_count = 1;
3873                 desc->ld_default_stripe_count = 1;
3874                 desc->ld_default_stripe_size = 0;
3875                 desc->ld_default_stripe_offset = 0;
3876                 desc->ld_pattern = 0;
3877                 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
3878
3879                 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
3880
3881                 err = cfs_copy_to_user((void *)uarg, buf, len);
3882                 if (err)
3883                         err = -EFAULT;
3884                 obd_ioctl_freedata(buf, len);
3885                 GOTO(out, err);
3886         }
3887         case LL_IOC_LOV_SETSTRIPE:
3888                 err = obd_alloc_memmd(exp, karg);
3889                 if (err > 0)
3890                         err = 0;
3891                 GOTO(out, err);
3892         case LL_IOC_LOV_GETSTRIPE:
3893                 err = osc_getstripe(karg, uarg);
3894                 GOTO(out, err);
3895         case OBD_IOC_CLIENT_RECOVER:
3896                 err = ptlrpc_recover_import(obd->u.cli.cl_import,
3897                                             data->ioc_inlbuf1, 0);
3898                 if (err > 0)
3899                         err = 0;
3900                 GOTO(out, err);
3901         case IOC_OSC_SET_ACTIVE:
3902                 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
3903                                                data->ioc_offset);
3904                 GOTO(out, err);
3905         case OBD_IOC_POLL_QUOTACHECK:
3906                 err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
3907                 GOTO(out, err);
3908         case OBD_IOC_PING_TARGET:
3909                 err = ptlrpc_obd_ping(obd);
3910                 GOTO(out, err);
3911         default:
3912                 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
3913                        cmd, cfs_curproc_comm());
3914                 GOTO(out, err = -ENOTTY);
3915         }
3916 out:
3917         cfs_module_put(THIS_MODULE);
3918         return err;
3919 }
3920
3921 static int osc_get_info(struct obd_export *exp, obd_count keylen,
3922                         void *key, __u32 *vallen, void *val,
3923                         struct lov_stripe_md *lsm)
3924 {
3925         ENTRY;
3926         if (!vallen || !val)
3927                 RETURN(-EFAULT);
3928
3929         if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
3930                 __u32 *stripe = val;
3931                 *vallen = sizeof(*stripe);
3932                 *stripe = 0;
3933                 RETURN(0);
3934         } else if (KEY_IS(KEY_LAST_ID)) {
3935                 struct ptlrpc_request *req;
3936                 obd_id                *reply;
3937                 char                  *tmp;
3938                 int                    rc;
3939
3940                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3941                                            &RQF_OST_GET_INFO_LAST_ID);
3942                 if (req == NULL)
3943                         RETURN(-ENOMEM);
3944
3945                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3946                                      RCL_CLIENT, keylen);
3947                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3948                 if (rc) {
3949                         ptlrpc_request_free(req);
3950                         RETURN(rc);
3951                 }
3952
3953                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3954                 memcpy(tmp, key, keylen);
3955
3956                 req->rq_no_delay = req->rq_no_resend = 1;
3957                 ptlrpc_request_set_replen(req);
3958                 rc = ptlrpc_queue_wait(req);
3959                 if (rc)
3960                         GOTO(out, rc);
3961
3962                 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
3963                 if (reply == NULL)
3964                         GOTO(out, rc = -EPROTO);
3965
3966                 *((obd_id *)val) = *reply;
3967         out:
3968                 ptlrpc_req_finished(req);
3969                 RETURN(rc);
3970         } else if (KEY_IS(KEY_FIEMAP)) {
3971                 struct ptlrpc_request *req;
3972                 struct ll_user_fiemap *reply;
3973                 char *tmp;
3974                 int rc;
3975
3976                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3977                                            &RQF_OST_GET_INFO_FIEMAP);
3978                 if (req == NULL)
3979                         RETURN(-ENOMEM);
3980
3981                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
3982                                      RCL_CLIENT, keylen);
3983                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
3984                                      RCL_CLIENT, *vallen);
3985                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
3986                                      RCL_SERVER, *vallen);
3987
3988                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3989                 if (rc) {
3990                         ptlrpc_request_free(req);
3991                         RETURN(rc);
3992                 }
3993
3994                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
3995                 memcpy(tmp, key, keylen);
3996                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
3997                 memcpy(tmp, val, *vallen);
3998
3999                 ptlrpc_request_set_replen(req);
4000                 rc = ptlrpc_queue_wait(req);
4001                 if (rc)
4002                         GOTO(out1, rc);
4003
4004                 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
4005                 if (reply == NULL)
4006                         GOTO(out1, rc = -EPROTO);
4007
4008                 memcpy(val, reply, *vallen);
4009         out1:
4010                 ptlrpc_req_finished(req);
4011
4012                 RETURN(rc);
4013         }
4014
4015         RETURN(-EINVAL);
4016 }
4017
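     /* Connect the MDS->OST origin llog context of @imp and mark the import
      * as pingable with server-side timeout handling.  Called once the
      * mds_conn setinfo RPC below has completed. */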
4018 static int osc_setinfo_mds_connect_import(struct obd_import *imp)
4019 {
4020         struct llog_ctxt *ctxt;
4021         int rc = 0;
4022         ENTRY;
4023
4024         ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
4025         if (ctxt) {
4026                 rc = llog_initiator_connect(ctxt);
4027                 llog_ctxt_put(ctxt);
4028         } else {
4029                 /* XXX return an error? skip setting below flags? */
4030         }
4031
4032         cfs_spin_lock(&imp->imp_lock);
4033         imp->imp_server_timeout = 1;
4034         imp->imp_pingable = 1;
4035         cfs_spin_unlock(&imp->imp_lock);
4036         CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
4037
4038         RETURN(rc);
4039 }
4040
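     /* Reply callback for the KEY_MDS_CONN setinfo RPC; finishes the MDS
      * connection setup unless the RPC itself failed. */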
4041 static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
4042                                           struct ptlrpc_request *req,
4043                                           void *aa, int rc)
4044 {
4045         ENTRY;
4046         if (rc != 0)
4047                 RETURN(rc);
4048
4049         RETURN(osc_setinfo_mds_connect_import(req->rq_import));
4050 }
4051
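     /* Handle obd_set_info_async() for the OSC.  KEY_NEXT_ID, KEY_CHECKSUM,
      * KEY_SPTLRPC_CONF and KEY_FLUSH_CTX are processed locally; anything
      * else is packed into an OST_SET_INFO RPC that is added to @set, or
      * handed to the ptlrpcd for KEY_GRANT_SHRINK.  A grant shrink request
      * (with body a struct ost_body) would be issued roughly like this
      * (sketch only):
      *
      *      rc = obd_set_info_async(exp, sizeof(KEY_GRANT_SHRINK),
      *                              KEY_GRANT_SHRINK, sizeof(*body), body,
      *                              NULL);
      */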
4052 static int osc_set_info_async(struct obd_export *exp, obd_count keylen,
4053                               void *key, obd_count vallen, void *val,
4054                               struct ptlrpc_request_set *set)
4055 {
4056         struct ptlrpc_request *req;
4057         struct obd_device     *obd = exp->exp_obd;
4058         struct obd_import     *imp = class_exp2cliimp(exp);
4059         char                  *tmp;
4060         int                    rc;
4061         ENTRY;
4062
4063         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
4064
4065         if (KEY_IS(KEY_NEXT_ID)) {
4066                 obd_id new_val;
4067                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4068
4069                 if (vallen != sizeof(obd_id))
4070                         RETURN(-ERANGE);
4071                 if (val == NULL)
4072                         RETURN(-EINVAL);
4076
4077                 /* avoid race between allocate new object and set next id
4078                  * from ll_sync thread */
4079                 cfs_spin_lock(&oscc->oscc_lock);
4080                 new_val = *((obd_id*)val) + 1;
4081                 if (new_val > oscc->oscc_next_id)
4082                         oscc->oscc_next_id = new_val;
4083                 cfs_spin_unlock(&oscc->oscc_lock);
4084                 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
4085                        exp->exp_obd->obd_name,
4086                        obd->u.cli.cl_oscc.oscc_next_id);
4087
4088                 RETURN(0);
4089         }
4090
4091         if (KEY_IS(KEY_CHECKSUM)) {
4092                 if (vallen != sizeof(int))
4093                         RETURN(-EINVAL);
4094                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
4095                 RETURN(0);
4096         }
4097
4098         if (KEY_IS(KEY_SPTLRPC_CONF)) {
4099                 sptlrpc_conf_client_adapt(obd);
4100                 RETURN(0);
4101         }
4102
4103         if (KEY_IS(KEY_FLUSH_CTX)) {
4104                 sptlrpc_import_flush_my_ctx(imp);
4105                 RETURN(0);
4106         }
4107
4108         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
4109                 RETURN(-EINVAL);
4110
4111         /* We pass all other commands directly to the OST. Since nobody calls
4112            osc methods directly and everybody is supposed to go through LOV,
4113            we assume LOV checked invalid values for us.
4114            The only recognised values so far are evict_by_nid and mds_conn.
4115            Even if something bad gets through, the OST would return -EINVAL
4116            anyway. */
4117
4118         if (KEY_IS(KEY_GRANT_SHRINK))
4119                 req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
4120         else
4121                 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
4122
4123         if (req == NULL)
4124                 RETURN(-ENOMEM);
4125
4126         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
4127                              RCL_CLIENT, keylen);
4128         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
4129                              RCL_CLIENT, vallen);
4130         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
4131         if (rc) {
4132                 ptlrpc_request_free(req);
4133                 RETURN(rc);
4134         }
4135
4136         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
4137         memcpy(tmp, key, keylen);
4138         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
4139         memcpy(tmp, val, vallen);
4140
4141         if (KEY_IS(KEY_MDS_CONN)) {
4142                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4143
4144                 oscc->oscc_oa.o_seq = (*(__u32 *)val);
4145                 oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
4146                 LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq);
4147                 req->rq_no_delay = req->rq_no_resend = 1;
4148                 req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
4149         } else if (KEY_IS(KEY_GRANT_SHRINK)) {
4150                 struct osc_grant_args *aa;
4151                 struct obdo *oa;
4152
4153                 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
4154                 aa = ptlrpc_req_async_args(req);
4155                 OBDO_ALLOC(oa);
4156                 if (!oa) {
4157                         ptlrpc_req_finished(req);
4158                         RETURN(-ENOMEM);
4159                 }
4160                 *oa = ((struct ost_body *)val)->oa;
4161                 aa->aa_oa = oa;
4162                 req->rq_interpret_reply = osc_shrink_grant_interpret;
4163         }
4164
4165         ptlrpc_request_set_replen(req);
4166         if (!KEY_IS(KEY_GRANT_SHRINK)) {
4167                 LASSERT(set != NULL);
4168                 ptlrpc_set_add_req(set, req);
4169                 ptlrpc_check_set(NULL, set);
4170         } else
4171                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
4172
4173         RETURN(0);
4174 }
4175
4176
4177 static struct llog_operations osc_size_repl_logops = {
4178         lop_cancel: llog_obd_repl_cancel
4179 };
4180
4181 static struct llog_operations osc_mds_ost_orig_logops;
4182
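     /* Set up the two llog contexts used on the MDS side of this OSC: the
      * MDS->OST origin context and the size-changes replicator context.  If
      * the second setup fails, the first context is cleaned up again. */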
4183 static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4184                            struct obd_device *tgt, struct llog_catid *catid)
4185 {
4186         int rc;
4187         ENTRY;
4188
4189         rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1,
4190                         &catid->lci_logid, &osc_mds_ost_orig_logops);
4191         if (rc) {
4192                 CERROR("failed LLOG_MDS_OST_ORIG_CTXT\n");
4193                 GOTO(out, rc);
4194         }
4195
4196         rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1,
4197                         NULL, &osc_size_repl_logops);
4198         if (rc) {
4199                 struct llog_ctxt *ctxt =
4200                         llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4201                 if (ctxt)
4202                         llog_cleanup(ctxt);
4203                 CERROR("failed LLOG_SIZE_REPL_CTXT\n");
4204         }
4205         GOTO(out, rc);
4206 out:
4207         if (rc) {
4208                 CERROR("osc '%s' tgt '%s' catid %p rc=%d\n",
4209                        obd->obd_name, tgt->obd_name, catid, rc);
4210                 CERROR("logid "LPX64":0x%x\n",
4211                        catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
4212         }
4213         return rc;
4214 }
4215
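     /* Read the catalog id for this target from the CATLIST file on
      * @disk_obd, initialize the llog contexts with it and write the
      * (possibly updated) catalog id back, all serialized by
      * olg_cat_processing. */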
4216 static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4217                          struct obd_device *disk_obd, int *index)
4218 {
4219         struct llog_catid catid;
4220         static char name[32] = CATLIST;
4221         int rc;
4222         ENTRY;
4223
4224         LASSERT(olg == &obd->obd_olg);
4225
4226         cfs_mutex_down(&olg->olg_cat_processing);
4227         rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
4228         if (rc) {
4229                 CERROR("can't get llog catalog list: rc = %d\n", rc);
4230                 GOTO(out, rc);
4231         }
4232
4233         CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
4234                obd->obd_name, *index, catid.lci_logid.lgl_oid,
4235                catid.lci_logid.lgl_oseq, catid.lci_logid.lgl_ogen);
4236
4237         rc = __osc_llog_init(obd, olg, disk_obd, &catid);
4238         if (rc) {
4239                 CERROR("llog contexts setup failed: rc = %d\n", rc);
4240                 GOTO(out, rc);
4241         }
4242
4243         rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid);
4244         if (rc) {
4245                 CERROR("can't put llog catalog list: rc = %d\n", rc);
4246                 GOTO(out, rc);
4247         }
4248
4249  out:
4250         cfs_mutex_up(&olg->olg_cat_processing);
4251
4252         return rc;
4253 }
4254
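     /* Tear down both llog contexts set up in __osc_llog_init(), returning
      * the first non-zero cleanup result. */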
4255 static int osc_llog_finish(struct obd_device *obd, int count)
4256 {
4257         struct llog_ctxt *ctxt;
4258         int rc = 0, rc2 = 0;
4259         ENTRY;
4260
4261         ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4262         if (ctxt)
4263                 rc = llog_cleanup(ctxt);
4264
4265         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4266         if (ctxt)
4267                 rc2 = llog_cleanup(ctxt);
4268         if (!rc)
4269                 rc = rc2;
4270
4271         RETURN(rc);
4272 }
4273
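     /* On reconnect, request a grant based on the locally known avail_grant
      * and dirty counts (or a two-RPC minimum) and reset the lost_grant
      * accounting. */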
4274 static int osc_reconnect(const struct lu_env *env,
4275                          struct obd_export *exp, struct obd_device *obd,
4276                          struct obd_uuid *cluuid,
4277                          struct obd_connect_data *data,
4278                          void *localdata)
4279 {
4280         struct client_obd *cli = &obd->u.cli;
4281
4282         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
4283                 long lost_grant;
4284
4285                 client_obd_list_lock(&cli->cl_loi_list_lock);
4286                 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
4287                                 2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
4288                 lost_grant = cli->cl_lost_grant;
4289                 cli->cl_lost_grant = 0;
4290                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4291
4292                 CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
4293                        "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant,
4294                        cli->cl_avail_grant, cli->cl_dirty, lost_grant);
4295                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
4296                        " ocd_grant: %d\n", data->ocd_connect_flags,
4297                        data->ocd_version, data->ocd_grant);
4298         }
4299
4300         RETURN(0);
4301 }
4302
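     /* Disconnect from the OST: flush pending size-changes llog cancels on
      * the last connection, disconnect the export, and only then drop this
      * client from the grant shrink list (see the BUG18662 note below). */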
4303 static int osc_disconnect(struct obd_export *exp)
4304 {
4305         struct obd_device *obd = class_exp2obd(exp);
4306         struct llog_ctxt  *ctxt;
4307         int rc;
4308
4309         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4310         if (ctxt) {
4311                 if (obd->u.cli.cl_conn_count == 1) {
4312                         /* Flush any remaining cancel messages out to the
4313                          * target */
4314                         llog_sync(ctxt, exp);
4315                 }
4316                 llog_ctxt_put(ctxt);
4317         } else {
4318                 CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
4319                        obd);
4320         }
4321
4322         rc = client_disconnect_export(exp);
4323         /**
4324          * Initially we put del_shrink_grant before disconnect_export, but it
4325          * causes the following problem if setup (connect) and cleanup
4326          * (disconnect) are tangled together.
4327          *      connect p1                     disconnect p2
4328          *   ptlrpc_connect_import
4329          *     ...............               class_manual_cleanup
4330          *                                     osc_disconnect
4331          *                                     del_shrink_grant
4332          *   ptlrpc_connect_interrupt
4333          *     init_grant_shrink
4334          *   add this client to shrink list
4335          *                                      cleanup_osc
4336          * Bang! pinger trigger the shrink.
4337          * So the osc should be disconnected from the shrink list, after we
4338          * are sure the import has been destroyed. BUG18662
4339          */
4340         if (obd->u.cli.cl_import == NULL)
4341                 osc_del_shrink_grant(&obd->u.cli);
4342         return rc;
4343 }
4344
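     /* React to import state changes: reset grant accounting on disconnect,
      * flush outstanding pages and local DLM locks on invalidation, re-read
      * the grant from the connect data on OCD, and forward the remaining
      * events to the obd observer (typically the LOV). */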
4345 static int osc_import_event(struct obd_device *obd,
4346                             struct obd_import *imp,
4347                             enum obd_import_event event)
4348 {
4349         struct client_obd *cli;
4350         int rc = 0;
4351
4352         ENTRY;
4353         LASSERT(imp->imp_obd == obd);
4354
4355         switch (event) {
4356         case IMP_EVENT_DISCON: {
4357                 /* Only do this on the MDS OSCs */
4358                 if (imp->imp_server_timeout) {
4359                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4360
4361                         cfs_spin_lock(&oscc->oscc_lock);
4362                         oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
4363                         cfs_spin_unlock(&oscc->oscc_lock);
4364                 }
4365                 cli = &obd->u.cli;
4366                 client_obd_list_lock(&cli->cl_loi_list_lock);
4367                 cli->cl_avail_grant = 0;
4368                 cli->cl_lost_grant = 0;
4369                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4370                 break;
4371         }
4372         case IMP_EVENT_INACTIVE: {
4373                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
4374                 break;
4375         }
4376         case IMP_EVENT_INVALIDATE: {
4377                 struct ldlm_namespace *ns = obd->obd_namespace;
4378                 struct lu_env         *env;
4379                 int                    refcheck;
4380
4381                 env = cl_env_get(&refcheck);
4382                 if (!IS_ERR(env)) {
4383                         /* Reset grants */
4384                         cli = &obd->u.cli;
4385                         client_obd_list_lock(&cli->cl_loi_list_lock);
4386                         /* all pages go to failing rpcs due to the invalid
4387                          * import */
4388                         osc_check_rpcs(env, cli);
4389                         client_obd_list_unlock(&cli->cl_loi_list_lock);
4390
4391                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
4392                         cl_env_put(env, &refcheck);
4393                 } else
4394                         rc = PTR_ERR(env);
4395                 break;
4396         }
4397         case IMP_EVENT_ACTIVE: {
4398                 /* Only do this on the MDS OSCs */
4399                 if (imp->imp_server_timeout) {
4400                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4401
4402                         cfs_spin_lock(&oscc->oscc_lock);
4403                         oscc->oscc_flags &= ~(OSCC_FLAG_NOSPC |
4404                                               OSCC_FLAG_NOSPC_BLK);
4405                         cfs_spin_unlock(&oscc->oscc_lock);
4406                 }
4407                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
4408                 break;
4409         }
4410         case IMP_EVENT_OCD: {
4411                 struct obd_connect_data *ocd = &imp->imp_connect_data;
4412
4413                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
4414                         osc_init_grant(&obd->u.cli, ocd);
4415
4416                 /* See bug 7198 */
4417                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
4418                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
4419
4420                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
4421                 break;
4422         }
4423         case IMP_EVENT_DEACTIVATE: {
4424                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
4425                 break;
4426         }
4427         case IMP_EVENT_ACTIVATE: {
4428                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
4429                 break;
4430         }
4431         default:
4432                 CERROR("Unknown import event %d\n", event);
4433                 LBUG();
4434         }
4435         RETURN(rc);
4436 }
4437
4438 /**
4439  * Determine whether the lock can be canceled before replaying the lock
4440  * during recovery, see bug16774 for detailed information.
4441  *
4442  * \retval zero the lock can't be canceled
4443  * \retval other ok to cancel
4444  */
4445 static int osc_cancel_for_recovery(struct ldlm_lock *lock)
4446 {
4447         check_res_locked(lock->l_resource);
4448
4449         /*
4450          * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
4451          *
4452          * XXX as a future improvement, we can also cancel unused write lock
4453          * if it doesn't have dirty data and active mmaps.
4454          */
4455         if (lock->l_resource->lr_type == LDLM_EXTENT &&
4456             (lock->l_granted_mode == LCK_PR ||
4457              lock->l_granted_mode == LCK_CR) &&
4458             (osc_dlm_lock_pageref(lock) == 0))
4459                 RETURN(1);
4460
4461         RETURN(0);
4462 }
4463
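     /* Set up an OSC device: take a ptlrpcd reference, set up the client
      * import and the writeback work item, register lprocfs entries, start
      * the object creator, pre-allocate a request pool of
      * max_rpcs_in_flight + 2 and register the cancel-for-recovery callback
      * on the namespace. */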
4464 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
4465 {
4466         struct client_obd *cli = &obd->u.cli;
4467         int rc;
4468         ENTRY;
4469
4471         rc = ptlrpcd_addref();
4472         if (rc)
4473                 RETURN(rc);
4474
4475         rc = client_obd_setup(obd, lcfg);
4476         if (rc == 0) {
4477                 void *handler;
4478                 handler = ptlrpcd_alloc_work(cli->cl_import,
4479                                              brw_queue_work, cli);
4480                 if (!IS_ERR(handler))
4481                         cli->cl_writeback_work = handler;
4482                 else
4483                         rc = PTR_ERR(handler);
4484         }
4485
4486         if (rc == 0) {
4487                 struct lprocfs_static_vars lvars = { 0 };
4488
4489                 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
4490                 lprocfs_osc_init_vars(&lvars);
4491                 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
4492                         lproc_osc_attach_seqstat(obd);
4493                         sptlrpc_lprocfs_cliobd_attach(obd);
4494                         ptlrpc_lprocfs_register_obd(obd);
4495                 }
4496
4497                 oscc_init(obd);
4498                 /* We need to allocate a few more requests, because
4499                    brw_interpret tries to create new requests before freeing
4500                    previous ones. Ideally we want 2x max_rpcs_in_flight
4501                    reserved, but that might waste too much RAM in practice,
4502                    so adding 2 is a guess that should still work. */
4503                 cli->cl_import->imp_rq_pool =
4504                         ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
4505                                             OST_MAXREQSIZE,
4506                                             ptlrpc_add_rqs_to_pool);
4507
4508                 CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
4509                 cfs_sema_init(&cli->cl_grant_sem, 1);
4510
4511                 ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
4512         }
4513
4514         if (rc)
4515                 ptlrpcd_decref();
4516         RETURN(rc);
4517 }
4518
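     /* Two-stage pre-cleanup: OBD_CLEANUP_EARLY deactivates the import and
      * stops pinging it; OBD_CLEANUP_EXPORTS destroys the writeback work
      * item, the client import, the lprocfs entries and the llog
      * contexts. */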
4519 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
4520 {
4521         int rc = 0;
4522         ENTRY;
4523
4524         switch (stage) {
4525         case OBD_CLEANUP_EARLY: {
4526                 struct obd_import *imp;
4527                 imp = obd->u.cli.cl_import;
4528                 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
4529                 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
4530                 ptlrpc_deactivate_import(imp);
4531                 cfs_spin_lock(&imp->imp_lock);
4532                 imp->imp_pingable = 0;
4533                 cfs_spin_unlock(&imp->imp_lock);
4534                 break;
4535         }
4536         case OBD_CLEANUP_EXPORTS: {
4537                 struct client_obd *cli = &obd->u.cli;
4538                 /* LU-464
4539                  * for echo client, export may be on zombie list, wait for
4540                  * zombie thread to cull it, because cli.cl_import will be
4541                  * cleared in client_disconnect_export():
4542                  *   class_export_destroy() -> obd_cleanup() ->
4543                  *   echo_device_free() -> echo_client_cleanup() ->
4544                  *   obd_disconnect() -> osc_disconnect() ->
4545                  *   client_disconnect_export()
4546                  */
4547                 obd_zombie_barrier();
4548                 if (cli->cl_writeback_work) {
4549                         ptlrpcd_destroy_work(cli->cl_writeback_work);
4550                         cli->cl_writeback_work = NULL;
4551                 }
4552                 obd_cleanup_client_import(obd);
4553                 ptlrpc_lprocfs_unregister_obd(obd);
4554                 lprocfs_obd_cleanup(obd);
4555                 rc = obd_llog_finish(obd, 0);
4556                 if (rc != 0)
4557                         CERROR("failed to cleanup llogging subsystems\n");
4558                 break;
4559                 }
4560         }
4561         RETURN(rc);
4562 }
4563
4564 int osc_cleanup(struct obd_device *obd)
4565 {
4566         int rc;
4567
4568         ENTRY;
4569
4570         /* free memory of osc quota cache */
4571         osc_quota_cleanup(obd);
4572
4573         rc = client_obd_cleanup(obd);
4574
4575         ptlrpcd_decref();
4576         RETURN(rc);
4577 }
4578
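     /* Apply a configuration command to the OSC; any command not handled
      * explicitly is treated as a proc parameter write in the osc.*
      * namespace. */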
4579 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
4580 {
4581         struct lprocfs_static_vars lvars = { 0 };
4582         int rc = 0;
4583
4584         lprocfs_osc_init_vars(&lvars);
4585
4586         switch (lcfg->lcfg_command) {
4587         default:
4588                 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
4589                                               lcfg, obd);
4590                 if (rc > 0)
4591                         rc = 0;
4592                 break;
4593         }
4594
4595         return(rc);
4596 }
4597
4598 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
4599 {
4600         return osc_process_config_base(obd, buf);
4601 }
4602
4603 struct obd_ops osc_obd_ops = {
4604         .o_owner                = THIS_MODULE,
4605         .o_setup                = osc_setup,
4606         .o_precleanup           = osc_precleanup,
4607         .o_cleanup              = osc_cleanup,
4608         .o_add_conn             = client_import_add_conn,
4609         .o_del_conn             = client_import_del_conn,
4610         .o_connect              = client_connect_import,
4611         .o_reconnect            = osc_reconnect,
4612         .o_disconnect           = osc_disconnect,
4613         .o_statfs               = osc_statfs,
4614         .o_statfs_async         = osc_statfs_async,
4615         .o_packmd               = osc_packmd,
4616         .o_unpackmd             = osc_unpackmd,
4617         .o_precreate            = osc_precreate,
4618         .o_create               = osc_create,
4619         .o_create_async         = osc_create_async,
4620         .o_destroy              = osc_destroy,
4621         .o_getattr              = osc_getattr,
4622         .o_getattr_async        = osc_getattr_async,
4623         .o_setattr              = osc_setattr,
4624         .o_setattr_async        = osc_setattr_async,
4625         .o_brw                  = osc_brw,
4626         .o_punch                = osc_punch,
4627         .o_sync                 = osc_sync,
4628         .o_enqueue              = osc_enqueue,
4629         .o_change_cbdata        = osc_change_cbdata,
4630         .o_find_cbdata          = osc_find_cbdata,
4631         .o_cancel               = osc_cancel,
4632         .o_cancel_unused        = osc_cancel_unused,
4633         .o_iocontrol            = osc_iocontrol,
4634         .o_get_info             = osc_get_info,
4635         .o_set_info_async       = osc_set_info_async,
4636         .o_import_event         = osc_import_event,
4637         .o_llog_init            = osc_llog_init,
4638         .o_llog_finish          = osc_llog_finish,
4639         .o_process_config       = osc_process_config,
4640         .o_quotactl             = osc_quotactl,
4641         .o_quotacheck           = osc_quotacheck,
4642         .o_quota_adjust_qunit   = osc_quota_adjust_qunit,
4643 };
4644
4645 extern struct lu_kmem_descr osc_caches[];
4646 extern cfs_spinlock_t       osc_ast_guard;
4647 extern cfs_lock_class_key_t osc_ast_guard_class;
4648
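     /* Module init: register the OSC obd type together with its lu_device
      * type, initialize the shared AST guard lock and set up the origin
      * llog operations vector. */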
4649 int __init osc_init(void)
4650 {
4651         struct lprocfs_static_vars lvars = { 0 };
4652         int rc;
4653         ENTRY;
4654
4655         /* print an address of _any_ initialized kernel symbol from this
4656          * module, to allow debugging with gdb that doesn't support data
4657          * symbols from modules.*/
4658         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
4659
4660         rc = lu_kmem_init(osc_caches);
             if (rc)
                     RETURN(rc);
4661
4662         lprocfs_osc_init_vars(&lvars);
4663
4664         osc_quota_init();
4665         rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
4666                                  LUSTRE_OSC_NAME, &osc_device_type);
4667         if (rc) {
4668                 lu_kmem_fini(osc_caches);
4669                 RETURN(rc);
4670         }
4671
4672         cfs_spin_lock_init(&osc_ast_guard);
4673         cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
4674
4675         osc_mds_ost_orig_logops = llog_lvfs_ops;
4676         osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
4677         osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
4678         osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
4679         osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
4680
4681         RETURN(rc);
4682 }
4683
4684 #ifdef __KERNEL__
4685 static void /*__exit*/ osc_exit(void)
4686 {
4687         lu_device_type_fini(&osc_device_type);
4688
4689         osc_quota_exit();
4690         class_unregister_type(LUSTRE_OSC_NAME);
4691         lu_kmem_fini(osc_caches);
4692 }
4693
4694 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4695 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
4696 MODULE_LICENSE("GPL");
4697
4698 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);
4699 #endif