lustre/osc/osc_request.c (fs/lustre-release.git, commit 1efb9a210fb1ad66c2499cb8267c92b1dd5e0481)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Whamcloud, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_OSC

#include <libcfs/libcfs.h>

#ifndef __KERNEL__
# include <liblustre.h>
#endif

#include <lustre_dlm.h>
#include <lustre_net.h>
#include <lustre/lustre_user.h>
#include <obd_cksum.h>
#include <obd_ost.h>
#include <obd_lov.h>

#ifdef  __CYGWIN__
# include <ctype.h>
#endif

#include <lustre_ha.h>
#include <lprocfs_status.h>
#include <lustre_log.h>
#include <lustre_debug.h>
#include <lustre_param.h>
#include "osc_internal.h"

static void osc_release_ppga(struct brw_page **ppga, obd_count count);
static int brw_interpret(const struct lu_env *env,
                         struct ptlrpc_request *req, void *data, int rc);
static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli,
                            int ptlrpc);
int osc_cleanup(struct obd_device *obd);

/* Pack OSC object metadata for disk storage (LE byte order). */
static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
                      struct lov_stripe_md *lsm)
{
        int lmm_size;
        ENTRY;

        lmm_size = sizeof(**lmmp);
        if (!lmmp)
                RETURN(lmm_size);

        if (*lmmp && !lsm) {
                OBD_FREE(*lmmp, lmm_size);
                *lmmp = NULL;
                RETURN(0);
        }

        if (!*lmmp) {
                OBD_ALLOC(*lmmp, lmm_size);
                if (!*lmmp)
                        RETURN(-ENOMEM);
        }

        if (lsm) {
                LASSERT(lsm->lsm_object_id);
                LASSERT_SEQ_IS_MDT(lsm->lsm_object_seq);
                (*lmmp)->lmm_object_id = cpu_to_le64(lsm->lsm_object_id);
                (*lmmp)->lmm_object_seq = cpu_to_le64(lsm->lsm_object_seq);
        }

        RETURN(lmm_size);
}
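
/*
 * Editor's note (illustrative, not part of the original source): the
 * calling convention above is tri-state on its arguments, so a
 * hypothetical caller can size, allocate/pack, and free with the same
 * function:
 *
 *     int lmm_size = osc_packmd(exp, NULL, NULL);   // size query only
 *     struct lov_mds_md *lmm = NULL;
 *     rc = osc_packmd(exp, &lmm, lsm);   // allocates *lmm and packs lsm
 *     rc = osc_packmd(exp, &lmm, NULL);  // frees *lmm and NULLs it
 *
 * On success the positive return value is the packed size in bytes.
 */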

/* Unpack OSC object metadata from disk storage (LE byte order). */
static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                        struct lov_mds_md *lmm, int lmm_bytes)
{
        int lsm_size;
        struct obd_import *imp = class_exp2cliimp(exp);
        ENTRY;

        if (lmm != NULL) {
                if (lmm_bytes < sizeof(*lmm)) {
                        CERROR("lov_mds_md too small: %d, need %d\n",
                               lmm_bytes, (int)sizeof(*lmm));
                        RETURN(-EINVAL);
                }
                /* XXX LOV_MAGIC etc check? */

                if (lmm->lmm_object_id == 0) {
                        CERROR("lov_mds_md: zero lmm_object_id\n");
                        RETURN(-EINVAL);
                }
        }

        lsm_size = lov_stripe_md_size(1);
        if (lsmp == NULL)
                RETURN(lsm_size);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                OBD_FREE(*lsmp, lsm_size);
                *lsmp = NULL;
                RETURN(0);
        }

        if (*lsmp == NULL) {
                OBD_ALLOC(*lsmp, lsm_size);
                if (*lsmp == NULL)
                        RETURN(-ENOMEM);
                OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
                if ((*lsmp)->lsm_oinfo[0] == NULL) {
                        OBD_FREE(*lsmp, lsm_size);
                        RETURN(-ENOMEM);
                }
                loi_init((*lsmp)->lsm_oinfo[0]);
        }

        if (lmm != NULL) {
                /* XXX zero *lsmp? */
                (*lsmp)->lsm_object_id = le64_to_cpu(lmm->lmm_object_id);
                (*lsmp)->lsm_object_seq = le64_to_cpu(lmm->lmm_object_seq);
                LASSERT((*lsmp)->lsm_object_id);
                LASSERT_SEQ_IS_MDT((*lsmp)->lsm_object_seq);
        }

        if (imp != NULL &&
            (imp->imp_connect_data.ocd_connect_flags & OBD_CONNECT_MAXBYTES))
                (*lsmp)->lsm_maxbytes = imp->imp_connect_data.ocd_maxbytes;
        else
                (*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;

        RETURN(lsm_size);
}
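
/*
 * Editor's note: osc_unpackmd() mirrors osc_packmd(): a NULL @lsmp is a
 * size query, @lmm == NULL with an existing *lsmp frees it, and otherwise
 * the wire metadata is unpacked into a (possibly freshly allocated)
 * single-stripe lov_stripe_md.  lsm_maxbytes comes from the server's
 * connect data when the OST advertises OBD_CONNECT_MAXBYTES; older
 * servers fall back to the static LUSTRE_STRIPE_MAXBYTES limit.
 */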

static inline void osc_pack_capa(struct ptlrpc_request *req,
                                 struct ost_body *body, void *capa)
{
        struct obd_capa *oc = (struct obd_capa *)capa;
        struct lustre_capa *c;

        if (!capa)
                return;

        c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1);
        LASSERT(c);
        capa_cpy(c, oc);
        body->oa.o_valid |= OBD_MD_FLOSSCAPA;
        DEBUG_CAPA(D_SEC, c, "pack");
}

static inline void osc_pack_req_body(struct ptlrpc_request *req,
                                     struct obd_info *oinfo)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);
}

static inline void osc_set_capa_size(struct ptlrpc_request *req,
                                     const struct req_msg_field *field,
                                     struct obd_capa *oc)
{
        if (oc == NULL)
                req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0);
        else
                /* it is already calculated as sizeof struct obd_capa */
                ;
}

static int osc_getattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_async_args *aa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body) {
                CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
                lustre_get_wire_obdo(aa->aa_oi->oi_oa, &body->oa);

                /* This should really be sent by the OST */
                aa->aa_oi->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
                aa->aa_oi->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
        } else {
                CDEBUG(D_INFO, "can't unpack ost_body\n");
                rc = -EPROTO;
                aa->aa_oi->oi_oa->o_valid = 0;
        }
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}
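
/*
 * Editor's note: the pattern above repeats throughout this file for
 * async RPCs: allocate the request, pack the body, point
 * rq_interpret_reply at a completion callback, stash the callback's
 * arguments in the rq_async_args scratch space (the CLASSERT guards
 * its size), and hand the request to the caller's ptlrpc_request_set.
 * The interpret function then runs in ptlrpc completion context, not
 * in the caller's.
 */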

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obd_info *oinfo)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        /* This should really be sent by the OST */
        oinfo->oi_oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);
        return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obd_info *oinfo, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(oinfo->oi_oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);
        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_setattr_args *sa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(sa->sa_oa, &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
                           struct obd_trans_info *oti,
                           obd_enqueue_update_f upcall, void *cookie,
                           struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
                oinfo->oi_oa->o_lcookie = *oti->oti_logcookies;

        osc_pack_req_body(req, oinfo);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        } else {
                req->rq_interpret_reply =
                        (ptlrpc_interpterer_t)osc_setattr_interpret;

                CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
                sa = ptlrpc_req_async_args(req);
                sa->sa_oa = oinfo->oi_oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                if (rqset == PTLRPCD_SET)
                        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
                else
                        ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}
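
/*
 * Editor's note: @rqset selects the completion model above.  NULL means
 * fire-and-forget (the request goes straight to a ptlrpcd thread and no
 * interpret callback is set), PTLRPCD_SET means ptlrpcd sends it but the
 * osc_setattr_interpret() upcall still runs on completion, and any other
 * set leaves the actual send to the caller via ptlrpc_set_add_req().
 */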

static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
                             struct obd_trans_info *oti,
                             struct ptlrpc_request_set *rqset)
{
        return osc_setattr_async_base(exp, oinfo, oti,
                                      oinfo->oi_cb_up, oinfo, rqset);
}

int osc_real_create(struct obd_export *exp, struct obdo *oa,
                    struct lov_stripe_md **ea, struct obd_trans_info *oti)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct lov_stripe_md  *lsm;
        int                    rc;
        ENTRY;

        LASSERT(oa);
        LASSERT(ea);

        lsm = *ea;
        if (!lsm) {
                rc = obd_alloc_memmd(exp, &lsm);
                if (rc < 0)
                        RETURN(rc);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        ptlrpc_request_set_replen(req);

        if ((oa->o_valid & OBD_MD_FLFLAGS) &&
            oa->o_flags == OBD_FL_DELORPHAN) {
                DEBUG_REQ(D_HA, req,
                          "delorphan from OST integration");
                /* Don't resend the delorphan req */
                req->rq_no_resend = req->rq_no_delay = 1;
        }

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        lustre_get_wire_obdo(oa, &body->oa);

        /* This should really be sent by the OST */
        oa->o_blksize = PTLRPC_MAX_BRW_SIZE;
        oa->o_valid |= OBD_MD_FLBLKSZ;

        /* XXX LOV STACKING: the lsm that is passed to us from LOV does not
         * have valid lsm_oinfo data structs, so don't go touching that.
         * This needs to be fixed in a big way.
         */
        lsm->lsm_object_id = oa->o_id;
        lsm->lsm_object_seq = oa->o_seq;
        *ea = lsm;

        if (oti != NULL) {
                oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);

                if (oa->o_valid & OBD_MD_FLCOOKIE) {
                        if (!oti->oti_logcookies)
                                oti_alloc_cookies(oti, 1);
                        *oti->oti_logcookies = oa->o_lcookie;
                }
        }

        CDEBUG(D_HA, "transno: "LPD64"\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        if (rc && !*ea)
                obd_free_memmd(exp, &lsm);
        RETURN(rc);
}

int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
                   obd_enqueue_update_f upcall, void *cookie,
                   struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        struct ost_body         *body;
        int                      rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(req);
        sa->sa_oa     = oinfo->oi_oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;
        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

static int osc_punch(const struct lu_env *env, struct obd_export *exp,
                     struct obd_info *oinfo, struct obd_trans_info *oti,
                     struct ptlrpc_request_set *rqset)
{
        oinfo->oi_oa->o_size   = oinfo->oi_policy.l_extent.start;
        oinfo->oi_oa->o_blocks = oinfo->oi_policy.l_extent.end;
        oinfo->oi_oa->o_valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
        return osc_punch_base(exp, oinfo,
                              oinfo->oi_cb_up, oinfo, rqset);
}
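
/*
 * Editor's note: osc_punch() above shows how a truncate is encoded on
 * the wire: the extent start goes in o_size and the extent end in
 * o_blocks, with OBD_MD_FLSIZE|OBD_MD_FLBLOCKS marking both fields
 * valid.  For a plain truncate the end is typically OBD_OBJECT_EOF (an
 * assumption about the caller, not enforced here); osc_sync() below
 * overloads the same two fields for its start/end range.
 */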

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req,
                              void *arg, int rc)
{
        struct osc_async_args *aa = arg;
        struct ost_body *body;
        ENTRY;

        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *aa->aa_oi->oi_oa = body->oa;
out:
        rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
        RETURN(rc);
}

static int osc_sync(const struct lu_env *env, struct obd_export *exp,
                    struct obd_info *oinfo, obd_size start, obd_size end,
                    struct ptlrpc_request_set *set)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_async_args *aa;
        int                    rc;
        ENTRY;

        if (!oinfo->oi_oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oinfo->oi_oa);
        body->oa.o_size = start;
        body->oa.o_blocks = end;
        body->oa.o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
        osc_pack_capa(req, body, oinfo->oi_capa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oi = oinfo;

        ptlrpc_set_add_req(set, req);
        RETURN(0);
}

/* Find and cancel locally the locks matching @mode in the resource found
 * by @objid. Found locks are added to the @cancels list. Returns the
 * number of locks added to the list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   cfs_list_t *cancels,
                                   ldlm_mode_t mode, int lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        osc_build_res_name(oa->o_id, oa->o_seq, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (res == NULL)
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        cfs_atomic_dec(&cli->cl_destroy_in_flight);
        cfs_waitq_signal(&cli->cl_destroy_waitq);
        return 0;
}

static int osc_can_send_destroy(struct client_obd *cli)
{
        if (cfs_atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (cfs_atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                cfs_waitq_signal(&cli->cl_destroy_waitq);
        }
        return 0;
}
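
/*
 * Editor's note: the inc-then-test protocol above avoids taking a lock
 * on the destroy path.  A sender optimistically increments
 * cl_destroy_in_flight and keeps the slot if the count is still within
 * cl_max_rpcs_in_flight.  On failure it decrements again, and if the
 * post-decrement value shows another thread completed in between, it
 * wakes cl_destroy_waitq so a waiter re-checks instead of sleeping on a
 * slot that is actually free.
 */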

/* Destroy requests can always be async on the client, and we don't even
 * really care about the return code, since the client cannot do anything
 * at all about a destroy failure.
 * When the MDS is unlinking a filename, it saves the file objects into a
 * recovery llog, and these object records are cancelled when the OST
 * reports they were destroyed and sync'd to disk (i.e. transaction
 * committed). If the client dies, or the OST is down when the object
 * should be destroyed, the records are not cancelled, and when the OST
 * next reconnects to the MDS it will retrieve the llog unlink records and
 * then send the log cancellation cookies to the MDS after committing the
 * destroy transactions. */
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa, struct lov_stripe_md *ea,
                       struct obd_trans_info *oti, struct obd_export *md_export,
                       void *capa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        CFS_LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (oti != NULL && oa->o_valid & OBD_MD_FLCOOKIE)
                oa->o_lcookie = *oti->oti_logcookies;
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&body->oa, oa);

        osc_pack_capa(req, body, (struct obd_capa *)capa);
        ptlrpc_request_set_replen(req);

        /* don't throttle destroy RPCs for the MDT */
        if (!(cli->cl_import->imp_connect_flags_orig & OBD_CONNECT_MDS)) {
                req->rq_interpret_reply = osc_destroy_interpret;
                if (!osc_can_send_destroy(cli)) {
                        struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP,
                                                          NULL);

                        /*
                         * Wait until the number of in-flight destroy RPCs
                         * drops below cl_max_rpcs_in_flight.
                         */
                        l_wait_event_exclusive(cli->cl_destroy_waitq,
                                               osc_can_send_destroy(cli), &lwi);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
        RETURN(0);
}

static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        obd_flag bits = OBD_MD_FLBLOCKS|OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_dirty = cli->cl_dirty;
        if (cli->cl_dirty - cli->cl_dirty_transit > cli->cl_dirty_max) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else if (cfs_atomic_read(&obd_dirty_pages) -
                   cfs_atomic_read(&obd_dirty_transit_pages) >
                   obd_max_dirty_pages + 1) {
                /* The cfs_atomic_read() calls and the cfs_atomic_inc() are
                 * not covered by a lock, thus they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
                CERROR("dirty %d - %d > system dirty_max %d\n",
                       cfs_atomic_read(&obd_dirty_pages),
                       cfs_atomic_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (cli->cl_dirty_max - cli->cl_dirty > 0x7fffffff) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty, cli->cl_dirty_max);
                oa->o_undirty = 0;
        } else {
                long max_in_flight =
                        (cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT) *
                        (cli->cl_max_rpcs_in_flight + 1);
                oa->o_undirty = max(cli->cl_dirty_max, max_in_flight);
        }
        oa->o_grant = cli->cl_avail_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}
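
/*
 * Editor's note, a worked example of the o_undirty calculation above
 * (numbers are illustrative): with cl_max_pages_per_rpc = 256,
 * CFS_PAGE_SHIFT = 12 and cl_max_rpcs_in_flight = 8, max_in_flight =
 * (256 << 12) * 9 = 9 MB, so a client whose cl_dirty_max is below that
 * still asks the server for enough grant to keep a full pipeline of
 * RPCs in flight.
 */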

static void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant =
                cfs_time_shift(cli->cl_grant_shrink_interval);
        CDEBUG(D_CACHE, "next time %ld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
                                    struct brw_page *pga)
{
        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
        cfs_atomic_inc(&obd_dirty_pages);
        cli->cl_dirty += CFS_PAGE_SIZE;
        cli->cl_avail_grant -= CFS_PAGE_SIZE;
        pga->flag |= OBD_BRW_FROM_GRANT;
        CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
               CFS_PAGE_SIZE, pga, pga->pg);
        LASSERT(cli->cl_avail_grant >= 0);
        osc_update_next_shrink(cli);
}

/* the companion to osc_consume_write_grant, called when a brw has completed.
 * must be called with the loi lock held. */
static void osc_release_write_grant(struct client_obd *cli,
                                    struct brw_page *pga, int sent)
{
        int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
        ENTRY;

        LASSERT_SPIN_LOCKED(&cli->cl_loi_list_lock.lock);
        if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
                EXIT;
                return;
        }

        pga->flag &= ~OBD_BRW_FROM_GRANT;
        cfs_atomic_dec(&obd_dirty_pages);
        cli->cl_dirty -= CFS_PAGE_SIZE;
        if (pga->flag & OBD_BRW_NOCACHE) {
                pga->flag &= ~OBD_BRW_NOCACHE;
                cfs_atomic_dec(&obd_dirty_transit_pages);
                cli->cl_dirty_transit -= CFS_PAGE_SIZE;
        }
        if (!sent) {
                /* Reclaim grant from truncated pages. This is used to solve
                 * the problem of writes being truncated with all of the grant
                 * moved to lost_grant. For a vfs write this problem can be
                 * easily solved by a sync write; however, that is not an
                 * option for page_mkwrite() because grant has to be allocated
                 * before a page becomes dirty. */
                if (cli->cl_avail_grant < PTLRPC_MAX_BRW_SIZE)
                        cli->cl_avail_grant += CFS_PAGE_SIZE;
                else
                        cli->cl_lost_grant += CFS_PAGE_SIZE;
                CDEBUG(D_CACHE, "lost grant: %lu avail grant: %lu dirty: %lu\n",
                       cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty);
        } else if (CFS_PAGE_SIZE != blocksize && pga->count != CFS_PAGE_SIZE) {
                /* For short writes we shouldn't count parts of pages that
                 * span a whole block on the OST side, or our accounting goes
                 * wrong.  Should match the code in filter_grant_check. */
                int offset = pga->off & ~CFS_PAGE_MASK;
                int count = pga->count + (offset & (blocksize - 1));
                int end = (offset + pga->count) & (blocksize - 1);
                if (end)
                        count += blocksize - end;

                cli->cl_lost_grant += CFS_PAGE_SIZE - count;
                CDEBUG(D_CACHE, "lost %lu grant: %lu avail: %lu dirty: %lu\n",
                       CFS_PAGE_SIZE - count, cli->cl_lost_grant,
                       cli->cl_avail_grant, cli->cl_dirty);
        }

        EXIT;
}
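
/*
 * Editor's note, a worked example of the short-write rounding above
 * (illustrative numbers): with a 4096-byte page, blocksize = 1024,
 * offset 0 and pga->count = 1500, the write covers part of a second
 * block, so count rounds up to 2048 and CFS_PAGE_SIZE - count = 2048
 * bytes of grant move to cl_lost_grant.  The server accounts grant in
 * whole blocks, so the client must not keep grant for the rounded-up
 * tail.
 */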

static unsigned long rpcs_in_flight(struct client_obd *cli)
{
        return cli->cl_r_in_flight + cli->cl_w_in_flight;
}

/* caller must hold loi_list_lock */
void osc_wake_cache_waiters(struct client_obd *cli)
{
        cfs_list_t *l, *tmp;
        struct osc_cache_waiter *ocw;

        ENTRY;
        cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
                /* if we can't dirty more, we must wait until some is written */
                if ((cli->cl_dirty + CFS_PAGE_SIZE > cli->cl_dirty_max) ||
                    (cfs_atomic_read(&obd_dirty_pages) + 1 >
                     obd_max_dirty_pages)) {
                        CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
                               "osc max %ld, sys max %d\n", cli->cl_dirty,
                               cli->cl_dirty_max, obd_max_dirty_pages);
                        return;
                }

                /* if still dirty cache but no grant wait for pending RPCs that
                 * may yet return us some grant before doing sync writes */
                if (cli->cl_w_in_flight && cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        CDEBUG(D_CACHE, "%u BRW writes in flight, no grant\n",
                               cli->cl_w_in_flight);
                        return;
                }

                ocw = cfs_list_entry(l, struct osc_cache_waiter, ocw_entry);
                cfs_list_del_init(&ocw->ocw_entry);
                if (cli->cl_avail_grant < CFS_PAGE_SIZE) {
                        /* no more RPCs in flight to return grant, do sync IO */
                        ocw->ocw_rc = -EDQUOT;
                        CDEBUG(D_INODE, "wake oap %p for sync\n", ocw->ocw_oap);
                } else {
                        osc_consume_write_grant(cli,
                                                &ocw->ocw_oap->oap_brw_page);
                }

                CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld\n",
                       ocw, ocw->ocw_oap, cli->cl_avail_grant);

                cfs_waitq_signal(&ocw->ocw_waitq);
        }

        EXIT;
}

static void __osc_update_grant(struct client_obd *cli, obd_size grant)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got "LPU64" extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
                              obd_count keylen, void *key, obd_count vallen,
                              void *val, struct ptlrpc_request_set *set);

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *aa, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBDO_FREE(oa);
        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        client_obd_list_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        long target = (cli->cl_max_rpcs_in_flight + 1) *
                      cli->cl_max_pages_per_rpc;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target)
                target = cli->cl_max_pages_per_rpc;
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target);
}

int osc_shrink_grant_to_target(struct client_obd *cli, long target)
{
        int                  rc = 0;
        struct ost_body     *body;
        ENTRY;

        client_obd_list_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target < cli->cl_max_pages_per_rpc)
                target = cli->cl_max_pages_per_rpc;

        if (target >= cli->cl_avail_grant) {
                client_obd_list_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        client_obd_list_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        client_obd_list_lock(&cli->cl_loi_list_lock);
        body->oa.o_grant = cli->cl_avail_grant - target;
        cli->cl_avail_grant = target;
        client_obd_list_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
        OBD_FREE_PTR(body);
        RETURN(rc);
}
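
/*
 * Editor's note: the shrink request above is routed through
 * osc_set_info_async() (forward-declared earlier, defined outside this
 * excerpt) with KEY_GRANT_SHRINK on the OSC device's obd_self_export,
 * which is expected to carry the ost_body to the OST; if the send fails,
 * the grant carved out of cl_avail_grant is returned locally via
 * __osc_update_grant(), here and in osc_shrink_grant_interpret().
 */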

#define GRANT_SHRINK_LIMIT PTLRPC_MAX_BRW_SIZE
static int osc_should_shrink_grant(struct client_obd *client)
{
        cfs_time_t time = cfs_time_current();
        cfs_time_t next_shrink = client->cl_next_shrink_grant;

        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_GRANT_SHRINK) == 0)
                return 0;

        if (cfs_time_aftereq(time, next_shrink - 5 * CFS_TICK)) {
                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > GRANT_SHRINK_LIMIT)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

static int osc_grant_shrink_grant_cb(struct timeout_item *item, void *data)
{
        struct client_obd *client;

        cfs_list_for_each_entry(client, &item->ti_obd_list,
                                cl_grant_shrink_list) {
                if (osc_should_shrink_grant(client))
                        osc_shrink_grant(client);
        }
        return 0;
}

static int osc_add_shrink_grant(struct client_obd *client)
{
        int rc;

        rc = ptlrpc_add_timeout_client(client->cl_grant_shrink_interval,
                                       TIMEOUT_GRANT,
                                       osc_grant_shrink_grant_cb, NULL,
                                       &client->cl_grant_shrink_list);
        if (rc) {
                CERROR("add grant client %s error %d\n",
                       client->cl_import->imp_obd->obd_name, rc);
                return rc;
        }
        CDEBUG(D_CACHE, "add grant client %s\n",
               client->cl_import->imp_obd->obd_name);
        osc_update_next_shrink(client);
        return 0;
}

static int osc_del_shrink_grant(struct client_obd *client)
{
        return ptlrpc_del_timeout_client(&client->cl_grant_shrink_list,
                                         TIMEOUT_GRANT);
}

static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we are expected to hold: if we
         * have been evicted, it is the new avail_grant amount, and cl_dirty
         * will drop to 0 as in-flight RPCs fail out; otherwise it is
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we are evicted but imp_state has
         * already left EVICTED, then cl_dirty must be 0 already.
         */
        client_obd_list_lock(&cli->cl_loi_list_lock);
        if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
                cli->cl_avail_grant = ocd->ocd_grant;
        else
                cli->cl_avail_grant = ocd->ocd_grant - cli->cl_dirty;

        if (cli->cl_avail_grant < 0) {
                CWARN("%s: available grant < 0, the OSS is probably not running"
                      " with patch from bug20278 (%ld)\n",
                      cli->cl_import->imp_obd->obd_name, cli->cl_avail_grant);
                /* workaround for 1.6 servers which do not have
                 * the patch from bug20278 */
                cli->cl_avail_grant = ocd->ocd_grant;
        }

        client_obd_list_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld\n",
               cli->cl_import->imp_obd->obd_name,
               cli->cl_avail_grant, cli->cl_lost_grant);

        if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT_SHRINK &&
            cfs_list_empty(&cli->cl_grant_shrink_list))
                osc_add_shrink_grant(cli);
}

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it is reading inside the file; it is just
 * that this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, obd_count page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = cfs_kmap(pga[i]->pg) +
                                (pga[i]->off & ~CFS_PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        cfs_kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = cfs_kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                cfs_kunmap(pga[i]->pg);
                i++;
        }
}
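
/*
 * Editor's note, an illustrative walk-through of handle_short_read():
 * with two 4096-byte pages and nob_read = 6000, the first loop consumes
 * page 0 entirely (nob_read drops to 1904), finds EOF inside page 1 and
 * zeroes its last 2192 bytes; the second loop then zeroes any pages that
 * received no data at all.
 */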

static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           obd_count page_count, struct brw_page **pga)
{
        int     i;
        __u32  *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0)
                        return remote_rcs[i];

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                               i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }

        if (req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC | OBD_BRW_ASYNC |
                                  OBD_BRW_NOQUOTA);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at http://bugs.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}
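
/*
 * Editor's note: two brw_pages merge into a single niobuf only when they
 * are byte-adjacent (p1->off + p1->count == p2->off) and carry identical
 * flags; e.g. two full pages at offsets 0 and 4096 collapse into one
 * 8192-byte niobuf in osc_brw_prep_request() below, which is why
 * niocount can be smaller than page_count.
 */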

static obd_count osc_checksum_bulk(int nob, obd_count pg_count,
                                   struct brw_page **pga, int opc,
                                   cksum_type_t cksum_type)
{
        __u32 cksum;
        int i = 0;

        LASSERT(pg_count > 0);
        cksum = init_checksum(cksum_type);
        while (nob > 0 && pg_count > 0) {
                unsigned char *ptr = cfs_kmap(pga[i]->pg);
                int off = pga[i]->off & ~CFS_PAGE_MASK;
                int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))
                        memcpy(ptr + off, "bad1", min(4, nob));
                cksum = compute_checksum(cksum, ptr + off, count, cksum_type);
                cfs_kunmap(pga[i]->pg);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d checksum %x\n",
                               off, cksum);

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        /* When sending, we only compute a wrong checksum instead of
         * corrupting the data, so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        return fini_checksum(cksum, cksum_type);
}

static int osc_brw_prep_request(int cmd, struct client_obd *cli,
                                struct obdo *oa,
                                struct lov_stripe_md *lsm, obd_count page_count,
                                struct brw_page **pga,
                                struct ptlrpc_request **reqp,
                                struct obd_capa *ocapa, int reserve,
                                int resend)
{
        struct ptlrpc_request   *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body         *body;
        struct obd_ioobj        *ioobj;
        struct niobuf_remote    *niobuf;
        int niocount, i, requested_nob, opc, rc;
        struct osc_brw_async_args *aa;
        struct req_capsule      *pill;
        struct brw_page *pg_prev;

        ENTRY;
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                cli->cl_import->imp_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;
        }

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));
        osc_set_capa_size(req, &RMF_CAPA1, ocapa);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        if (opc == OST_WRITE)
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_GET_SOURCE, OST_BULK_PORTAL);
        else
                desc = ptlrpc_prep_bulk_imp(req, page_count,
                                            BULK_PUT_SINK, OST_BULK_PORTAL);

        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */

        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

        lustre_set_wire_obdo(&body->oa, oa);

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        osc_pack_capa(req, body, ocapa);
        LASSERT(page_count > 0);
        pg_prev = pga[0];
        for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
                struct brw_page *pg = pga[i];
                int poff = pg->off & ~CFS_PAGE_MASK;

                LASSERT(pg->count > 0);
                /* make sure there is no gap in the middle of page array */
                LASSERTF(page_count == 1 ||
                         (ergo(i == 0, poff + pg->count == CFS_PAGE_SIZE) &&
                          ergo(i > 0 && i < page_count - 1,
                               poff == 0 && pg->count == CFS_PAGE_SIZE)   &&
                          ergo(i == page_count - 1, poff == 0)),
                         "i: %d/%d pg: %p off: "LPU64", count: %u\n",
                         i, page_count, pg, pg->off, pg->count);
#ifdef __linux__
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u pg %p [pri %lu ind %lu] off "LPU64
                         " prev_pg %p [pri %lu ind %lu] off "LPU64"\n",
                         i, page_count,
                         pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
                         pg_prev->pg, page_private(pg_prev->pg),
                         pg_prev->pg->index, pg_prev->off);
#else
                LASSERTF(i == 0 || pg->off > pg_prev->off,
                         "i %d p_c %u\n", i, page_count);
#endif
                LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
                        (pg->flag & OBD_BRW_SRVLOCK));

                ptlrpc_prep_bulk_page(desc, pg->pg, poff, pg->count);
                requested_nob += pg->count;

                if (i > 0 && can_merge_pages(pg_prev, pg)) {
                        niobuf--;
                        niobuf->len += pg->count;
                } else {
                        niobuf->offset = pg->off;
                        niobuf->len    = pg->count;
                        niobuf->flags  = pg->flag;
                }
                pg_prev = pg;
        }

        LASSERTF((void *)(niobuf - niocount) ==
                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));

        osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob : 0);
        if (resend) {
                if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                        body->oa.o_valid |= OBD_MD_FLFLAGS;
                        body->oa.o_flags = 0;
                }
                body->oa.o_flags |= OBD_FL_RECOV_RESEND;
        }

        if (osc_should_shrink_grant(cli))
                osc_shrink_grant_local(cli, &body->oa);

        /* size[REQ_REC_OFF] still sizeof (*body) */
        if (opc == OST_WRITE) {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        /* store cl_cksum_type in a local variable since
                         * it can be changed via lprocfs */
                        cksum_type_t cksum_type = cli->cl_cksum_type;

                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
                                oa->o_flags &= OBD_FL_LOCAL_MASK;
                                body->oa.o_flags = 0;
                        }
                        body->oa.o_flags |= cksum_type_pack(cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        body->oa.o_cksum = osc_checksum_bulk(requested_nob,
                                                             page_count, pga,
                                                             OST_WRITE,
                                                             cksum_type);
                        CDEBUG(D_PAGE, "checksum at write origin: %x\n",
                               body->oa.o_cksum);
                        /* save this in 'oa', too, for later checking */
                        oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                        oa->o_flags |= cksum_type_pack(cksum_type);
                } else {
                        /* clear out the checksum flag, in case this is a
                         * resend but cl_checksum is no longer set. b=11238 */
                        oa->o_valid &= ~OBD_MD_FLCKSUM;
                }
                oa->o_cksum = body->oa.o_cksum;
                /* 1 RC per niobuf */
                req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
                                     sizeof(__u32) * niocount);
        } else {
                if (cli->cl_checksum &&
                    !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
                        if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
                                body->oa.o_flags = 0;
                        body->oa.o_flags |= cksum_type_pack(cli->cl_cksum_type);
                        body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
                }
        }
        ptlrpc_request_set_replen(req);

        CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
        aa = ptlrpc_req_async_args(req);
        aa->aa_oa = oa;
        aa->aa_requested_nob = requested_nob;
        aa->aa_nio_count = niocount;
        aa->aa_page_count = page_count;
        aa->aa_resends = 0;
        aa->aa_ppga = pga;
        aa->aa_cli = cli;
        CFS_INIT_LIST_HEAD(&aa->aa_oaps);
        if (ocapa && reserve)
                aa->aa_ocapa = capa_get(ocapa);
1476
1477         *reqp = req;
1478         RETURN(0);
1479
1480  out:
1481         ptlrpc_req_finished(req);
1482         RETURN(rc);
1483 }
1484
1485 static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
1486                                 __u32 client_cksum, __u32 server_cksum, int nob,
1487                                 obd_count page_count, struct brw_page **pga,
1488                                 cksum_type_t client_cksum_type)
1489 {
1490         __u32 new_cksum;
1491         char *msg;
1492         cksum_type_t cksum_type;
1493
1494         if (server_cksum == client_cksum) {
1495                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1496                 return 0;
1497         }
1498
1499         cksum_type = cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1500                                        oa->o_flags : 0);
1501         new_cksum = osc_checksum_bulk(nob, page_count, pga, OST_WRITE,
1502                                       cksum_type);
1503
1504         if (cksum_type != client_cksum_type)
1505                 msg = "the server did not use the checksum type specified in "
1506                       "the original request - likely a protocol problem";
1507         else if (new_cksum == server_cksum)
1508                 msg = "changed on the client after we checksummed it - "
1509                       "likely false positive due to mmap IO (bug 11742)";
1510         else if (new_cksum == client_cksum)
1511                 msg = "changed in transit before arrival at OST";
1512         else
1513                 msg = "changed in transit AND doesn't match the original - "
1514                       "likely false positive due to mmap IO (bug 11742)";
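
        /* Summary of the diagnosis above (added for clarity, not in the
         * original): let "new" be the checksum just recomputed on the client.
         *   server type != client type -> server-side protocol problem
         *   new == server's            -> pages changed on the client after
         *                                 the original checksum (mmap IO)
         *   new == client's original   -> data corrupted in transit to OST
         *   new matches neither        -> changed in transit and again after;
         *                                 most likely mmap IO as well */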
1515
1516         LCONSOLE_ERROR_MSG(0x132, "BAD WRITE CHECKSUM: %s: from %s inode "DFID
1517                            " object "LPU64"/"LPU64" extent ["LPU64"-"LPU64"]\n",
1518                            msg, libcfs_nid2str(peer->nid),
1519                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1520                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1521                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1522                            oa->o_id,
1523                            oa->o_valid & OBD_MD_FLGROUP ? oa->o_seq : (__u64)0,
1524                            pga[0]->off,
1525                            pga[page_count-1]->off + pga[page_count-1]->count - 1);
1526         CERROR("original client csum %x (type %x), server csum %x (type %x), "
1527                "client csum now %x\n", client_cksum, client_cksum_type,
1528                server_cksum, cksum_type, new_cksum);
1529         return 1;
1530 }
1531
1532 /* Note rc enters this function as number of bytes transferred */
1533 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1534 {
1535         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1536         const lnet_process_id_t *peer =
1537                         &req->rq_import->imp_connection->c_peer;
1538         struct client_obd *cli = aa->aa_cli;
1539         struct ost_body *body;
1540         __u32 client_cksum = 0;
1541         ENTRY;
1542
1543         if (rc < 0 && rc != -EDQUOT) {
1544                 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
1545                 RETURN(rc);
1546         }
1547
1548         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1549         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1550         if (body == NULL) {
1551                 DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
1552                 RETURN(-EPROTO);
1553         }
1554
1555         /* set/clear over quota flag for a uid/gid */
1556         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1557             body->oa.o_valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) {
1558                 unsigned int qid[MAXQUOTAS] = { body->oa.o_uid, body->oa.o_gid };
1559
1560                 CDEBUG(D_QUOTA, "setdq for [%u %u] with valid "LPX64", flags %x\n",
1561                        body->oa.o_uid, body->oa.o_gid, body->oa.o_valid,
1562                        body->oa.o_flags);
1563                 osc_quota_setdq(cli, qid, body->oa.o_valid, body->oa.o_flags);
1564         }
1565
1566         osc_update_grant(cli, body);
1567
1568         if (rc < 0)
1569                 RETURN(rc);
1570
1571         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1572                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1573
1574         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1575                 if (rc > 0) {
1576                         CERROR("Unexpected +ve rc %d\n", rc);
1577                         RETURN(-EPROTO);
1578                 }
1579                 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
1580
1581                 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1582                         RETURN(-EAGAIN);
1583
1584                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1585                     check_write_checksum(&body->oa, peer, client_cksum,
1586                                          body->oa.o_cksum, aa->aa_requested_nob,
1587                                          aa->aa_page_count, aa->aa_ppga,
1588                                          cksum_type_unpack(aa->aa_oa->o_flags)))
1589                         RETURN(-EAGAIN);
1590
1591                 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
1592                                      aa->aa_page_count, aa->aa_ppga);
1593                 GOTO(out, rc);
1594         }
1595
1596         /* The rest of this function executes only for OST_READs */
1597
1598         /* if unwrap_bulk failed, return -EAGAIN to retry */
1599         rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1600         if (rc < 0)
1601                 GOTO(out, rc = -EAGAIN);
1602
1603         if (rc > aa->aa_requested_nob) {
1604                 CERROR("Unexpected rc %d (%d requested)\n", rc,
1605                        aa->aa_requested_nob);
1606                 RETURN(-EPROTO);
1607         }
1608
1609         if (rc != req->rq_bulk->bd_nob_transferred) {
1610                 CERROR("Unexpected rc %d (%d transferred)\n",
1611                        rc, req->rq_bulk->bd_nob_transferred);
1612                 RETURN(-EPROTO);
1613         }
1614
1615         if (rc < aa->aa_requested_nob)
1616                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1617
1618         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1619                 static int cksum_counter;
1620                 __u32      server_cksum = body->oa.o_cksum;
1621                 char      *via;
1622                 char      *router;
1623                 cksum_type_t cksum_type;
1624
1625                 cksum_type = cksum_type_unpack(body->oa.o_valid & OBD_MD_FLFLAGS ?
1626                                                body->oa.o_flags : 0);
1627                 client_cksum = osc_checksum_bulk(rc, aa->aa_page_count,
1628                                                  aa->aa_ppga, OST_READ,
1629                                                  cksum_type);
1630
1631                 if (peer->nid == req->rq_bulk->bd_sender) {
1632                         via = router = "";
1633                 } else {
1634                         via = " via ";
1635                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
1636                 }
1637
1638                 if (server_cksum == ~0 && rc > 0) {
1639                         CERROR("Protocol error: server %s set the 'checksum' "
1640                                "bit, but didn't send a checksum.  Not fatal, "
1641                                "but please notify on http://bugs.whamcloud.com/\n",
1642                                libcfs_nid2str(peer->nid));
1643                 } else if (server_cksum != client_cksum) {
1644                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1645                                            "%s%s%s inode "DFID" object "
1646                                            LPU64"/"LPU64" extent "
1647                                            "["LPU64"-"LPU64"]\n",
1648                                            req->rq_import->imp_obd->obd_name,
1649                                            libcfs_nid2str(peer->nid),
1650                                            via, router,
1651                                            body->oa.o_valid & OBD_MD_FLFID ?
1652                                                 body->oa.o_parent_seq : (__u64)0,
1653                                            body->oa.o_valid & OBD_MD_FLFID ?
1654                                                 body->oa.o_parent_oid : 0,
1655                                            body->oa.o_valid & OBD_MD_FLFID ?
1656                                                 body->oa.o_parent_ver : 0,
1657                                            body->oa.o_id,
1658                                            body->oa.o_valid & OBD_MD_FLGROUP ?
1659                                                 body->oa.o_seq : (__u64)0,
1660                                            aa->aa_ppga[0]->off,
1661                                            aa->aa_ppga[aa->aa_page_count-1]->off +
1662                                            aa->aa_ppga[aa->aa_page_count-1]->count -
1663                                                                         1);
1664                         CERROR("client %x, server %x, cksum_type %x\n",
1665                                client_cksum, server_cksum, cksum_type);
1666                         cksum_counter = 0;
1667                         aa->aa_oa->o_cksum = client_cksum;
1668                         rc = -EAGAIN;
1669                 } else {
1670                         cksum_counter++;
1671                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1672                         rc = 0;
1673                 }
1674         } else if (unlikely(client_cksum)) {
1675                 static int cksum_missed;
1676
1677                 cksum_missed++;
1678                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1679                         CERROR("Checksum %u requested from %s but not sent\n",
1680                                cksum_missed, libcfs_nid2str(peer->nid));
1681         } else {
1682                 rc = 0;
1683         }
1684 out:
1685         if (rc >= 0)
1686                 lustre_get_wire_obdo(aa->aa_oa, &body->oa);
1687
1688         RETURN(rc);
1689 }
1690
1691 static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
1692                             struct lov_stripe_md *lsm,
1693                             obd_count page_count, struct brw_page **pga,
1694                             struct obd_capa *ocapa)
1695 {
1696         struct ptlrpc_request *req;
1697         int                    rc;
1698         cfs_waitq_t            waitq;
1699         int                    generation, resends = 0;
1700         struct l_wait_info     lwi;
1701
1702         ENTRY;
1703
1704         cfs_waitq_init(&waitq);
1705         generation = exp->exp_obd->u.cli.cl_import->imp_generation;
1706
1707 restart_bulk:
1708         rc = osc_brw_prep_request(cmd, &exp->exp_obd->u.cli, oa, lsm,
1709                                   page_count, pga, &req, ocapa, 0, resends);
1710         if (rc != 0)
1711                 RETURN(rc);
1712
1713         if (resends) {
1714                 req->rq_generation_set = 1;
1715                 req->rq_import_generation = generation;
1716                 req->rq_sent = cfs_time_current_sec() + resends;
1717         }
1718
1719         rc = ptlrpc_queue_wait(req);
1720
1721         if (rc == -ETIMEDOUT && req->rq_resend) {
1722                 DEBUG_REQ(D_HA, req,  "BULK TIMEOUT");
1723                 ptlrpc_req_finished(req);
1724                 goto restart_bulk;
1725         }
1726
1727         rc = osc_brw_fini_request(req, rc);
1728
1729         ptlrpc_req_finished(req);
1730         /* When the server returns -EINPROGRESS, the client should always
1731          * retry regardless of how many times the bulk was already resent. */
1732         if (osc_recoverable_error(rc)) {
1733                 resends++;
1734                 if (rc != -EINPROGRESS &&
1735                     !client_should_resend(resends, &exp->exp_obd->u.cli)) {
1736                         CERROR("%s: too many resend retries for object: "
1737                                ""LPU64":"LPU64", rc = %d.\n",
1738                                exp->exp_obd->obd_name, oa->o_id, oa->o_seq, rc);
1739                         goto out;
1740                 }
1741                 if (generation !=
1742                     exp->exp_obd->u.cli.cl_import->imp_generation) {
1743                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
1744                                ""LPU64":"LPU64", rc = %d.\n",
1745                                exp->exp_obd->obd_name, oa->o_id, oa->o_seq, rc);
1746                         goto out;
1747                 }
1748
1749                 lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
1750                                        NULL);
1751                 l_wait_event(waitq, 0, &lwi);
1752
1753                 goto restart_bulk;
1754         }
1755 out:
1756         if (rc == -EAGAIN || rc == -EINPROGRESS)
1757                 rc = -EIO;
1758         RETURN (rc);
1759 }
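
/* A worked example of the retry pacing in osc_brw_internal() above
 * (illustrative, assuming default tunables): the first recoverable failure
 * bumps 'resends' to 1 and sleeps ~1 second before re-issuing, the second
 * sleeps ~2 seconds, and so on (linear backoff), until client_should_resend()
 * says stop or the import generation changes due to an eviction.
 * -EINPROGRESS replies keep retrying regardless of the resend count, as
 * noted above. */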
1760
1761 int osc_brw_redo_request(struct ptlrpc_request *request,
1762                          struct osc_brw_async_args *aa)
1763 {
1764         struct ptlrpc_request *new_req;
1765         struct ptlrpc_request_set *set = request->rq_set;
1766         struct osc_brw_async_args *new_aa;
1767         struct osc_async_page *oap;
1768         int rc = 0;
1769         ENTRY;
1770
1771         DEBUG_REQ(D_ERROR, request, "redo for recoverable error");
1772
1773         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1774                                         OST_WRITE ? OBD_BRW_WRITE :OBD_BRW_READ,
1775                                   aa->aa_cli, aa->aa_oa,
1776                                   NULL /* lsm unused by osc currently */,
1777                                   aa->aa_page_count, aa->aa_ppga,
1778                                   &new_req, aa->aa_ocapa, 0, 1);
1779         if (rc)
1780                 RETURN(rc);
1781
1782         client_obd_list_lock(&aa->aa_cli->cl_loi_list_lock);
1783
1784         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1785                 if (oap->oap_request != NULL) {
1786                         LASSERTF(request == oap->oap_request,
1787                                  "request %p != oap_request %p\n",
1788                                  request, oap->oap_request);
1789                         if (oap->oap_interrupted) {
1790                                 client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1791                                 ptlrpc_req_finished(new_req);
1792                                 RETURN(-EINTR);
1793                         }
1794                 }
1795         }
1796         /* The new request takes over pga and oaps from the old request.
1797          * Note that copying a list_head doesn't work; it has to be moved. */
1798         aa->aa_resends++;
1799         new_req->rq_interpret_reply = request->rq_interpret_reply;
1800         new_req->rq_async_args = request->rq_async_args;
1801         new_req->rq_sent = cfs_time_current_sec() + aa->aa_resends;
1802         new_req->rq_generation_set = 1;
1803         new_req->rq_import_generation = request->rq_import_generation;
1804
1805         new_aa = ptlrpc_req_async_args(new_req);
1806
1807         CFS_INIT_LIST_HEAD(&new_aa->aa_oaps);
1808         cfs_list_splice(&aa->aa_oaps, &new_aa->aa_oaps);
1809         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
1810
1811         cfs_list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1812                 if (oap->oap_request) {
1813                         ptlrpc_req_finished(oap->oap_request);
1814                         oap->oap_request = ptlrpc_request_addref(new_req);
1815                 }
1816         }
1817
1818         new_aa->aa_ocapa = aa->aa_ocapa;
1819         aa->aa_ocapa = NULL;
1820
1821         /* Using ptlrpc_set_add_req() is safe because the interpret
1822          * functions run in check_set context.  The only path by which
1823          * another thread can reach the request and set -EINTR is
1824          * protected by cl_loi_list_lock. */
1825         ptlrpc_set_add_req(set, new_req);
1826
1827         client_obd_list_unlock(&aa->aa_cli->cl_loi_list_lock);
1828
1829         DEBUG_REQ(D_INFO, new_req, "new request");
1830         RETURN(0);
1831 }
1832
1833 /*
1834  * Ugh, we want disk allocation on the target to happen in offset order.  We'll
1835  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
1836  * fine for our small page arrays and doesn't require allocation.  It's an
1837  * insertion sort that swaps elements that are strides apart, shrinking the
1838  * stride down until it's 1 and the array is sorted.
1839  */
1840 static void sort_brw_pages(struct brw_page **array, int num)
1841 {
1842         int stride, i, j;
1843         struct brw_page *tmp;
1844
1845         if (num == 1)
1846                 return;
1847         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1848                 ;
1849
1850         do {
1851                 stride /= 3;
1852                 for (i = stride ; i < num ; i++) {
1853                         tmp = array[i];
1854                         j = i;
1855                         while (j >= stride && array[j - stride]->off > tmp->off) {
1856                                 array[j] = array[j - stride];
1857                                 j -= stride;
1858                         }
1859                         array[j] = tmp;
1860                 }
1861         } while (stride > 1);
1862 }
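
/* Illustrative trace of sort_brw_pages() (not part of the build): for
 * num = 10 the seeding loop walks the Knuth/Sedgewick stride sequence
 * h = 3*h + 1 = 1, 4, 13 and stops at 13, so the do-while runs gapped
 * insertion-sort passes with stride 4 and then stride 1, ordering the
 * brw_page pointers by ->off. */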
1863
1864 static obd_count max_unfragmented_pages(struct brw_page **pg, obd_count pages)
1865 {
1866         int count = 1;
1867         int offset;
1868         int i = 0;
1869
1870         LASSERT(pages > 0);
1871         offset = pg[i]->off & ~CFS_PAGE_MASK;
1872
1873         for (;;) {
1874                 pages--;
1875                 if (pages == 0)         /* that's all */
1876                         return count;
1877
1878                 if (offset + pg[i]->count < CFS_PAGE_SIZE)
1879                         return count;   /* doesn't end on page boundary */
1880
1881                 i++;
1882                 offset = pg[i]->off & ~CFS_PAGE_MASK;
1883                 if (offset != 0)        /* doesn't start on page boundary */
1884                         return count;
1885
1886                 count++;
1887         }
1888 }
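
/* Worked example for max_unfragmented_pages() (added; assumes
 * CFS_PAGE_SIZE == 4096): for pages whose (in-page offset, count) pairs are
 *   (1024, 3072), (0, 4096), (0, 2048), (0, 4096)
 * the function returns 3: the third page ends mid-page, so the fourth page
 * could not belong to the same unfragmented RDMA transfer. */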
1889
1890 static struct brw_page **osc_build_ppga(struct brw_page *pga, obd_count count)
1891 {
1892         struct brw_page **ppga;
1893         int i;
1894
1895         OBD_ALLOC(ppga, sizeof(*ppga) * count);
1896         if (ppga == NULL)
1897                 return NULL;
1898
1899         for (i = 0; i < count; i++)
1900                 ppga[i] = pga + i;
1901         return ppga;
1902 }
1903
1904 static void osc_release_ppga(struct brw_page **ppga, obd_count count)
1905 {
1906         LASSERT(ppga != NULL);
1907         OBD_FREE(ppga, sizeof(*ppga) * count);
1908 }
1909
1910 static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
1911                    obd_count page_count, struct brw_page *pga,
1912                    struct obd_trans_info *oti)
1913 {
1914         struct obdo *saved_oa = NULL;
1915         struct brw_page **ppga, **orig;
1916         struct obd_import *imp = class_exp2cliimp(exp);
1917         struct client_obd *cli;
1918         int rc, page_count_orig;
1919         ENTRY;
1920
1921         LASSERT((imp != NULL) && (imp->imp_obd != NULL));
1922         cli = &imp->imp_obd->u.cli;
1923
1924         if (cmd & OBD_BRW_CHECK) {
1925                 /* The caller just wants to know if there's a chance that this
1926                  * I/O can succeed */
1927
1928                 if (imp->imp_invalid)
1929                         RETURN(-EIO);
1930                 RETURN(0);
1931         }
1932
1933         /* test_brw with a failed create can trip this, maybe others. */
1934         LASSERT(cli->cl_max_pages_per_rpc);
1935
1936         rc = 0;
1937
1938         orig = ppga = osc_build_ppga(pga, page_count);
1939         if (ppga == NULL)
1940                 RETURN(-ENOMEM);
1941         page_count_orig = page_count;
1942
1943         sort_brw_pages(ppga, page_count);
1944         while (page_count) {
1945                 obd_count pages_per_brw;
1946
1947                 if (page_count > cli->cl_max_pages_per_rpc)
1948                         pages_per_brw = cli->cl_max_pages_per_rpc;
1949                 else
1950                         pages_per_brw = page_count;
1951
1952                 pages_per_brw = max_unfragmented_pages(ppga, pages_per_brw);
1953
1954                 if (saved_oa != NULL) {
1955                         /* restore previously saved oa */
1956                         *oinfo->oi_oa = *saved_oa;
1957                 } else if (page_count > pages_per_brw) {
1958                         /* save a copy of oa (brw will clobber it) */
1959                         OBDO_ALLOC(saved_oa);
1960                         if (saved_oa == NULL)
1961                                 GOTO(out, rc = -ENOMEM);
1962                         *saved_oa = *oinfo->oi_oa;
1963                 }
1964
1965                 rc = osc_brw_internal(cmd, exp, oinfo->oi_oa, oinfo->oi_md,
1966                                       pages_per_brw, ppga, oinfo->oi_capa);
1967
1968                 if (rc != 0)
1969                         break;
1970
1971                 page_count -= pages_per_brw;
1972                 ppga += pages_per_brw;
1973         }
1974
1975 out:
1976         osc_release_ppga(orig, page_count_orig);
1977
1978         if (saved_oa != NULL)
1979                 OBDO_FREE(saved_oa);
1980
1981         RETURN(rc);
1982 }
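
/* A hedged walk-through of osc_brw() above: assuming cl_max_pages_per_rpc
 * is 256 and the sorted 600-page array is fully unfragmented, the loop
 * issues three synchronous BRW RPCs of 256, 256 and 88 pages, restoring
 * *oinfo->oi_oa from saved_oa between rounds because osc_brw_internal()
 * clobbers it. */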
1983
1984 /* The companion to osc_enter_cache(), called when @oap is no longer part of
1985  * the dirty accounting: either writeback completed or a truncate happened
1986  * before writing started.  Must be called with the loi lock held. */
1987 static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap,
1988                            int sent)
1989 {
1990         osc_release_write_grant(cli, &oap->oap_brw_page, sent);
1991 }
1992
1993
1994 /* This maintains the lists of pending pages to read/write for a given object
1995  * (lop).  This is used by osc_check_rpcs->osc_next_loi() and loi_list_maint()
1996  * to quickly find objects that are ready to send an RPC. */
1997 static int lop_makes_rpc(struct client_obd *cli, struct loi_oap_pages *lop,
1998                          int cmd)
1999 {
2000         ENTRY;
2001
2002         if (lop->lop_num_pending == 0)
2003                 RETURN(0);
2004
2005         /* If we have an invalid import we want to drain the queued pages
2006          * by forcing them through rpcs that immediately fail and complete
2007          * the pages.  Recovery relies on this to empty the queued pages
2008          * before canceling the locks and evicting the llite pages. */
2009         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
2010                 RETURN(1);
2011
2012         /* Stream rpcs in queue order as long as there is an urgent page
2013          * queued.  This is our cheap solution for good batching in the case
2014          * where writepage marks some random page in the middle of the file
2015          * as urgent because of, say, memory pressure. */
2016         if (!cfs_list_empty(&lop->lop_urgent)) {
2017                 CDEBUG(D_CACHE, "urgent request forcing RPC\n");
2018                 RETURN(1);
2019         }
2020
2021         if (cmd & OBD_BRW_WRITE) {
2022                 /* Trigger a write rpc stream as long as there are dirtiers
2023                  * waiting for space.  While they're waiting, they're not going
2024                  * to create more pages to coalesce with what's waiting. */
2025                 if (!cfs_list_empty(&cli->cl_cache_waiters)) {
2026                         CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
2027                         RETURN(1);
2028                 }
2029         }
2030         if (lop->lop_num_pending >= cli->cl_max_pages_per_rpc)
2031                 RETURN(1);
2032
2033         RETURN(0);
2034 }
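
/* In short, lop_makes_rpc() forces an RPC when any of the following holds
 * (a summary of the checks above): the import is invalid (drain everything
 * through immediately-failing RPCs), an urgent page is queued, writers are
 * blocked waiting for cache space (writes only), or a full RPC's worth of
 * pages (cl_max_pages_per_rpc) is pending. */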
2035
2036 static int lop_makes_hprpc(struct loi_oap_pages *lop)
2037 {
2038         struct osc_async_page *oap;
2039         ENTRY;
2040
2041         if (cfs_list_empty(&lop->lop_urgent))
2042                 RETURN(0);
2043
2044         oap = cfs_list_entry(lop->lop_urgent.next,
2045                          struct osc_async_page, oap_urgent_item);
2046
2047         if (oap->oap_async_flags & ASYNC_HP) {
2048                 CDEBUG(D_CACHE, "hp request forcing RPC\n");
2049                 RETURN(1);
2050         }
2051
2052         RETURN(0);
2053 }
2054
2055 static void on_list(cfs_list_t *item, cfs_list_t *list,
2056                     int should_be_on)
2057 {
2058         if (cfs_list_empty(item) && should_be_on)
2059                 cfs_list_add_tail(item, list);
2060         else if (!cfs_list_empty(item) && !should_be_on)
2061                 cfs_list_del_init(item);
2062 }
2063
2064 /* Maintain the loi's cli list membership invariants so that osc_send_oap_rpc
2065  * can find pages to build into rpcs quickly. */
2066 void loi_list_maint(struct client_obd *cli, struct lov_oinfo *loi)
2067 {
2068         if (lop_makes_hprpc(&loi->loi_write_lop) ||
2069             lop_makes_hprpc(&loi->loi_read_lop)) {
2070                 /* HP rpc */
2071                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list, 0);
2072                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 1);
2073         } else {
2074                 on_list(&loi->loi_hp_ready_item, &cli->cl_loi_hp_ready_list, 0);
2075                 on_list(&loi->loi_ready_item, &cli->cl_loi_ready_list,
2076                         lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)||
2077                         lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ));
2078         }
2079
2080         on_list(&loi->loi_write_item, &cli->cl_loi_write_list,
2081                 loi->loi_write_lop.lop_num_pending);
2082
2083         on_list(&loi->loi_read_item, &cli->cl_loi_read_list,
2084                 loi->loi_read_lop.lop_num_pending);
2085 }
2086
2087 static void lop_update_pending(struct client_obd *cli,
2088                                struct loi_oap_pages *lop, int cmd, int delta)
2089 {
2090         lop->lop_num_pending += delta;
2091         if (cmd & OBD_BRW_WRITE)
2092                 cli->cl_pending_w_pages += delta;
2093         else
2094                 cli->cl_pending_r_pages += delta;
2095 }
2096
2097 /**
2098  * This is called when a sync waiter receives an interruption.  Its job is to
2099  * get the caller woken as soon as possible.  If its page hasn't been put in an
2100  * rpc yet it can dequeue immediately.  Otherwise it has to mark the rpc as
2101  * desiring interruption, which will forcefully complete the rpc once the rpc
2102  * has timed out.
2103  */
2104 int osc_oap_interrupted(const struct lu_env *env, struct osc_async_page *oap)
2105 {
2106         struct loi_oap_pages *lop;
2107         struct lov_oinfo *loi;
2108         int rc = -EBUSY;
2109         ENTRY;
2110
2111         LASSERT(!oap->oap_interrupted);
2112         oap->oap_interrupted = 1;
2113
2114         /* OK, it's been put in an rpc; only one oap gets a request reference. */
2115         if (oap->oap_request != NULL) {
2116                 ptlrpc_mark_interrupted(oap->oap_request);
2117                 ptlrpcd_wake(oap->oap_request);
2118                 ptlrpc_req_finished(oap->oap_request);
2119                 oap->oap_request = NULL;
2120         }
2121
2122         /*
2123          * Page completion may be called only if the ->cpo_prep() method was
2124          * executed by osc_io_submit(), which also adds the page to the pending list.
2125          */
2126         if (!cfs_list_empty(&oap->oap_pending_item)) {
2127                 cfs_list_del_init(&oap->oap_pending_item);
2128                 cfs_list_del_init(&oap->oap_urgent_item);
2129
2130                 loi = oap->oap_loi;
2131                 lop = (oap->oap_cmd & OBD_BRW_WRITE) ?
2132                         &loi->loi_write_lop : &loi->loi_read_lop;
2133                 lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, -1);
2134                 loi_list_maint(oap->oap_cli, oap->oap_loi);
2135                 rc = oap->oap_caller_ops->ap_completion(env,
2136                                           oap->oap_caller_data,
2137                                           oap->oap_cmd, NULL, -EINTR);
2138         }
2139
2140         RETURN(rc);
2141 }
2142
2143 /* This is trying to propagate async writeback errors back up to the
2144  * application.  When an async write fails we record the error code for later
2145  * in case the app does an fsync.  As long as errors persist we force future
2146  * rpcs to be sync so that the app can get a sync error and break the cycle of
2147  * queueing pages for which writeback will fail. */
2148 static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
2149                            int rc)
2150 {
2151         if (rc) {
2152                 if (!ar->ar_rc)
2153                         ar->ar_rc = rc;
2154
2155                 ar->ar_force_sync = 1;
2156                 ar->ar_min_xid = ptlrpc_sample_next_xid();
2157                 return;
2159         }
2160
2161         if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
2162                 ar->ar_force_sync = 0;
2163 }
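
/* Worked example for osc_process_ar() (added; the xid values are
 * hypothetical): a write with xid 100 fails with -ENOSPC, so ar_rc latches
 * -ENOSPC, ar_force_sync is set, and ar_min_xid is sampled as, say, 103.
 * Writes are then forced synchronous until one with xid >= 103 completes
 * successfully, which clears ar_force_sync; the latched ar_rc is what a
 * later fsync will report (consumed elsewhere). */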
2164
2165 void osc_oap_to_pending(struct osc_async_page *oap)
2166 {
2167         struct loi_oap_pages *lop;
2168
2169         if (oap->oap_cmd & OBD_BRW_WRITE)
2170                 lop = &oap->oap_loi->loi_write_lop;
2171         else
2172                 lop = &oap->oap_loi->loi_read_lop;
2173
2174         if (oap->oap_async_flags & ASYNC_HP)
2175                 cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
2176         else if (oap->oap_async_flags & ASYNC_URGENT)
2177                 cfs_list_add_tail(&oap->oap_urgent_item, &lop->lop_urgent);
2178         cfs_list_add_tail(&oap->oap_pending_item, &lop->lop_pending);
2179         lop_update_pending(oap->oap_cli, lop, oap->oap_cmd, 1);
2180 }
2181
2182 /* This must be called holding the loi list lock to give coverage to
2183  * exit_cache, async_flag maintenance, and oap_request. */
2184 static void osc_ap_completion(const struct lu_env *env,
2185                               struct client_obd *cli, struct obdo *oa,
2186                               struct osc_async_page *oap, int sent, int rc)
2187 {
2188         __u64 xid = 0;
2189
2190         ENTRY;
2191         if (oap->oap_request != NULL) {
2192                 xid = ptlrpc_req_xid(oap->oap_request);
2193                 ptlrpc_req_finished(oap->oap_request);
2194                 oap->oap_request = NULL;
2195         }
2196
2197         cfs_spin_lock(&oap->oap_lock);
2198         oap->oap_async_flags = 0;
2199         cfs_spin_unlock(&oap->oap_lock);
2200         oap->oap_interrupted = 0;
2201
2202         if (oap->oap_cmd & OBD_BRW_WRITE) {
2203                 osc_process_ar(&cli->cl_ar, xid, rc);
2204                 osc_process_ar(&oap->oap_loi->loi_ar, xid, rc);
2205         }
2206
2207         if (rc == 0 && oa != NULL) {
2208                 if (oa->o_valid & OBD_MD_FLBLOCKS)
2209                         oap->oap_loi->loi_lvb.lvb_blocks = oa->o_blocks;
2210                 if (oa->o_valid & OBD_MD_FLMTIME)
2211                         oap->oap_loi->loi_lvb.lvb_mtime = oa->o_mtime;
2212                 if (oa->o_valid & OBD_MD_FLATIME)
2213                         oap->oap_loi->loi_lvb.lvb_atime = oa->o_atime;
2214                 if (oa->o_valid & OBD_MD_FLCTIME)
2215                         oap->oap_loi->loi_lvb.lvb_ctime = oa->o_ctime;
2216         }
2217
2218         rc = oap->oap_caller_ops->ap_completion(env, oap->oap_caller_data,
2219                                                 oap->oap_cmd, oa, rc);
2220
2221         /* cl_page_completion() drops PG_locked, so a new I/O on the page could
2222          * start; but OSC calls it under lock and thus we can safely add the oap
2223          * back to pending. */
2224         if (rc)
2225                 /* upper layer wants to leave the page on pending queue */
2226                 osc_oap_to_pending(oap);
2227         else
2228                 osc_exit_cache(cli, oap, sent);
2229         EXIT;
2230 }
2231
2232 static int brw_queue_work(const struct lu_env *env, void *data)
2233 {
2234         struct client_obd *cli = data;
2235
2236         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
2237
2238         client_obd_list_lock(&cli->cl_loi_list_lock);
2239         osc_check_rpcs0(env, cli, 1);
2240         client_obd_list_unlock(&cli->cl_loi_list_lock);
2241         RETURN(0);
2242 }
2243
2244 static int brw_interpret(const struct lu_env *env,
2245                          struct ptlrpc_request *req, void *data, int rc)
2246 {
2247         struct osc_brw_async_args *aa = data;
2248         struct client_obd *cli;
2249         int async;
2250         ENTRY;
2251
2252         rc = osc_brw_fini_request(req, rc);
2253         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2254         /* When the server returns -EINPROGRESS, the client should always
2255          * retry regardless of how many times the bulk was already resent. */
2256         if (osc_recoverable_error(rc)) {
2257                 if (req->rq_import_generation !=
2258                     req->rq_import->imp_generation) {
2259                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2260                                ""LPU64":"LPU64", rc = %d.\n",
2261                                req->rq_import->imp_obd->obd_name,
2262                                aa->aa_oa->o_id, aa->aa_oa->o_seq, rc);
2263                 } else if (rc == -EINPROGRESS ||
2264                     client_should_resend(aa->aa_resends, aa->aa_cli)) {
2265                         rc = osc_brw_redo_request(req, aa);
2266                 } else {
2267                         CERROR("%s: too many resend retries for object: "
2268                                ""LPU64":"LPU64", rc = %d.\n",
2269                                req->rq_import->imp_obd->obd_name,
2270                                aa->aa_oa->o_id, aa->aa_oa->o_seq, rc);
2271                 }
2272
2273                 if (rc == 0)
2274                         RETURN(0);
2275                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2276                         rc = -EIO;
2277         }
2278
2279         if (aa->aa_ocapa) {
2280                 capa_put(aa->aa_ocapa);
2281                 aa->aa_ocapa = NULL;
2282         }
2283
2284         cli = aa->aa_cli;
2285         client_obd_list_lock(&cli->cl_loi_list_lock);
2286
2287         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2288          * is called so we know whether to go to sync BRWs or wait for more
2289          * RPCs to complete */
2290         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2291                 cli->cl_w_in_flight--;
2292         else
2293                 cli->cl_r_in_flight--;
2294
2295         async = cfs_list_empty(&aa->aa_oaps);
2296         if (!async) { /* from osc_send_oap_rpc() */
2297                 struct osc_async_page *oap, *tmp;
2298                 /* the caller may re-use the oap after the completion call so
2299                  * we need to clean it up a little */
2300                 cfs_list_for_each_entry_safe(oap, tmp, &aa->aa_oaps,
2301                                              oap_rpc_item) {
2302                         cfs_list_del_init(&oap->oap_rpc_item);
2303                         osc_ap_completion(env, cli, aa->aa_oa, oap, 1, rc);
2304                 }
2305                 OBDO_FREE(aa->aa_oa);
2306         } else { /* from async_internal() */
2307                 obd_count i;
2308                 for (i = 0; i < aa->aa_page_count; i++)
2309                         osc_release_write_grant(aa->aa_cli, aa->aa_ppga[i], 1);
2310         }
2311         osc_wake_cache_waiters(cli);
2312         osc_check_rpcs0(env, cli, 1);
2313         client_obd_list_unlock(&cli->cl_loi_list_lock);
2314
2315         if (!async)
2316                 cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
2317                                   req->rq_bulk->bd_nob_transferred);
2318         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2319         ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
2320
2321         RETURN(rc);
2322 }
2323
2324 static struct ptlrpc_request *osc_build_req(const struct lu_env *env,
2325                                             struct client_obd *cli,
2326                                             cfs_list_t *rpc_list,
2327                                             int page_count, int cmd)
2328 {
2329         struct ptlrpc_request *req;
2330         struct brw_page **pga = NULL;
2331         struct osc_brw_async_args *aa;
2332         struct obdo *oa = NULL;
2333         const struct obd_async_page_ops *ops = NULL;
2334         struct osc_async_page *oap;
2335         struct osc_async_page *tmp;
2336         struct cl_req *clerq = NULL;
2337         enum cl_req_type crt = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2338         struct ldlm_lock *lock = NULL;
2339         struct cl_req_attr crattr;
2340         int i, rc, mpflag = 0;
2341
2342         ENTRY;
2343         LASSERT(!cfs_list_empty(rpc_list));
2344
2345         if (cmd & OBD_BRW_MEMALLOC)
2346                 mpflag = cfs_memory_pressure_get_and_set();
2347
2348         memset(&crattr, 0, sizeof crattr);
2349         OBD_ALLOC(pga, sizeof(*pga) * page_count);
2350         if (pga == NULL)
2351                 GOTO(out, req = ERR_PTR(-ENOMEM));
2352
2353         OBDO_ALLOC(oa);
2354         if (oa == NULL)
2355                 GOTO(out, req = ERR_PTR(-ENOMEM));
2356
2357         i = 0;
2358         cfs_list_for_each_entry(oap, rpc_list, oap_rpc_item) {
2359                 struct cl_page *page = osc_oap2cl_page(oap);
2360                 if (ops == NULL) {
2361                         ops = oap->oap_caller_ops;
2362
2363                         clerq = cl_req_alloc(env, page, crt,
2364                                              1 /* only 1-object rpcs for
2365                                                 * now */);
2366                         if (IS_ERR(clerq))
2367                                 GOTO(out, req = (void *)clerq);
2368                         lock = oap->oap_ldlm_lock;
2369                 }
2370                 pga[i] = &oap->oap_brw_page;
2371                 pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2372                 CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
2373                        pga[i]->pg, cfs_page_index(oap->oap_page), oap, pga[i]->flag);
2374                 i++;
2375                 cl_req_page_add(env, clerq, page);
2376         }
2377
2378         /* always get the data for the obdo for the rpc */
2379         LASSERT(ops != NULL);
2380         crattr.cra_oa = oa;
2381         crattr.cra_capa = NULL;
2382         cl_req_attr_set(env, clerq, &crattr, ~0ULL);
2383         if (lock) {
2384                 oa->o_handle = lock->l_remote_handle;
2385                 oa->o_valid |= OBD_MD_FLHANDLE;
2386         }
2387
2388         rc = cl_req_prep(env, clerq);
2389         if (rc != 0) {
2390                 CERROR("cl_req_prep failed: %d\n", rc);
2391                 GOTO(out, req = ERR_PTR(rc));
2392         }
2393
2394         sort_brw_pages(pga, page_count);
2395         rc = osc_brw_prep_request(cmd, cli, oa, NULL, page_count,
2396                                   pga, &req, crattr.cra_capa, 1, 0);
2397         if (rc != 0) {
2398                 CERROR("prep_req failed: %d\n", rc);
2399                 GOTO(out, req = ERR_PTR(rc));
2400         }
2401
2402         if (cmd & OBD_BRW_MEMALLOC)
2403                 req->rq_memalloc = 1;
2404
2405         /* Need to update the timestamps after the request is built in case
2406          * we race with setattr (locally or queued at the OST).  If the OST
2407          * gets the later setattr before the earlier BRW (as determined by the
2408          * request xid), the OST will not use the BRW timestamps.  Sadly, there
2409          * is no obvious way to do this in a single call.  bug 10150 */
2410         cl_req_attr_set(env, clerq, &crattr,
2411                         OBD_MD_FLMTIME|OBD_MD_FLCTIME|OBD_MD_FLATIME);
2412
2413         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2414         aa = ptlrpc_req_async_args(req);
2415         CFS_INIT_LIST_HEAD(&aa->aa_oaps);
2416         cfs_list_splice(rpc_list, &aa->aa_oaps);
2417         CFS_INIT_LIST_HEAD(rpc_list);
2418         aa->aa_clerq = clerq;
2419 out:
2420         if (cmd & OBD_BRW_MEMALLOC)
2421                 cfs_memory_pressure_restore(mpflag);
2422
2423         capa_put(crattr.cra_capa);
2424         if (IS_ERR(req)) {
2425                 if (oa)
2426                         OBDO_FREE(oa);
2427                 if (pga)
2428                         OBD_FREE(pga, sizeof(*pga) * page_count);
2429                 /* This should happen rarely and is pretty bad: it makes the
2430                  * pending list stop following the dirty order. */
2431                 client_obd_list_lock(&cli->cl_loi_list_lock);
2432                 cfs_list_for_each_entry_safe(oap, tmp, rpc_list, oap_rpc_item) {
2433                         cfs_list_del_init(&oap->oap_rpc_item);
2434
2435                         /* Queued sync pages can be torn down while the pages
2436                          * are between the pending list and the rpc. */
2437                         if (oap->oap_interrupted) {
2438                                 CDEBUG(D_INODE, "oap %p interrupted\n", oap);
2439                                 osc_ap_completion(env, cli, NULL, oap, 0,
2440                                                   oap->oap_count);
2441                                 continue;
2442                         }
2443                         osc_ap_completion(env, cli, NULL, oap, 0, PTR_ERR(req));
2444                 }
2445                 if (clerq && !IS_ERR(clerq))
2446                         cl_req_completion(env, clerq, PTR_ERR(req));
2447         }
2448         RETURN(req);
2449 }
2450
2451 /**
2452  * Prepare pages for ASYNC io and put them in the send queue.
2453  *
2454  * \param cmd OBD_BRW_* macros
2455  * \param lop pending pages
2456  *
2457  * \return zero if no pages were added to the send queue.
2458  * \return 1 if pages were successfully added to the send queue.
2459  * \return negative on errors.
2460  */
2461 static int
2462 osc_send_oap_rpc(const struct lu_env *env, struct client_obd *cli,
2463                  struct lov_oinfo *loi, int cmd,
2464                  struct loi_oap_pages *lop, pdl_policy_t pol)
2465 {
2466         struct ptlrpc_request *req;
2467         obd_count page_count = 0;
2468         struct osc_async_page *oap = NULL, *tmp;
2469         struct osc_brw_async_args *aa;
2470         const struct obd_async_page_ops *ops;
2471         CFS_LIST_HEAD(rpc_list);
2472         int srvlock = 0, mem_tight = 0;
2473         struct cl_object *clob = NULL;
2474         obd_off starting_offset = OBD_OBJECT_EOF;
2475         unsigned int ending_offset;
2476         int starting_page_off = 0;
2477         ENTRY;
2478
2479         /* ASYNC_HP pages first.  At present, when the lock covering the pages
2480          * is to be canceled, the pages covered by the lock will be sent out
2481          * with ASYNC_HP.  We have to send them out as soon as possible. */
2482         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_urgent, oap_urgent_item) {
2483                 if (oap->oap_async_flags & ASYNC_HP)
2484                         cfs_list_move(&oap->oap_pending_item, &rpc_list);
2485                 else if (!(oap->oap_brw_flags & OBD_BRW_SYNC))
2486                         /* only do this for writeback pages. */
2487                         cfs_list_move_tail(&oap->oap_pending_item, &rpc_list);
2488                 if (++page_count >= cli->cl_max_pages_per_rpc)
2489                         break;
2490         }
2491         cfs_list_splice_init(&rpc_list, &lop->lop_pending);
2492         page_count = 0;
2493
2494         /* first we find the pages we're allowed to work with */
2495         cfs_list_for_each_entry_safe(oap, tmp, &lop->lop_pending,
2496                                      oap_pending_item) {
2497                 ops = oap->oap_caller_ops;
2498
2499                 LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
2500                          "magic 0x%x\n", oap, oap->oap_magic);
2501
2502                 if (clob == NULL) {
2503                         /* pin object in memory, so that completion call-backs
2504                          * can be safely called under client_obd_list lock. */
2505                         clob = osc_oap2cl_page(oap)->cp_obj;
2506                         cl_object_get(clob);
2507                 }
2508
2509                 if (page_count != 0 &&
2510                     srvlock != !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK)) {
2511                         CDEBUG(D_PAGE, "SRVLOCK flag mismatch,"
2512                                " oap %p, page %p, srvlock %u\n",
2513                                oap, oap->oap_brw_page.pg, (unsigned)!srvlock);
2514                         break;
2515                 }
2516
2517                 /* If there is a gap at the start of this page, it can't merge
2518                  * with any previous page, so we'll hand the network a
2519                  * "fragmented" page array that it can't transfer in 1 RDMA */
2520                 if (oap->oap_obj_off < starting_offset) {
2521                         if (starting_page_off != 0)
2522                                 break;
2523
2524                         starting_page_off = oap->oap_page_off;
2525                         starting_offset = oap->oap_obj_off + starting_page_off;
2526                 } else if (oap->oap_page_off != 0)
2527                         break;
2528
2529                 /* In llite, being 'ready' equates to the page being locked
2530                  * until completion unlocks it.  commit_write submits a page
2531                  * as not ready because its unlock will happen unconditionally
2532                  * as the call returns.  If we race with commit_write giving
2533                  * us that page, we don't want to create a hole in the page
2534                  * stream, so we stop and leave the rpc to be fired by
2535                  * another dirtier or kupdated interval (the not-ready page
2536                  * will still be on the dirty list).  We could call in
2537                  * at the end of ll_file_write to process the queue again. */
2538                 if (!(oap->oap_async_flags & ASYNC_READY)) {
2539                         int rc = ops->ap_make_ready(env, oap->oap_caller_data,
2540                                                     cmd);
2541                         if (rc < 0)
2542                                 CDEBUG(D_INODE, "oap %p page %p returned %d "
2543                                                 "instead of ready\n", oap,
2544                                                 oap->oap_page, rc);
2545                         switch (rc) {
2546                         case -EAGAIN:
2547                                 /* llite is telling us that the page is still
2548                                  * in commit_write and that we should try
2549                                  * to put it in an rpc again later.  We
2550                                  * break out of the loop so we don't create
2551                                  * a hole in the sequence of pages in the rpc
2552                                  * stream. */
2553                                 oap = NULL;
2554                                 break;
2555                         case -EINTR:
2556                                 /* The io isn't needed; tell the checks
2557                                  * below to complete the rpc with EINTR. */
2558                                 cfs_spin_lock(&oap->oap_lock);
2559                                 oap->oap_async_flags |= ASYNC_COUNT_STABLE;
2560                                 cfs_spin_unlock(&oap->oap_lock);
2561                                 oap->oap_count = -EINTR;
2562                                 break;
2563                         case 0:
2564                                 cfs_spin_lock(&oap->oap_lock);
2565                                 oap->oap_async_flags |= ASYNC_READY;
2566                                 cfs_spin_unlock(&oap->oap_lock);
2567                                 break;
2568                         default:
2569                                 LASSERTF(0, "oap %p page %p returned %d "
2570                                             "from make_ready\n", oap,
2571                                             oap->oap_page, rc);
2572                                 break;
2573                         }
2574                 }
2575                 if (oap == NULL)
2576                         break;
2577
2578                 /* take the page out of our book-keeping */
2579                 cfs_list_del_init(&oap->oap_pending_item);
2580                 lop_update_pending(cli, lop, cmd, -1);
2581                 cfs_list_del_init(&oap->oap_urgent_item);
2582
2583                 /* ask the caller for the size of the io as the rpc leaves. */
2584                 if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
2585                         oap->oap_count =
2586                                 ops->ap_refresh_count(env, oap->oap_caller_data,
2587                                                       cmd);
2588                         LASSERT(oap->oap_page_off + oap->oap_count <= CFS_PAGE_SIZE);
2589                 }
2590                 if (oap->oap_count <= 0) {
2591                         CDEBUG(D_CACHE, "oap %p count %d, completing\n", oap,
2592                                oap->oap_count);
2593                         osc_ap_completion(env, cli, NULL,
2594                                           oap, 0, oap->oap_count);
2595                         continue;
2596                 }
2597
2598                 /* now put the page back in our accounting */
2599                 cfs_list_add_tail(&oap->oap_rpc_item, &rpc_list);
2600                 if (page_count++ == 0)
2601                         srvlock = !!(oap->oap_brw_flags & OBD_BRW_SRVLOCK);
2602
2603                 if (oap->oap_brw_flags & OBD_BRW_MEMALLOC)
2604                         mem_tight = 1;
2605
2606                 /* End on a PTLRPC_MAX_BRW_SIZE boundary.  We want full-sized
2607                  * RPCs aligned on PTLRPC_MAX_BRW_SIZE boundaries to help reads
2608                  * have the same alignment as the initial writes that allocated
2609                  * extents on the server. */
2610                 ending_offset = oap->oap_obj_off + oap->oap_page_off +
2611                                 oap->oap_count;
2612                 if (!(ending_offset & (PTLRPC_MAX_BRW_SIZE - 1)))
2613                         break;
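
                /* Example of the boundary test above (illustrative; assumes
                 * PTLRPC_MAX_BRW_SIZE == 1MB): ending_offset 1048576 has no
                 * bits set under the (PTLRPC_MAX_BRW_SIZE - 1) mask, so the
                 * RPC is cut at the boundary; 1048580 would keep going. */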
2614
2615                 if (page_count >= cli->cl_max_pages_per_rpc)
2616                         break;
2617
2618                 /* If there is a gap at the end of this page, it can't merge
2619                  * with any subsequent pages, so we'll hand the network a
2620                  * "fragmented" page array that it can't transfer in 1 RDMA */
2621                 if (oap->oap_page_off + oap->oap_count < CFS_PAGE_SIZE)
2622                         break;
2623         }
2624
2625         loi_list_maint(cli, loi);
2626
2627         client_obd_list_unlock(&cli->cl_loi_list_lock);
2628
2629         if (clob != NULL)
2630                 cl_object_put(env, clob);
2631
2632         if (page_count == 0) {
2633                 client_obd_list_lock(&cli->cl_loi_list_lock);
2634                 RETURN(0);
2635         }
2636
2637         req = osc_build_req(env, cli, &rpc_list, page_count,
2638                             mem_tight ? (cmd | OBD_BRW_MEMALLOC) : cmd);
2639         if (IS_ERR(req)) {
2640                 LASSERT(cfs_list_empty(&rpc_list));
2641                 loi_list_maint(cli, loi);
2642                 RETURN(PTR_ERR(req));
2643         }
2644
2645         aa = ptlrpc_req_async_args(req);
2646
2647         starting_offset &= PTLRPC_MAX_BRW_SIZE - 1;
2648         if (cmd == OBD_BRW_READ) {
2649                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2650                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2651                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2652                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2653         } else {
2654                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2655                 lprocfs_oh_tally(&cli->cl_write_rpc_hist,
2656                                  cli->cl_w_in_flight);
2657                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2658                                       (starting_offset >> CFS_PAGE_SHIFT) + 1);
2659         }
2660
2661         client_obd_list_lock(&cli->cl_loi_list_lock);
2662
2663         if (cmd == OBD_BRW_READ)
2664                 cli->cl_r_in_flight++;
2665         else
2666                 cli->cl_w_in_flight++;
2667
2668         /* queued sync pages can be torn down while they are
2669          * between the pending list and the rpc */
2670         tmp = NULL;
2671         cfs_list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2672                 /* only one oap gets a request reference */
2673                 if (tmp == NULL)
2674                         tmp = oap;
2675                 if (oap->oap_interrupted && !req->rq_intr) {
2676                         CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2677                                oap, req);
2678                         ptlrpc_mark_interrupted(req);
2679                 }
2680         }
2681         if (tmp != NULL)
2682                 tmp->oap_request = ptlrpc_request_addref(req);
2683
2684         DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2685                   page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2686
2687         req->rq_interpret_reply = brw_interpret;
2688
2689         /* XXX: Maybe the caller can check the RPC bulk descriptor to see which
2690          *      CPU/NUMA node the majority of pages were allocated on, and try
2691          *      to assign the async RPC to the CPU core (PDL_POLICY_PREFERRED)
2692          *      to reduce cross-CPU memory traffic.
2693          *
2694          *      But on the other hand, we expect that multiple ptlrpcd threads
2695          *      and the initial write sponsor can run in parallel, especially
2696          *      when data checksum is enabled, a CPU-bound operation that a
2697          *      single ptlrpcd thread cannot process in time.  So more ptlrpcd
2698          *      threads sharing the BRW load (with PDL_POLICY_ROUND) seems better.
2699          */
2700         ptlrpcd_add_req(req, pol, -1);
2701         RETURN(1);
2702 }
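/* A note on the return convention of osc_send_oap_rpc() above, as consumed by
 * osc_check_rpcs0() below: > 0 means an RPC was built and queued on ptlrpcd,
 * 0 means make_ready asked us to back off, and < 0 is an error. */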
2703
2704 #define LOI_DEBUG(LOI, STR, args...)                                     \
2705         CDEBUG(D_INODE, "loi ready %d wr %d:%d rd %d:%d " STR,           \
2706                !cfs_list_empty(&(LOI)->loi_ready_item) ||                \
2707                !cfs_list_empty(&(LOI)->loi_hp_ready_item),               \
2708                (LOI)->loi_write_lop.lop_num_pending,                     \
2709                !cfs_list_empty(&(LOI)->loi_write_lop.lop_urgent),        \
2710                (LOI)->loi_read_lop.lop_num_pending,                      \
2711                !cfs_list_empty(&(LOI)->loi_read_lop.lop_urgent),         \
2712                args)
2713
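/* Usage sketch, mirroring the calls later in this file:
 *
 *      LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap,
 *                oap->oap_page, cmd);
 *
 * which emits a D_INODE CDEBUG line carrying the loi's ready state and its
 * pending write/read queue depths ahead of the caller's message. */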
2714 /* This is called by osc_check_rpcs() to find which objects have pages that
2715  * we could be sending.  These lists are maintained by lop_makes_rpc(). */
2716 struct lov_oinfo *osc_next_loi(struct client_obd *cli)
2717 {
2718         ENTRY;
2719
2720         /* First return objects that have blocked locks so that they
2721          * will be flushed quickly and other clients can get the lock,
2722          * then objects which have pages ready to be stuffed into RPCs */
2723         if (!cfs_list_empty(&cli->cl_loi_hp_ready_list))
2724                 RETURN(cfs_list_entry(cli->cl_loi_hp_ready_list.next,
2725                                       struct lov_oinfo, loi_hp_ready_item));
2726         if (!cfs_list_empty(&cli->cl_loi_ready_list))
2727                 RETURN(cfs_list_entry(cli->cl_loi_ready_list.next,
2728                                       struct lov_oinfo, loi_ready_item));
2729
2730         /* then if we have cache waiters, return all objects with queued
2731          * writes.  This is especially important when many small files
2732          * have filled up the cache and not been fired into rpcs because
2733                  * they don't pass the nr_pending/object threshold */
2734         if (!cfs_list_empty(&cli->cl_cache_waiters) &&
2735             !cfs_list_empty(&cli->cl_loi_write_list))
2736                 RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2737                                       struct lov_oinfo, loi_write_item));
2738
2739         /* then return all queued objects when we have an invalid import
2740          * so that they get flushed */
2741         if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
2742                 if (!cfs_list_empty(&cli->cl_loi_write_list))
2743                         RETURN(cfs_list_entry(cli->cl_loi_write_list.next,
2744                                               struct lov_oinfo,
2745                                               loi_write_item));
2746                 if (!cfs_list_empty(&cli->cl_loi_read_list))
2747                         RETURN(cfs_list_entry(cli->cl_loi_read_list.next,
2748                                               struct lov_oinfo, loi_read_item));
2749         }
2750         RETURN(NULL);
2751 }
2752
2753 static int osc_max_rpc_in_flight(struct client_obd *cli, struct lov_oinfo *loi)
2754 {
2755         struct osc_async_page *oap;
2756         int hprpc = 0;
2757
2758         if (!cfs_list_empty(&loi->loi_write_lop.lop_urgent)) {
2759                 oap = cfs_list_entry(loi->loi_write_lop.lop_urgent.next,
2760                                      struct osc_async_page, oap_urgent_item);
2761                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2762         }
2763
2764         if (!hprpc && !cfs_list_empty(&loi->loi_read_lop.lop_urgent)) {
2765                 oap = cfs_list_entry(loi->loi_read_lop.lop_urgent.next,
2766                                      struct osc_async_page, oap_urgent_item);
2767                 hprpc = !!(oap->oap_async_flags & ASYNC_HP);
2768         }
2769
2770         return rpcs_in_flight(cli) >= cli->cl_max_rpcs_in_flight + hprpc;
2771 }
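/* A small worked example: with cl_max_rpcs_in_flight == 8 (an illustrative
 * value), osc_max_rpc_in_flight() caps an ordinary object at 8 concurrent
 * RPCs, but an object whose next urgent page is flagged ASYNC_HP is allowed
 * a 9th, so high-priority lock-blocking traffic is not starved by the cap. */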
2772
2773 /* called with the loi list lock held */
2774 static void osc_check_rpcs0(const struct lu_env *env, struct client_obd *cli, int ptlrpc)
2775 {
2776         struct lov_oinfo *loi;
2777         int rc = 0, race_counter = 0;
2778         pdl_policy_t pol;
2779         ENTRY;
2780
2781         pol = ptlrpc ? PDL_POLICY_SAME : PDL_POLICY_ROUND;
2782
2783         while ((loi = osc_next_loi(cli)) != NULL) {
2784                 LOI_DEBUG(loi, "%lu in flight\n", rpcs_in_flight(cli));
2785
2786                 if (osc_max_rpc_in_flight(cli, loi))
2787                         break;
2788
2789                 /* attempt some read/write balancing by alternating between
2790                  * reads and writes in an object.  The makes_rpc checks here
2791                  * would be redundant if we were getting read/write work items
2792                  * instead of objects.  We don't want send_oap_rpc to drain a
2793                  * partial read pending queue when we're given this object to
2794                  * do write io on while there are cache waiters */
2795                 if (lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
2796                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_WRITE,
2797                                               &loi->loi_write_lop, pol);
2798                         if (rc < 0) {
2799                                 CERROR("Write request failed with %d\n", rc);
2800
2801                                 /* osc_send_oap_rpc failed, mostly because of
2802                                  * memory pressure.
2803                                  *
2804                                 * We can't break out here, because if:
2805                                 *  - a page was submitted by osc_io_submit,
2806                                 *    so the page is locked;
2807                                 *  - there is no request in flight;
2808                                 *  - there is no subsequent request;
2809                                 * then the system will be in a live-lock
2810                                 * state, because there is no chance to call
2811                                 * osc_io_unplug() and osc_check_rpcs() any
2812                                 * more. pdflush can't help in this case
2813                                 * because it might be blocked grabbing
2814                                 * the page lock, as mentioned above.
2815                                  *
2816                                  * Anyway, continue to drain pages. */
2817                                 /* break; */
2818                         }
2819
2820                         if (rc > 0)
2821                                 race_counter = 0;
2822                         else if (rc == 0)
2823                                 race_counter++;
2824                 }
2825                 if (lop_makes_rpc(cli, &loi->loi_read_lop, OBD_BRW_READ)) {
2826                         rc = osc_send_oap_rpc(env, cli, loi, OBD_BRW_READ,
2827                                               &loi->loi_read_lop, pol);
2828                         if (rc < 0)
2829                                 CERROR("Read request failed with %d\n", rc);
2830
2831                         if (rc > 0)
2832                                 race_counter = 0;
2833                         else if (rc == 0)
2834                                 race_counter++;
2835                 }
2836
2837                 /* attempt some inter-object balancing by issuing rpcs
2838                  * for each object in turn */
2839                 if (!cfs_list_empty(&loi->loi_hp_ready_item))
2840                         cfs_list_del_init(&loi->loi_hp_ready_item);
2841                 if (!cfs_list_empty(&loi->loi_ready_item))
2842                         cfs_list_del_init(&loi->loi_ready_item);
2843                 if (!cfs_list_empty(&loi->loi_write_item))
2844                         cfs_list_del_init(&loi->loi_write_item);
2845                 if (!cfs_list_empty(&loi->loi_read_item))
2846                         cfs_list_del_init(&loi->loi_read_item);
2847
2848                 loi_list_maint(cli, loi);
2849
2850                 /* send_oap_rpc returns 0 when make_ready tells it to
2851                  * back off.  llite's make_ready does this when it tries
2852                  * to lock a page queued for write that is already locked.
2853                  * We want to try sending rpcs from many objects, but we
2854                  * don't want to spin failing with 0.  */
2855                 if (race_counter == 10)
2856                         break;
2857         }
2858 }
2859
2860 void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
2861 {
2862         osc_check_rpcs0(env, cli, 0);
2863 }
2864
2865 /**
2866  * Non-blocking version of osc_enter_cache() that consumes grant only when it
2867  * is available.
2868  */
2869 int osc_enter_cache_try(const struct lu_env *env,
2870                         struct client_obd *cli, struct lov_oinfo *loi,
2871                         struct osc_async_page *oap, int transient)
2872 {
2873         int has_grant;
2874
2875         has_grant = cli->cl_avail_grant >= CFS_PAGE_SIZE;
2876         if (has_grant) {
2877                 osc_consume_write_grant(cli, &oap->oap_brw_page);
2878                 if (transient) {
2879                         cli->cl_dirty_transit += CFS_PAGE_SIZE;
2880                         cfs_atomic_inc(&obd_dirty_transit_pages);
2881                         oap->oap_brw_flags |= OBD_BRW_NOCACHE;
2882                 }
2883         }
2884         return has_grant;
2885 }
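/* A minimal caller sketch (modelled on osc_enter_cache() below, which holds
 * cl_loi_list_lock and has already checked the dirty limits):
 *
 *      if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
 *          osc_enter_cache_try(env, cli, loi, oap, 0))
 *              RETURN(0);
 *
 * On success a page of grant has been consumed; on failure the caller falls
 * back to the blocking osc_enter_cache() path. */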
2886
2887 /* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
2888  * grant or cache space. */
2889 static int osc_enter_cache(const struct lu_env *env,
2890                            struct client_obd *cli, struct lov_oinfo *loi,
2891                            struct osc_async_page *oap)
2892 {
2893         struct osc_cache_waiter ocw;
2894         struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
2895         int rc = -EDQUOT;
2896         ENTRY;
2897
2898         CDEBUG(D_CACHE, "dirty: %ld/%d dirty_max: %ld/%d dropped: %lu "
2899                "grant: %lu\n", cli->cl_dirty, cfs_atomic_read(&obd_dirty_pages),
2900                cli->cl_dirty_max, obd_max_dirty_pages,
2901                cli->cl_lost_grant, cli->cl_avail_grant);
2902
2903         /* force the caller to try sync io.  this can jump the list
2904          * of queued writes and create a discontiguous rpc stream */
2905         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
2906             cli->cl_dirty_max < CFS_PAGE_SIZE     ||
2907             cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
2908                 RETURN(-EDQUOT);
2909
2910         /* Hopefully normal case - cache space and write credits available */
2911         if (cli->cl_dirty + CFS_PAGE_SIZE <= cli->cl_dirty_max &&
2912             cfs_atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages &&
2913             osc_enter_cache_try(env, cli, loi, oap, 0))
2914                 RETURN(0);
2915
2916         /* We can get here for two reasons: too many dirty pages in cache, or
2917          * we have run out of grant. In both cases we should write dirty pages out.
2918          * Adding a cache waiter will trigger urgent write-out no matter what
2919          * the RPC size will be.
2920          * The exit condition is no available grant and no dirty pages cached;
2921          * that really means there is no space on the OST. */
2922         cfs_waitq_init(&ocw.ocw_waitq);
2923         ocw.ocw_oap = oap;
2924         while (cli->cl_dirty > 0) {
2925                 cfs_list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
2926                 ocw.ocw_rc = 0;
2927
2928                 loi_list_maint(cli, loi);
2929                 osc_check_rpcs(env, cli);
2930                 client_obd_list_unlock(&cli->cl_loi_list_lock);
2931
2932                 CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
2933                        cli->cl_import->imp_obd->obd_name, &ocw, oap);
2934
2935                 rc = l_wait_event(ocw.ocw_waitq, cfs_list_empty(&ocw.ocw_entry), &lwi);
2936
2937                 client_obd_list_lock(&cli->cl_loi_list_lock);
2938                 cfs_list_del_init(&ocw.ocw_entry);
2939                 if (rc < 0)
2940                         break;
2941
2942                 rc = ocw.ocw_rc;
2943                 if (rc != -EDQUOT)
2944                         break;
2945         }
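        /* The wait above ends once ocw_entry has been removed from
         * cl_cache_waiters (presumably by osc_wake_cache_waiters() when grant
         * or cache space is freed) or the task is interrupted; ocw_rc then
         * carries the waker's verdict back out through rc. */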
2946
2947         RETURN(rc);
2948 }
2949
2950
2951 int osc_prep_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
2952                         struct lov_oinfo *loi, cfs_page_t *page,
2953                         obd_off offset, const struct obd_async_page_ops *ops,
2954                         void *data, void **res, int nocache,
2955                         struct lustre_handle *lockh)
2956 {
2957         struct osc_async_page *oap;
2958
2959         ENTRY;
2960
2961         if (!page)
2962                 return cfs_size_round(sizeof(*oap));
2963
2964         oap = *res;
2965         oap->oap_magic = OAP_MAGIC;
2966         oap->oap_cli = &exp->exp_obd->u.cli;
2967         oap->oap_loi = loi;
2968
2969         oap->oap_caller_ops = ops;
2970         oap->oap_caller_data = data;
2971
2972         oap->oap_page = page;
2973         oap->oap_obj_off = offset;
2974         if (!client_is_remote(exp) &&
2975             cfs_capable(CFS_CAP_SYS_RESOURCE))
2976                 oap->oap_brw_flags = OBD_BRW_NOQUOTA;
2977
2978         LASSERT(!(offset & ~CFS_PAGE_MASK));
2979
2980         CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
2981         CFS_INIT_LIST_HEAD(&oap->oap_urgent_item);
2982         CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
2983         CFS_INIT_LIST_HEAD(&oap->oap_page_list);
2984
2985         cfs_spin_lock_init(&oap->oap_lock);
2986         CDEBUG(D_CACHE, "oap %p page %p obj off "LPU64"\n", oap, page, offset);
2987         RETURN(0);
2988 }
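/* The function above is evidently meant to be called twice: once with
 * page == NULL to learn how much room an oap needs, and again with *res
 * pointing at storage of that size so the oap can be filled in. */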
2989
2990 int osc_queue_async_io(const struct lu_env *env, struct obd_export *exp,
2991                        struct lov_stripe_md *lsm, struct lov_oinfo *loi,
2992                        struct osc_async_page *oap, int cmd, int off,
2993                        int count, obd_flag brw_flags, enum async_flags async_flags)
2994 {
2995         struct client_obd *cli = &exp->exp_obd->u.cli;
2996         int rc = 0;
2997         ENTRY;
2998
2999         if (oap->oap_magic != OAP_MAGIC)
3000                 RETURN(-EINVAL);
3001
3002         if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
3003                 RETURN(-EIO);
3004
3005         if (!cfs_list_empty(&oap->oap_pending_item) ||
3006             !cfs_list_empty(&oap->oap_urgent_item) ||
3007             !cfs_list_empty(&oap->oap_rpc_item))
3008                 RETURN(-EBUSY);
3009
3010         /* check if the file's owner/group is over quota */
3011         if ((cmd & OBD_BRW_WRITE) && !(cmd & OBD_BRW_NOQUOTA)) {
3012                 struct cl_object *obj;
3013                 struct cl_attr    attr; /* XXX put attr into thread info */
3014                 unsigned int qid[MAXQUOTAS];
3015
3016                 obj = cl_object_top(osc_oap2cl_page(oap)->cp_obj);
3017
3018                 cl_object_attr_lock(obj);
3019                 rc = cl_object_attr_get(env, obj, &attr);
3020                 cl_object_attr_unlock(obj);
3021
3022                 qid[USRQUOTA] = attr.cat_uid;
3023                 qid[GRPQUOTA] = attr.cat_gid;
3024                 if (rc == 0 &&
3025                     osc_quota_chkdq(cli, qid) == NO_QUOTA)
3026                         rc = -EDQUOT;
3027                 if (rc)
3028                         RETURN(rc);
3029         }
3030
3031         if (loi == NULL)
3032                 loi = lsm->lsm_oinfo[0];
3033
3034         client_obd_list_lock(&cli->cl_loi_list_lock);
3035
3036         LASSERT(off + count <= CFS_PAGE_SIZE);
3037         oap->oap_cmd = cmd;
3038         oap->oap_page_off = off;
3039         oap->oap_count = count;
3040         oap->oap_brw_flags = brw_flags;
3041         /* Give a hint to OST that requests are coming from kswapd - bug19529 */
3042         if (cfs_memory_pressure_get())
3043                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
3044         cfs_spin_lock(&oap->oap_lock);
3045         oap->oap_async_flags = async_flags;
3046         cfs_spin_unlock(&oap->oap_lock);
3047
3048         if (cmd & OBD_BRW_WRITE) {
3049                 rc = osc_enter_cache(env, cli, loi, oap);
3050                 if (rc) {
3051                         client_obd_list_unlock(&cli->cl_loi_list_lock);
3052                         RETURN(rc);
3053                 }
3054         }
3055
3056         LOI_DEBUG(loi, "oap %p page %p added for cmd %d\n", oap, oap->oap_page,
3057                   cmd);
3058
3059         osc_oap_to_pending(oap);
3060         loi_list_maint(cli, loi);
3061         if (!osc_max_rpc_in_flight(cli, loi) &&
3062             lop_makes_rpc(cli, &loi->loi_write_lop, OBD_BRW_WRITE)) {
3063                 LASSERT(cli->cl_writeback_work != NULL);
3064                 rc = ptlrpcd_queue_work(cli->cl_writeback_work);
3065
3066                 CDEBUG(D_CACHE, "Queued writeback work for client obd %p/%d.\n",
3067                        cli, rc);
3068         }
3069         client_obd_list_unlock(&cli->cl_loi_list_lock);
3070
3071         RETURN(0);
3072 }
3073
3074 /* aka (~was & now & flag), but this is more clear :) */
3075 #define SETTING(was, now, flag) (!(was & flag) && (now & flag))
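/* e.g. SETTING(0, ASYNC_READY, ASYNC_READY) is true while
 * SETTING(ASYNC_READY, ASYNC_READY, ASYNC_READY) is false: the macro fires
 * only when `flag' is present in `now' but was absent from `was'. */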
3076
3077 int osc_set_async_flags_base(struct client_obd *cli,
3078                              struct lov_oinfo *loi, struct osc_async_page *oap,
3079                              obd_flag async_flags)
3080 {
3081         struct loi_oap_pages *lop;
3082         int flags = 0;
3083         ENTRY;
3084
3085         LASSERT(!cfs_list_empty(&oap->oap_pending_item));
3086
3087         if (oap->oap_cmd & OBD_BRW_WRITE) {
3088                 lop = &loi->loi_write_lop;
3089         } else {
3090                 lop = &loi->loi_read_lop;
3091         }
3092
3093         if ((oap->oap_async_flags & async_flags) == async_flags)
3094                 RETURN(0);
3095
3096         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_READY))
3097                 flags |= ASYNC_READY;
3098
3099         if (SETTING(oap->oap_async_flags, async_flags, ASYNC_URGENT) &&
3100             cfs_list_empty(&oap->oap_rpc_item)) {
3101                 if (oap->oap_async_flags & ASYNC_HP)
3102                         cfs_list_add(&oap->oap_urgent_item, &lop->lop_urgent);
3103                 else
3104                         cfs_list_add_tail(&oap->oap_urgent_item,
3105                                           &lop->lop_urgent);
3106                 flags |= ASYNC_URGENT;
3107                 loi_list_maint(cli, loi);
3108         }
3109         cfs_spin_lock(&oap->oap_lock);
3110         oap->oap_async_flags |= flags;
3111         cfs_spin_unlock(&oap->oap_lock);
3112
3113         LOI_DEBUG(loi, "oap %p page %p has flags %x\n", oap, oap->oap_page,
3114                         oap->oap_async_flags);
3115         RETURN(0);
3116 }
3117
3118 int osc_teardown_async_page(struct obd_export *exp, struct lov_stripe_md *lsm,
3119                             struct lov_oinfo *loi, struct osc_async_page *oap)
3120 {
3121         struct client_obd *cli = &exp->exp_obd->u.cli;
3122         struct loi_oap_pages *lop;
3123         int rc = 0;
3124         ENTRY;
3125
3126         if (oap->oap_magic != OAP_MAGIC)
3127                 RETURN(-EINVAL);
3128
3129         if (loi == NULL)
3130                 loi = lsm->lsm_oinfo[0];
3131
3132         if (oap->oap_cmd & OBD_BRW_WRITE) {
3133                 lop = &loi->loi_write_lop;
3134         } else {
3135                 lop = &loi->loi_read_lop;
3136         }
3137
3138         client_obd_list_lock(&cli->cl_loi_list_lock);
3139
3140         if (!cfs_list_empty(&oap->oap_rpc_item))
3141                 GOTO(out, rc = -EBUSY);
3142
3143         osc_exit_cache(cli, oap, 0);
3144         osc_wake_cache_waiters(cli);
3145
3146         if (!cfs_list_empty(&oap->oap_urgent_item)) {
3147                 cfs_list_del_init(&oap->oap_urgent_item);
3148                 cfs_spin_lock(&oap->oap_lock);
3149                 oap->oap_async_flags &= ~(ASYNC_URGENT | ASYNC_HP);
3150                 cfs_spin_unlock(&oap->oap_lock);
3151         }
3152         if (!cfs_list_empty(&oap->oap_pending_item)) {
3153                 cfs_list_del_init(&oap->oap_pending_item);
3154                 lop_update_pending(cli, lop, oap->oap_cmd, -1);
3155         }
3156         loi_list_maint(cli, loi);
3157         LOI_DEBUG(loi, "oap %p page %p torn down\n", oap, oap->oap_page);
3158 out:
3159         client_obd_list_unlock(&cli->cl_loi_list_lock);
3160         RETURN(rc);
3161 }
3162
3163 static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
3164                                         struct ldlm_enqueue_info *einfo)
3165 {
3166         void *data = einfo->ei_cbdata;
3167         int set = 0;
3168
3169         LASSERT(lock != NULL);
3170         LASSERT(lock->l_blocking_ast == einfo->ei_cb_bl);
3171         LASSERT(lock->l_resource->lr_type == einfo->ei_type);
3172         LASSERT(lock->l_completion_ast == einfo->ei_cb_cp);
3173         LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
3174
3175         lock_res_and_lock(lock);
3176         cfs_spin_lock(&osc_ast_guard);
3177
3178         if (lock->l_ast_data == NULL)
3179                 lock->l_ast_data = data;
3180         if (lock->l_ast_data == data)
3181                 set = 1;
3182
3183         cfs_spin_unlock(&osc_ast_guard);
3184         unlock_res_and_lock(lock);
3185
3186         return set;
3187 }
3188
3189 static int osc_set_data_with_check(struct lustre_handle *lockh,
3190                                    struct ldlm_enqueue_info *einfo)
3191 {
3192         struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3193         int set = 0;
3194
3195         if (lock != NULL) {
3196                 set = osc_set_lock_data_with_check(lock, einfo);
3197                 LDLM_LOCK_PUT(lock);
3198         } else
3199                 CERROR("lockh %p, data %p - client evicted?\n",
3200                        lockh, einfo->ei_cbdata);
3201         return set;
3202 }
3203
3204 static int osc_change_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3205                              ldlm_iterator_t replace, void *data)
3206 {
3207         struct ldlm_res_id res_id;
3208         struct obd_device *obd = class_exp2obd(exp);
3209
3210         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3211         ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3212         return 0;
3213 }
3214
3215 /* Find any ldlm lock of the inode in osc.
3216  * Return: 0    none found
3217  *         1    found one
3218  *       < 0    error */
3219 static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
3220                            ldlm_iterator_t replace, void *data)
3221 {
3222         struct ldlm_res_id res_id;
3223         struct obd_device *obd = class_exp2obd(exp);
3224         int rc = 0;
3225
3226         osc_build_res_name(lsm->lsm_object_id, lsm->lsm_object_seq, &res_id);
3227         rc = ldlm_resource_iterate(obd->obd_namespace, &res_id, replace, data);
3228         if (rc == LDLM_ITER_STOP)
3229                 return(1);
3230         if (rc == LDLM_ITER_CONTINUE)
3231                 return(0);
3232         return(rc);
3233 }
3234
3235 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
3236                             obd_enqueue_update_f upcall, void *cookie,
3237                             int *flags, int agl, int rc)
3238 {
3239         int intent = *flags & LDLM_FL_HAS_INTENT;
3240         ENTRY;
3241
3242         if (intent) {
3243                 /* The request was created before ldlm_cli_enqueue call. */
3244                 if (rc == ELDLM_LOCK_ABORTED) {
3245                         struct ldlm_reply *rep;
3246                         rep = req_capsule_server_get(&req->rq_pill,
3247                                                      &RMF_DLM_REP);
3248
3249                         LASSERT(rep != NULL);
3250                         if (rep->lock_policy_res1)
3251                                 rc = rep->lock_policy_res1;
3252                 }
3253         }
3254
3255         if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
3256             (rc == 0)) {
3257                 *flags |= LDLM_FL_LVB_READY;
3258                 CDEBUG(D_INODE,"got kms "LPU64" blocks "LPU64" mtime "LPU64"\n",
3259                        lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
3260         }
3261
3262         /* Call the update callback. */
3263         rc = (*upcall)(cookie, rc);
3264         RETURN(rc);
3265 }
3266
3267 static int osc_enqueue_interpret(const struct lu_env *env,
3268                                  struct ptlrpc_request *req,
3269                                  struct osc_enqueue_args *aa, int rc)
3270 {
3271         struct ldlm_lock *lock;
3272         struct lustre_handle handle;
3273         __u32 mode;
3274         struct ost_lvb *lvb;
3275         __u32 lvb_len;
3276         int *flags = aa->oa_flags;
3277
3278         /* Make a local copy of a lock handle and a mode, because aa->oa_*
3279          * might be freed anytime after lock upcall has been called. */
3280         lustre_handle_copy(&handle, aa->oa_lockh);
3281         mode = aa->oa_ei->ei_mode;
3282
3283         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
3284          * be valid. */
3285         lock = ldlm_handle2lock(&handle);
3286
3287         /* Take an additional reference so that a blocking AST that
3288          * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
3289          * to arrive after an upcall has been executed by
3290          * osc_enqueue_fini(). */
3291         ldlm_lock_addref(&handle, mode);
3292
3293         /* Let the CP AST grant the lock first. */
3294         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
3295
3296         if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
3297                 lvb = NULL;
3298                 lvb_len = 0;
3299         } else {
3300                 lvb = aa->oa_lvb;
3301                 lvb_len = sizeof(*aa->oa_lvb);
3302         }
3303
3304         /* Complete the lock acquisition procedure. */
3305         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
3306                                    mode, flags, lvb, lvb_len, &handle, rc);
3307         /* Complete osc stuff. */
3308         rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
3309                               flags, aa->oa_agl, rc);
3310
3311         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
3312
3313         /* Release the lock for async request. */
3314         if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
3315                 /*
3316                  * Releases a reference taken by ldlm_cli_enqueue(), if it is
3317                  * not already released by
3318                  * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
3319                  */
3320                 ldlm_lock_decref(&handle, mode);
3321
3322         LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n",
3323                  aa->oa_lockh, req, aa);
3324         ldlm_lock_decref(&handle, mode);
3325         LDLM_LOCK_PUT(lock);
3326         return rc;
3327 }
3328
3329 void osc_update_enqueue(struct lustre_handle *lov_lockhp,
3330                         struct lov_oinfo *loi, int flags,
3331                         struct ost_lvb *lvb, __u32 mode, int rc)
3332 {
3333         struct ldlm_lock *lock = ldlm_handle2lock(lov_lockhp);
3334
3335         if (rc == ELDLM_OK) {
3336                 __u64 tmp;
3337
3338                 LASSERT(lock != NULL);
3339                 loi->loi_lvb = *lvb;
3340                 tmp = loi->loi_lvb.lvb_size;
3341                 /* Extend KMS up to the end of this lock and no further.
3342                  * A lock on [x,y] means a KMS of up to y + 1 bytes! */
3343                 if (tmp > lock->l_policy_data.l_extent.end)
3344                         tmp = lock->l_policy_data.l_extent.end + 1;
3345                 if (tmp >= loi->loi_kms) {
3346                         LDLM_DEBUG(lock, "lock acquired, setting rss="LPU64
3347                                    ", kms="LPU64, loi->loi_lvb.lvb_size, tmp);
3348                         loi_kms_set(loi, tmp);
3349                 } else {
3350                         LDLM_DEBUG(lock, "lock acquired, setting rss="
3351                                    LPU64"; leaving kms="LPU64", end="LPU64,
3352                                    loi->loi_lvb.lvb_size, loi->loi_kms,
3353                                    lock->l_policy_data.l_extent.end);
3354                 }
3355                 ldlm_lock_allow_match(lock);
3356         } else if (rc == ELDLM_LOCK_ABORTED && (flags & LDLM_FL_HAS_INTENT)) {
3357                 LASSERT(lock != NULL);
3358                 loi->loi_lvb = *lvb;
3359                 ldlm_lock_allow_match(lock);
3360                 CDEBUG(D_INODE, "glimpsed, setting rss="LPU64"; leaving"
3361                        " kms="LPU64"\n", loi->loi_lvb.lvb_size, loi->loi_kms);
3362                 rc = ELDLM_OK;
3363         }
3364
3365         if (lock != NULL) {
3366                 if (rc != ELDLM_OK)
3367                         ldlm_lock_fail_match(lock);
3368
3369                 LDLM_LOCK_PUT(lock);
3370         }
3371 }
3372 EXPORT_SYMBOL(osc_update_enqueue);
3373
3374 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
3375
3376 /* When enqueuing asynchronously, locks are not ordered; we can obtain a lock
3377  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
3378  * other synchronous requests, but holding some locks while trying to obtain
3379  * others may take a considerable amount of time in the case of OST failure; and
3380  * when a client does not release locks that other sync requests are waiting on,
3381  * the client is excluded from the cluster -- such scenarios make life difficult,
3382  * so release locks just after they are obtained. */
3383 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3384                      int *flags, ldlm_policy_data_t *policy,
3385                      struct ost_lvb *lvb, int kms_valid,
3386                      obd_enqueue_update_f upcall, void *cookie,
3387                      struct ldlm_enqueue_info *einfo,
3388                      struct lustre_handle *lockh,
3389                      struct ptlrpc_request_set *rqset, int async, int agl)
3390 {
3391         struct obd_device *obd = exp->exp_obd;
3392         struct ptlrpc_request *req = NULL;
3393         int intent = *flags & LDLM_FL_HAS_INTENT;
3394         int match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
3395         ldlm_mode_t mode;
3396         int rc;
3397         ENTRY;
3398
3399         /* Filesystem lock extents are extended to page boundaries so that
3400          * dealing with the page cache is a little smoother.  */
3401         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3402         policy->l_extent.end |= ~CFS_PAGE_MASK;
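        /* A worked example, assuming (hypothetically) 4KB pages so that
         * ~CFS_PAGE_MASK == 0xfff: a byte range [5000, 9000] becomes
         *      start: 5000 - (5000 & 0xfff) = 4096
         *      end:   9000 | 0xfff          = 12287
         * i.e. the whole-page extent [4096, 12287]. */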
3403
3404         /*
3405          * kms is not valid when either object is completely fresh (so that no
3406          * locks are cached), or object was evicted. In the latter case cached
3407          * lock cannot be used, because it would prime inode state with
3408          * potentially stale LVB.
3409          */
3410         if (!kms_valid)
3411                 goto no_match;
3412
3413         /* Next, search for already existing extent locks that will cover us */
3414         /* If we're trying to read, we also search for an existing PW lock.  The
3415          * VFS and page cache already protect us locally, so lots of readers/
3416          * writers can share a single PW lock.
3417          *
3418          * There are problems with conversion deadlocks, so instead of
3419          * converting a read lock to a write lock, we'll just enqueue a new
3420          * one.
3421          *
3422          * At some point we should cancel the read lock instead of making them
3423          * send us a blocking callback, but there are problems with canceling
3424          * locks out from other users right now, too. */
3425         mode = einfo->ei_mode;
3426         if (einfo->ei_mode == LCK_PR)
3427                 mode |= LCK_PW;
3428         mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
3429                                einfo->ei_type, policy, mode, lockh, 0);
3430         if (mode) {
3431                 struct ldlm_lock *matched = ldlm_handle2lock(lockh);
3432
3433                 if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
3434                         /* For AGL, if enqueue RPC is sent but the lock is not
3435                          * granted, then skip processing this stripe.
3436                          * Return -ECANCELED to tell the caller. */
3437                         ldlm_lock_decref(lockh, mode);
3438                         LDLM_LOCK_PUT(matched);
3439                         RETURN(-ECANCELED);
3440                 } else if (osc_set_lock_data_with_check(matched, einfo)) {
3441                         *flags |= LDLM_FL_LVB_READY;
3442                         /* addref the lock only if not async requests and PW
3443                          * lock is matched whereas we asked for PR. */
3444                         if (!rqset && einfo->ei_mode != mode)
3445                                 ldlm_lock_addref(lockh, LCK_PR);
3446                         if (intent) {
3447                                 /* I would like to be able to ASSERT here that
3448                                  * rss <= kms, but I can't, for reasons which
3449                                  * are explained in lov_enqueue() */
3450                         }
3451
3452                         /* We already have a lock, and it's referenced */
3453                         (*upcall)(cookie, ELDLM_OK);
3454
3455                         if (einfo->ei_mode != mode)
3456                                 ldlm_lock_decref(lockh, LCK_PW);
3457                         else if (rqset)
3458                                 /* For async requests, decref the lock. */
3459                                 ldlm_lock_decref(lockh, einfo->ei_mode);
3460                         LDLM_LOCK_PUT(matched);
3461                         RETURN(ELDLM_OK);
3462                 } else {
3463                         ldlm_lock_decref(lockh, mode);
3464                         LDLM_LOCK_PUT(matched);
3465                 }
3466         }
3467
3468  no_match:
3469         if (intent) {
3470                 CFS_LIST_HEAD(cancels);
3471                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3472                                            &RQF_LDLM_ENQUEUE_LVB);
3473                 if (req == NULL)
3474                         RETURN(-ENOMEM);
3475
3476                 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
3477                 if (rc) {
3478                         ptlrpc_request_free(req);
3479                         RETURN(rc);
3480                 }
3481
3482                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
3483                                      sizeof *lvb);
3484                 ptlrpc_request_set_replen(req);
3485         }
3486
3487         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
3488         *flags &= ~LDLM_FL_BLOCK_GRANTED;
3489
3490         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
3491                               sizeof(*lvb), lockh, async);
3492         if (rqset) {
3493                 if (!rc) {
3494                         struct osc_enqueue_args *aa;
3495                         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3496                         aa = ptlrpc_req_async_args(req);
3497                         aa->oa_ei = einfo;
3498                         aa->oa_exp = exp;
3499                         aa->oa_flags  = flags;
3500                         aa->oa_upcall = upcall;
3501                         aa->oa_cookie = cookie;
3502                         aa->oa_lvb    = lvb;
3503                         aa->oa_lockh  = lockh;
3504                         aa->oa_agl    = !!agl;
3505
3506                         req->rq_interpret_reply =
3507                                 (ptlrpc_interpterer_t)osc_enqueue_interpret;
3508                         if (rqset == PTLRPCD_SET)
3509                                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
3510                         else
3511                                 ptlrpc_set_add_req(rqset, req);
3512                 } else if (intent) {
3513                         ptlrpc_req_finished(req);
3514                 }
3515                 RETURN(rc);
3516         }
3517
3518         rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
3519         if (intent)
3520                 ptlrpc_req_finished(req);
3521
3522         RETURN(rc);
3523 }
3524
3525 static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
3526                        struct ldlm_enqueue_info *einfo,
3527                        struct ptlrpc_request_set *rqset)
3528 {
3529         struct ldlm_res_id res_id;
3530         int rc;
3531         ENTRY;
3532
3533         osc_build_res_name(oinfo->oi_md->lsm_object_id,
3534                            oinfo->oi_md->lsm_object_seq, &res_id);
3535
3536         rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
3537                               &oinfo->oi_md->lsm_oinfo[0]->loi_lvb,
3538                               oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
3539                               oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
3540                               rqset, rqset != NULL, 0);
3541         RETURN(rc);
3542 }
3543
3544 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
3545                    __u32 type, ldlm_policy_data_t *policy, __u32 mode,
3546                    int *flags, void *data, struct lustre_handle *lockh,
3547                    int unref)
3548 {
3549         struct obd_device *obd = exp->exp_obd;
3550         int lflags = *flags;
3551         ldlm_mode_t rc;
3552         ENTRY;
3553
3554         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
3555                 RETURN(-EIO);
3556
3557         /* Filesystem lock extents are extended to page boundaries so that
3558          * dealing with the page cache is a little smoother */
3559         policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
3560         policy->l_extent.end |= ~CFS_PAGE_MASK;
3561
3562         /* Next, search for already existing extent locks that will cover us */
3563         /* If we're trying to read, we also search for an existing PW lock.  The
3564          * VFS and page cache already protect us locally, so lots of readers/
3565          * writers can share a single PW lock. */
3566         rc = mode;
3567         if (mode == LCK_PR)
3568                 rc |= LCK_PW;
3569         rc = ldlm_lock_match(obd->obd_namespace, lflags,
3570                              res_id, type, policy, rc, lockh, unref);
3571         if (rc) {
3572                 if (data != NULL) {
3573                         if (!osc_set_data_with_check(lockh, data)) {
3574                                 if (!(lflags & LDLM_FL_TEST_LOCK))
3575                                         ldlm_lock_decref(lockh, rc);
3576                                 RETURN(0);
3577                         }
3578                 }
3579                 if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
3580                         ldlm_lock_addref(lockh, LCK_PR);
3581                         ldlm_lock_decref(lockh, LCK_PW);
3582                 }
3583                 RETURN(rc);
3584         }
3585         RETURN(rc);
3586 }
3587
3588 int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
3589 {
3590         ENTRY;
3591
3592         if (unlikely(mode == LCK_GROUP))
3593                 ldlm_lock_decref_and_cancel(lockh, mode);
3594         else
3595                 ldlm_lock_decref(lockh, mode);
3596
3597         RETURN(0);
3598 }
3599
3600 static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
3601                       __u32 mode, struct lustre_handle *lockh)
3602 {
3603         ENTRY;
3604         RETURN(osc_cancel_base(lockh, mode));
3605 }
3606
3607 static int osc_cancel_unused(struct obd_export *exp,
3608                              struct lov_stripe_md *lsm,
3609                              ldlm_cancel_flags_t flags,
3610                              void *opaque)
3611 {
3612         struct obd_device *obd = class_exp2obd(exp);
3613         struct ldlm_res_id res_id, *resp = NULL;
3614
3615         if (lsm != NULL) {
3616                 resp = osc_build_res_name(lsm->lsm_object_id,
3617                                           lsm->lsm_object_seq, &res_id);
3618         }
3619
3620         return ldlm_cli_cancel_unused(obd->obd_namespace, resp, flags, opaque);
3621 }
3622
3623 static int osc_statfs_interpret(const struct lu_env *env,
3624                                 struct ptlrpc_request *req,
3625                                 struct osc_async_args *aa, int rc)
3626 {
3627         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
3628         struct obd_statfs *msfs;
3629         __u64 used;
3630         ENTRY;
3631
3632         if (rc == -EBADR)
3633                 /* The request has in fact never been sent
3634                  * due to issues at a higher level (LOV).
3635                  * Exit immediately since the caller is
3636                  * aware of the problem and takes care
3637                  * of the clean up */
3638                  RETURN(rc);
3639
3640         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3641             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3642                 GOTO(out, rc = 0);
3643
3644         if (rc != 0)
3645                 GOTO(out, rc);
3646
3647         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3648         if (msfs == NULL) {
3649                 GOTO(out, rc = -EPROTO);
3650         }
3651
3652         /* Reinitialize the RDONLY and DEGRADED flags at the client
3653          * on each statfs, so they don't stay set permanently. */
3654         cfs_spin_lock(&cli->cl_oscc.oscc_lock);
3655
3656         if (unlikely(msfs->os_state & OS_STATE_DEGRADED))
3657                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_DEGRADED;
3658         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_DEGRADED))
3659                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_DEGRADED;
3660
3661         if (unlikely(msfs->os_state & OS_STATE_READONLY))
3662                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_RDONLY;
3663         else if (unlikely(cli->cl_oscc.oscc_flags & OSCC_FLAG_RDONLY))
3664                 cli->cl_oscc.oscc_flags &= ~OSCC_FLAG_RDONLY;
3665
3666         /* Add a bit of hysteresis so this flag isn't continually flapping,
3667          * and ensure that new files don't get extremely fragmented due to
3668          * only a small amount of available space in the filesystem.
3669          * We want to set the NOSPC flag when there is less than ~0.1% free
3670          * and clear it when there is at least ~0.2% free space, so:
3671          *                   avail < ~0.1% max          max = avail + used
3672          *            1025 * avail < avail + used       used = blocks - free
3673          *            1024 * avail < used
3674          *            1024 * avail < blocks - free
3675          *                   avail < ((blocks - free) >> 10)
3676          *
3677          * On a very large disk, say 16TB, 0.1% will be 16 GB. We don't want to
3678          * lose that amount of space, so in those cases we report no space left
3679          * if there is less than 1 GB left.                             */
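        /* Illustrative numbers: with os_blocks = 2^20 and os_bfree = 2048,
         * used = min((2^20 - 2048) >> 10, 1 << 30) = 1022 blocks, so NOSPC is
         * set once os_bavail drops below 1022 (about 0.1% of the device) or
         * fewer than 32 inodes remain, and is cleared again only after
         * os_bavail exceeds 2044 and os_ffree exceeds 64. */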
3680         used = min_t(__u64, (msfs->os_blocks - msfs->os_bfree) >> 10, 1 << 30);
3681         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) == 0) &&
3682                      ((msfs->os_ffree < 32) || (msfs->os_bavail < used))))
3683                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC;
3684         else if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3685                           (msfs->os_ffree > 64) &&
3686                           (msfs->os_bavail > (used << 1)))) {
3687                 cli->cl_oscc.oscc_flags &= ~(OSCC_FLAG_NOSPC |
3688                                              OSCC_FLAG_NOSPC_BLK);
3689         }
3690
3691         if (unlikely(((cli->cl_oscc.oscc_flags & OSCC_FLAG_NOSPC) != 0) &&
3692                      (msfs->os_bavail < used)))
3693                 cli->cl_oscc.oscc_flags |= OSCC_FLAG_NOSPC_BLK;
3694
3695         cfs_spin_unlock(&cli->cl_oscc.oscc_lock);
3696
3697         *aa->aa_oi->oi_osfs = *msfs;
3698 out:
3699         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3700         RETURN(rc);
3701 }
3702
3703 static int osc_statfs_async(struct obd_export *exp,
3704                             struct obd_info *oinfo, __u64 max_age,
3705                             struct ptlrpc_request_set *rqset)
3706 {
3707         struct obd_device     *obd = class_exp2obd(exp);
3708         struct ptlrpc_request *req;
3709         struct osc_async_args *aa;
3710         int                    rc;
3711         ENTRY;
3712
3713         /* We could possibly pass max_age in the request (as an absolute
3714          * timestamp or a "seconds.usec ago") so the target can avoid doing
3715          * extra calls into the filesystem if that isn't necessary (e.g.
3716          * during mount that would help a bit).  Having relative timestamps
3717          * is not so great if request processing is slow, while absolute
3718          * timestamps are not ideal because they need time synchronization. */
3719         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3720         if (req == NULL)
3721                 RETURN(-ENOMEM);
3722
3723         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3724         if (rc) {
3725                 ptlrpc_request_free(req);
3726                 RETURN(rc);
3727         }
3728         ptlrpc_request_set_replen(req);
3729         req->rq_request_portal = OST_CREATE_PORTAL;
3730         ptlrpc_at_set_req_timeout(req);
3731
3732         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3733                 /* procfs requests should not wait for stats, to avoid a deadlock */
3734                 req->rq_no_resend = 1;
3735                 req->rq_no_delay = 1;
3736         }
3737
3738         req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
3739         CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
3740         aa = ptlrpc_req_async_args(req);
3741         aa->aa_oi = oinfo;
3742
3743         ptlrpc_set_add_req(rqset, req);
3744         RETURN(0);
3745 }
3746
3747 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3748                       struct obd_statfs *osfs, __u64 max_age, __u32 flags)
3749 {
3750         struct obd_device     *obd = class_exp2obd(exp);
3751         struct obd_statfs     *msfs;
3752         struct ptlrpc_request *req;
3753         struct obd_import     *imp = NULL;
3754         int rc;
3755         ENTRY;
3756
3757         /* Since the request might also come from lprocfs, we need to
3758          * sync this with client_disconnect_export() - bug 15684 */
3759         cfs_down_read(&obd->u.cli.cl_sem);
3760         if (obd->u.cli.cl_import)
3761                 imp = class_import_get(obd->u.cli.cl_import);
3762         cfs_up_read(&obd->u.cli.cl_sem);
3763         if (!imp)
3764                 RETURN(-ENODEV);
3765
3766         /* We could possibly pass max_age in the request (as an absolute
3767          * timestamp or a "seconds.usec ago") so the target can avoid doing
3768          * extra calls into the filesystem if that isn't necessary (e.g.
3769          * during mount that would help a bit).  Having relative timestamps
3770          * is not so great if request processing is slow, while absolute
3771          * timestamps are not ideal because they need time synchronization. */
3772         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3773
3774         class_import_put(imp);
3775
3776         if (req == NULL)
3777                 RETURN(-ENOMEM);
3778
3779         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3780         if (rc) {
3781                 ptlrpc_request_free(req);
3782                 RETURN(rc);
3783         }
3784         ptlrpc_request_set_replen(req);
3785         req->rq_request_portal = OST_CREATE_PORTAL;
3786         ptlrpc_at_set_req_timeout(req);
3787
3788         if (flags & OBD_STATFS_NODELAY) {
3789                 /* procfs requests should not wait for stats, to avoid a deadlock */
3790                 req->rq_no_resend = 1;
3791                 req->rq_no_delay = 1;
3792         }
3793
3794         rc = ptlrpc_queue_wait(req);
3795         if (rc)
3796                 GOTO(out, rc);
3797
3798         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3799         if (msfs == NULL) {
3800                 GOTO(out, rc = -EPROTO);
3801         }
3802
3803         *osfs = *msfs;
3804
3805         EXIT;
3806  out:
3807         ptlrpc_req_finished(req);
3808         return rc;
3809 }
3810
3811 /* Retrieve object striping information.
3812  *
3813  * @lump is a pointer to an in-core struct with lmm_stripe_count indicating
3814  * the maximum number of OST indices which will fit in the user buffer.
3815  * lmm_magic must be LOV_MAGIC (we only use 1 slot here).
3816  */
3817 static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
3818 {
3819         /* we use lov_user_md_v3 because it is larger than lov_user_md_v1 */
3820         struct lov_user_md_v3 lum, *lumk;
3821         struct lov_user_ost_data_v1 *lmm_objects;
3822         int rc = 0, lum_size;
3823         ENTRY;
3824
3825         if (!lsm)
3826                 RETURN(-ENODATA);
3827
3828         /* we only need the header part from user space to get lmm_magic and
3829          * lmm_stripe_count, (the header part is common to v1 and v3) */
3830         lum_size = sizeof(struct lov_user_md_v1);
3831         if (cfs_copy_from_user(&lum, lump, lum_size))
3832                 RETURN(-EFAULT);
3833
3834         if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
3835             (lum.lmm_magic != LOV_USER_MAGIC_V3))
3836                 RETURN(-EINVAL);
3837
3838         /* lov_user_md_vX and lov_mds_md_vX must have the same size */
3839         LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
3840         LASSERT(sizeof(struct lov_user_md_v3) == sizeof(struct lov_mds_md_v3));
3841         LASSERT(sizeof(lum.lmm_objects[0]) == sizeof(lumk->lmm_objects[0]));
3842
3843         /* we can use lov_mds_md_size() to compute lum_size
3844          * because lov_user_md_vX and lov_mds_md_vX have the same size */
3845         if (lum.lmm_stripe_count > 0) {
3846                 lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
3847                 OBD_ALLOC(lumk, lum_size);
3848                 if (!lumk)
3849                         RETURN(-ENOMEM);
3850
3851                 if (lum.lmm_magic == LOV_USER_MAGIC_V1)
3852                         lmm_objects = &(((struct lov_user_md_v1 *)lumk)->lmm_objects[0]);
3853                 else
3854                         lmm_objects = &(lumk->lmm_objects[0]);
3855                 lmm_objects->l_object_id = lsm->lsm_object_id;
3856         } else {
3857                 lum_size = lov_mds_md_size(0, lum.lmm_magic);
3858                 lumk = &lum;
3859         }
3860
3861         lumk->lmm_object_id = lsm->lsm_object_id;
3862         lumk->lmm_object_seq = lsm->lsm_object_seq;
3863         lumk->lmm_stripe_count = 1;
3864
3865         if (cfs_copy_to_user(lump, lumk, lum_size))
3866                 rc = -EFAULT;
3867
3868         if (lumk != &lum)
3869                 OBD_FREE(lumk, lum_size);
3870
3871         RETURN(rc);
3872 }
3873
3874
3875 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3876                          void *karg, void *uarg)
3877 {
3878         struct obd_device *obd = exp->exp_obd;
3879         struct obd_ioctl_data *data = karg;
3880         int err = 0;
3881         ENTRY;
3882
3883         if (!cfs_try_module_get(THIS_MODULE)) {
3884                 CERROR("Can't get module. Is it alive?\n");
3885                 return -EINVAL;
3886         }
3887         switch (cmd) {
3888         case OBD_IOC_LOV_GET_CONFIG: {
3889                 char *buf;
3890                 struct lov_desc *desc;
3891                 struct obd_uuid uuid;
3892
3893                 buf = NULL;
3894                 len = 0;
3895                 if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
3896                         GOTO(out, err = -EINVAL);
3897
3898                 data = (struct obd_ioctl_data *)buf;
3899
3900                 if (sizeof(*desc) > data->ioc_inllen1) {
3901                         obd_ioctl_freedata(buf, len);
3902                         GOTO(out, err = -EINVAL);
3903                 }
3904
3905                 if (data->ioc_inllen2 < sizeof(uuid)) {
3906                         obd_ioctl_freedata(buf, len);
3907                         GOTO(out, err = -EINVAL);
3908                 }
3909
3910                 desc = (struct lov_desc *)data->ioc_inlbuf1;
3911                 desc->ld_tgt_count = 1;
3912                 desc->ld_active_tgt_count = 1;
3913                 desc->ld_default_stripe_count = 1;
3914                 desc->ld_default_stripe_size = 0;
3915                 desc->ld_default_stripe_offset = 0;
3916                 desc->ld_pattern = 0;
3917                 memcpy(&desc->ld_uuid, &obd->obd_uuid, sizeof(uuid));
3918
3919                 memcpy(data->ioc_inlbuf2, &obd->obd_uuid, sizeof(uuid));
3920
3921                 err = cfs_copy_to_user((void *)uarg, buf, len);
3922                 if (err)
3923                         err = -EFAULT;
3924                 obd_ioctl_freedata(buf, len);
3925                 GOTO(out, err);
3926         }
3927         case LL_IOC_LOV_SETSTRIPE:
3928                 err = obd_alloc_memmd(exp, karg);
3929                 if (err > 0)
3930                         err = 0;
3931                 GOTO(out, err);
3932         case LL_IOC_LOV_GETSTRIPE:
3933                 err = osc_getstripe(karg, uarg);
3934                 GOTO(out, err);
3935         case OBD_IOC_CLIENT_RECOVER:
3936                 err = ptlrpc_recover_import(obd->u.cli.cl_import,
3937                                             data->ioc_inlbuf1, 0);
3938                 if (err > 0)
3939                         err = 0;
3940                 GOTO(out, err);
3941         case IOC_OSC_SET_ACTIVE:
3942                 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
3943                                                data->ioc_offset);
3944                 GOTO(out, err);
3945         case OBD_IOC_POLL_QUOTACHECK:
3946                 err = osc_quota_poll_check(exp, (struct if_quotacheck *)karg);
3947                 GOTO(out, err);
3948         case OBD_IOC_PING_TARGET:
3949                 err = ptlrpc_obd_ping(obd);
3950                 GOTO(out, err);
3951         default:
3952                 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
3953                        cmd, cfs_curproc_comm());
3954                 GOTO(out, err = -ENOTTY);
3955         }
3956 out:
3957         cfs_module_put(THIS_MODULE);
3958         return err;
3959 }
3960
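/* Fetch a named value from the OST.  Three keys are understood:
 * KEY_LOCK_TO_STRIPE (always stripe 0, since an OSC object has a single
 * stripe), KEY_LAST_ID (queried synchronously from the OST, no resend) and
 * KEY_FIEMAP (extent mapping passed through to the OST); any other key
 * returns -EINVAL. */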
3961 static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
3962                         obd_count keylen, void *key, __u32 *vallen, void *val,
3963                         struct lov_stripe_md *lsm)
3964 {
3965         ENTRY;
3966         if (!vallen || !val)
3967                 RETURN(-EFAULT);
3968
3969         if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
3970                 __u32 *stripe = val;
3971                 *vallen = sizeof(*stripe);
3972                 *stripe = 0;
3973                 RETURN(0);
3974         } else if (KEY_IS(KEY_LAST_ID)) {
3975                 struct ptlrpc_request *req;
3976                 obd_id                *reply;
3977                 char                  *tmp;
3978                 int                    rc;
3979
3980                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
3981                                            &RQF_OST_GET_INFO_LAST_ID);
3982                 if (req == NULL)
3983                         RETURN(-ENOMEM);
3984
3985                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3986                                      RCL_CLIENT, keylen);
3987                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
3988                 if (rc) {
3989                         ptlrpc_request_free(req);
3990                         RETURN(rc);
3991                 }
3992
3993                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3994                 memcpy(tmp, key, keylen);
3995
3996                 req->rq_no_delay = req->rq_no_resend = 1;
3997                 ptlrpc_request_set_replen(req);
3998                 rc = ptlrpc_queue_wait(req);
3999                 if (rc)
4000                         GOTO(out, rc);
4001
4002                 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID);
4003                 if (reply == NULL)
4004                         GOTO(out, rc = -EPROTO);
4005
4006                 *((obd_id *)val) = *reply;
4007         out:
4008                 ptlrpc_req_finished(req);
4009                 RETURN(rc);
4010         } else if (KEY_IS(KEY_FIEMAP)) {
4011                 struct ptlrpc_request *req;
4012                 struct ll_user_fiemap *reply;
4013                 char *tmp;
4014                 int rc;
4015
4016                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
4017                                            &RQF_OST_GET_INFO_FIEMAP);
4018                 if (req == NULL)
4019                         RETURN(-ENOMEM);
4020
4021                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY,
4022                                      RCL_CLIENT, keylen);
4023                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
4024                                      RCL_CLIENT, *vallen);
4025                 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL,
4026                                      RCL_SERVER, *vallen);
4027
4028                 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
4029                 if (rc) {
4030                         ptlrpc_request_free(req);
4031                         RETURN(rc);
4032                 }
4033
4034                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY);
4035                 memcpy(tmp, key, keylen);
4036                 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL);
4037                 memcpy(tmp, val, *vallen);
4038
4039                 ptlrpc_request_set_replen(req);
4040                 rc = ptlrpc_queue_wait(req);
4041                 if (rc)
4042                         GOTO(out1, rc);
4043
4044                 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL);
4045                 if (reply == NULL)
4046                         GOTO(out1, rc = -EPROTO);
4047
4048                 memcpy(val, reply, *vallen);
4049         out1:
4050                 ptlrpc_req_finished(req);
4051
4052                 RETURN(rc);
4053         }
4054
4055         RETURN(-EINVAL);
4056 }
4057
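/* Called once the MDS has announced itself via KEY_MDS_CONN: connect the
 * MDS->OST llog originator context and mark the import as pingable with
 * server-side timeouts. */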
4058 static int osc_setinfo_mds_connect_import(struct obd_import *imp)
4059 {
4060         struct llog_ctxt *ctxt;
4061         int rc = 0;
4062         ENTRY;
4063
4064         ctxt = llog_get_context(imp->imp_obd, LLOG_MDS_OST_ORIG_CTXT);
4065         if (ctxt) {
4066                 rc = llog_initiator_connect(ctxt);
4067                 llog_ctxt_put(ctxt);
4068         } else {
4069                 /* XXX: should we return an error here, or skip
4070                  * setting the flags below? */
4070         }
4071
4072         cfs_spin_lock(&imp->imp_lock);
4073         imp->imp_server_timeout = 1;
4074         imp->imp_pingable = 1;
4075         cfs_spin_unlock(&imp->imp_lock);
4076         CDEBUG(D_RPCTRACE, "pinging OST %s\n", obd2cli_tgt(imp->imp_obd));
4077
4078         RETURN(rc);
4079 }
4080
4081 static int osc_setinfo_mds_conn_interpret(const struct lu_env *env,
4082                                           struct ptlrpc_request *req,
4083                                           void *aa, int rc)
4084 {
4085         ENTRY;
4086         if (rc != 0)
4087                 RETURN(rc);
4088
4089         RETURN(osc_setinfo_mds_connect_import(req->rq_import));
4090 }
4091
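/* Set a named value.  KEY_NEXT_ID, KEY_CHECKSUM, KEY_SPTLRPC_CONF and
 * KEY_FLUSH_CTX are consumed locally; everything else is packed into an
 * OST_SET_INFO RPC.  KEY_GRANT_SHRINK requests are handed to ptlrpcd, all
 * other RPCs are added to the caller-supplied \a set.
 *
 * A sketch of a typical caller toggling checksums (assumes the
 * obd_set_info_async() wrapper of this tree; a NULL set is fine for
 * locally-consumed keys):
 *
 *      int on = 1;
 *      rc = obd_set_info_async(NULL, exp, sizeof(KEY_CHECKSUM),
 *                              KEY_CHECKSUM, sizeof(on), &on, NULL);
 */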
4092 static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
4093                               obd_count keylen, void *key, obd_count vallen,
4094                               void *val, struct ptlrpc_request_set *set)
4095 {
4096         struct ptlrpc_request *req;
4097         struct obd_device     *obd = exp->exp_obd;
4098         struct obd_import     *imp = class_exp2cliimp(exp);
4099         char                  *tmp;
4100         int                    rc;
4101         ENTRY;
4102
4103         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
4104
4105         if (KEY_IS(KEY_NEXT_ID)) {
4106                 obd_id new_val;
4107                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4108
4109                 if (vallen != sizeof(obd_id))
4110                         RETURN(-ERANGE);
4111                 if (val == NULL)
4112                         RETURN(-EINVAL);
4116
4117                 /* avoid a race between allocating a new object and
4118                  * setting the next id from the ll_sync thread */
4119                 cfs_spin_lock(&oscc->oscc_lock);
4120                 new_val = *((obd_id*)val) + 1;
4121                 if (new_val > oscc->oscc_next_id)
4122                         oscc->oscc_next_id = new_val;
4123                 cfs_spin_unlock(&oscc->oscc_lock);
4124                 CDEBUG(D_HA, "%s: set oscc_next_id = "LPU64"\n",
4125                        obd->obd_name, oscc->oscc_next_id);
4127
4128                 RETURN(0);
4129         }
4130
4131         if (KEY_IS(KEY_CHECKSUM)) {
4132                 if (vallen != sizeof(int))
4133                         RETURN(-EINVAL);
4134                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
4135                 RETURN(0);
4136         }
4137
4138         if (KEY_IS(KEY_SPTLRPC_CONF)) {
4139                 sptlrpc_conf_client_adapt(obd);
4140                 RETURN(0);
4141         }
4142
4143         if (KEY_IS(KEY_FLUSH_CTX)) {
4144                 sptlrpc_import_flush_my_ctx(imp);
4145                 RETURN(0);
4146         }
4147
4148         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
4149                 RETURN(-EINVAL);
4150
4151         /* We pass all other commands directly to the OST. Since nobody calls
4152            osc methods directly and everybody is supposed to go through LOV,
4153            we assume LOV validated the values for us.
4154            The only recognised values so far are evict_by_nid and mds_conn.
4155            Even if something bad got through, we'd get a -EINVAL from the OST
4156            anyway. */
4157
4158         if (KEY_IS(KEY_GRANT_SHRINK))
4159                 req = ptlrpc_request_alloc(imp, &RQF_OST_SET_GRANT_INFO);
4160         else
4161                 req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
4162
4163         if (req == NULL)
4164                 RETURN(-ENOMEM);
4165
4166         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
4167                              RCL_CLIENT, keylen);
4168         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
4169                              RCL_CLIENT, vallen);
4170         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
4171         if (rc) {
4172                 ptlrpc_request_free(req);
4173                 RETURN(rc);
4174         }
4175
4176         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
4177         memcpy(tmp, key, keylen);
4178         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
4179         memcpy(tmp, val, vallen);
4180
4181         if (KEY_IS(KEY_MDS_CONN)) {
4182                 struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4183
4184                 oscc->oscc_oa.o_seq = (*(__u32 *)val);
4185                 oscc->oscc_oa.o_valid |= OBD_MD_FLGROUP;
4186                 LASSERT_SEQ_IS_MDT(oscc->oscc_oa.o_seq);
4187                 req->rq_no_delay = req->rq_no_resend = 1;
4188                 req->rq_interpret_reply = osc_setinfo_mds_conn_interpret;
4189         } else if (KEY_IS(KEY_GRANT_SHRINK)) {
4190                 struct osc_grant_args *aa;
4191                 struct obdo *oa;
4192
4193                 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
4194                 aa = ptlrpc_req_async_args(req);
4195                 OBDO_ALLOC(oa);
4196                 if (!oa) {
4197                         ptlrpc_req_finished(req);
4198                         RETURN(-ENOMEM);
4199                 }
4200                 *oa = ((struct ost_body *)val)->oa;
4201                 aa->aa_oa = oa;
4202                 req->rq_interpret_reply = osc_shrink_grant_interpret;
4203         }
4204
4205         ptlrpc_request_set_replen(req);
4206         if (!KEY_IS(KEY_GRANT_SHRINK)) {
4207                 LASSERT(set != NULL);
4208                 ptlrpc_set_add_req(set, req);
4209                 ptlrpc_check_set(NULL, set);
4210         } else
4211                 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
4212
4213         RETURN(0);
4214 }
4215
4216
4217 static struct llog_operations osc_size_repl_logops = {
4218         .lop_cancel = llog_obd_repl_cancel,
4219 };
4220
4221 static struct llog_operations osc_mds_ost_orig_logops;
4222
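/* Set up the two llog contexts used on the MDS side of this OSC: the
 * MDS->OST originator context and the size-change replicator context.
 * If the second setup fails, the first context is cleaned up again
 * before returning. */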
4223 static int __osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4224                            struct obd_device *tgt, struct llog_catid *catid)
4225 {
4226         int rc;
4227         ENTRY;
4228
4229         rc = llog_setup(obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, tgt, 1,
4230                         &catid->lci_logid, &osc_mds_ost_orig_logops);
4231         if (rc) {
4232                 CERROR("failed to setup LLOG_MDS_OST_ORIG_CTXT: rc = %d\n", rc);
4233                 GOTO(out, rc);
4234         }
4235
4236         rc = llog_setup(obd, &obd->obd_olg, LLOG_SIZE_REPL_CTXT, tgt, 1,
4237                         NULL, &osc_size_repl_logops);
4238         if (rc) {
4239                 struct llog_ctxt *ctxt =
4240                         llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4241                 if (ctxt)
4242                         llog_cleanup(ctxt);
4243                         CERROR("failed to setup LLOG_SIZE_REPL_CTXT: rc = %d\n", rc);
4244         }
4245         GOTO(out, rc);
4246 out:
4247         if (rc) {
4248                 CERROR("osc '%s' tgt '%s' catid %p rc=%d\n",
4249                        obd->obd_name, tgt->obd_name, catid, rc);
4250                 CERROR("logid "LPX64":0x%x\n",
4251                        catid->lci_logid.lgl_oid, catid->lci_logid.lgl_ogen);
4252         }
4253         return rc;
4254 }
4255
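/* Read the catalog id for slot \a *index from the CATLIST file on
 * \a disk_obd, initialize the llog contexts with it, and write the
 * (possibly updated) catalog id back.  Serialized by olg_cat_processing. */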
4256 static int osc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
4257                          struct obd_device *disk_obd, int *index)
4258 {
4259         struct llog_catid catid;
4260         static char name[32] = CATLIST;
4261         int rc;
4262         ENTRY;
4263
4264         LASSERT(olg == &obd->obd_olg);
4265
4266         cfs_mutex_lock(&olg->olg_cat_processing);
4267         rc = llog_get_cat_list(disk_obd, name, *index, 1, &catid);
4268         if (rc) {
4269                 CERROR("llog_get_cat_list failed: rc = %d\n", rc);
4270                 GOTO(out, rc);
4271         }
4272
4273         CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
4274                obd->obd_name, *index, catid.lci_logid.lgl_oid,
4275                catid.lci_logid.lgl_oseq, catid.lci_logid.lgl_ogen);
4276
4277         rc = __osc_llog_init(obd, olg, disk_obd, &catid);
4278         if (rc) {
4279                 CERROR("__osc_llog_init failed: rc = %d\n", rc);
4280                 GOTO(out, rc);
4281         }
4282
4283         rc = llog_put_cat_list(disk_obd, name, *index, 1, &catid);
4284         if (rc) {
4285                 CERROR("llog_put_cat_list failed: rc = %d\n", rc);
4286                 GOTO(out, rc);
4287         }
4288
4289  out:
4290         cfs_mutex_unlock(&olg->olg_cat_processing);
4291
4292         return rc;
4293 }
4294
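/* Tear down both llog contexts; cleanup of the second context is attempted
 * even if the first one fails, and the first error is returned. */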
4295 static int osc_llog_finish(struct obd_device *obd, int count)
4296 {
4297         struct llog_ctxt *ctxt;
4298         int rc = 0, rc2 = 0;
4299         ENTRY;
4300
4301         ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
4302         if (ctxt)
4303                 rc = llog_cleanup(ctxt);
4304
4305         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4306         if (ctxt)
4307                 rc2 = llog_cleanup(ctxt);
4308         if (!rc)
4309                 rc = rc2;
4310
4311         RETURN(rc);
4312 }
4313
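/* On (re)connect, ask the server for the grant this client wants to hold:
 * the grant we still have plus outstanding dirty pages, or two full RPCs
 * worth if that is zero (e.g. 2 * 256 pages * 4KiB = 2MiB with typical
 * defaults).  Grant lost across the disconnect is reported and reset. */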
4314 static int osc_reconnect(const struct lu_env *env,
4315                          struct obd_export *exp, struct obd_device *obd,
4316                          struct obd_uuid *cluuid,
4317                          struct obd_connect_data *data,
4318                          void *localdata)
4319 {
4320         struct client_obd *cli = &obd->u.cli;
4321         ENTRY;
4322
4322         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
4323                 long lost_grant;
4324
4325                 client_obd_list_lock(&cli->cl_loi_list_lock);
4326                 data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
4327                                 2 * cli->cl_max_pages_per_rpc << CFS_PAGE_SHIFT;
4328                 lost_grant = cli->cl_lost_grant;
4329                 cli->cl_lost_grant = 0;
4330                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4331
4332                 CDEBUG(D_CACHE, "request ocd_grant: %d cl_avail_grant: %ld "
4333                        "cl_dirty: %ld cl_lost_grant: %ld\n", data->ocd_grant,
4334                        cli->cl_avail_grant, cli->cl_dirty, lost_grant);
4335                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
4336                        " ocd_grant: %d\n", data->ocd_connect_flags,
4337                        data->ocd_version, data->ocd_grant);
4338         }
4339
4340         RETURN(0);
4341 }
4342
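/* Disconnect from the OST.  On the last disconnect any pending size-change
 * llog cancels are flushed to the target first; the client is removed from
 * the grant shrink list only once the import is gone (see the comment
 * below). */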
4343 static int osc_disconnect(struct obd_export *exp)
4344 {
4345         struct obd_device *obd = class_exp2obd(exp);
4346         struct llog_ctxt  *ctxt;
4347         int rc;
4348
4349         ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
4350         if (ctxt) {
4351                 if (obd->u.cli.cl_conn_count == 1) {
4352                         /* Flush any remaining cancel messages out to the
4353                          * target */
4354                         llog_sync(ctxt, exp);
4355                 }
4356                 llog_ctxt_put(ctxt);
4357         } else {
4358                 CDEBUG(D_HA, "No LLOG_SIZE_REPL_CTXT found in obd %p\n",
4359                        obd);
4360         }
4361
4362         rc = client_disconnect_export(exp);
4363         /**
4364          * Initially we put del_shrink_grant before disconnect_export, but that
4365          * causes the following problem if setup (connect) and cleanup
4366          * (disconnect) are tangled together.
4367          *      connect p1                     disconnect p2
4368          *   ptlrpc_connect_import
4369          *     ...............               class_manual_cleanup
4370          *                                     osc_disconnect
4371          *                                     del_shrink_grant
4372          *   ptlrpc_connect_interrupt
4373          *     init_grant_shrink
4374          *   add this client to shrink list
4375          *                                      cleanup_osc
4376          * Bang! The pinger triggers the shrink.
4377          * So the osc should be removed from the shrink list only after we
4378          * are sure the import has been destroyed; see bug 18662.
4379          */
4380         if (obd->u.cli.cl_import == NULL)
4381                 osc_del_shrink_grant(&obd->u.cli);
4382         return rc;
4383 }
4384
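/* React to import state changes: forget grants on disconnect, fail queued
 * pages and purge the namespace on invalidation, clear the no-space flags
 * of the object creator on (re)activation, and pick up grant and portal
 * settings when the connect data arrives. */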
4385 static int osc_import_event(struct obd_device *obd,
4386                             struct obd_import *imp,
4387                             enum obd_import_event event)
4388 {
4389         struct client_obd *cli;
4390         int rc = 0;
4391
4392         ENTRY;
4393         LASSERT(imp->imp_obd == obd);
4394
4395         switch (event) {
4396         case IMP_EVENT_DISCON: {
4397                 /* Only do this on the MDS OSCs */
4398                 if (imp->imp_server_timeout) {
4399                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4400
4401                         cfs_spin_lock(&oscc->oscc_lock);
4402                         oscc->oscc_flags |= OSCC_FLAG_RECOVERING;
4403                         cfs_spin_unlock(&oscc->oscc_lock);
4404                 }
4405                 cli = &obd->u.cli;
4406                 client_obd_list_lock(&cli->cl_loi_list_lock);
4407                 cli->cl_avail_grant = 0;
4408                 cli->cl_lost_grant = 0;
4409                 client_obd_list_unlock(&cli->cl_loi_list_lock);
4410                 break;
4411         }
4412         case IMP_EVENT_INACTIVE: {
4413                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
4414                 break;
4415         }
4416         case IMP_EVENT_INVALIDATE: {
4417                 struct ldlm_namespace *ns = obd->obd_namespace;
4418                 struct lu_env         *env;
4419                 int                    refcheck;
4420
4421                 env = cl_env_get(&refcheck);
4422                 if (!IS_ERR(env)) {
4423                         /* Reset grants */
4424                         cli = &obd->u.cli;
4425                         client_obd_list_lock(&cli->cl_loi_list_lock);
4426                         /* all pages go to failing rpcs due to the invalid
4427                          * import */
4428                         osc_check_rpcs(env, cli);
4429                         client_obd_list_unlock(&cli->cl_loi_list_lock);
4430
4431                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
4432                         cl_env_put(env, &refcheck);
4433                 } else
4434                         rc = PTR_ERR(env);
4435                 break;
4436         }
4437         case IMP_EVENT_ACTIVE: {
4438                 /* Only do this on the MDS OSCs */
4439                 if (imp->imp_server_timeout) {
4440                         struct osc_creator *oscc = &obd->u.cli.cl_oscc;
4441
4442                         cfs_spin_lock(&oscc->oscc_lock);
4443                         oscc->oscc_flags &= ~(OSCC_FLAG_NOSPC |
4444                                               OSCC_FLAG_NOSPC_BLK);
4445                         cfs_spin_unlock(&oscc->oscc_lock);
4446                 }
4447                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
4448                 break;
4449         }
4450         case IMP_EVENT_OCD: {
4451                 struct obd_connect_data *ocd = &imp->imp_connect_data;
4452
4453                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
4454                         osc_init_grant(&obd->u.cli, ocd);
4455
4456                 /* See bug 7198 */
4457                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
4458                         imp->imp_client->cli_request_portal =
4459                                 OST_REQUEST_PORTAL;
4459
4460                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
4461                 break;
4462         }
4463         case IMP_EVENT_DEACTIVATE: {
4464                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE, NULL);
4465                 break;
4466         }
4467         case IMP_EVENT_ACTIVATE: {
4468                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE, NULL);
4469                 break;
4470         }
4471         default:
4472                 CERROR("Unknown import event %d\n", event);
4473                 LBUG();
4474         }
4475         RETURN(rc);
4476 }
4477
4478 /**
4479  * Determine whether a lock can be canceled before replaying it during
4480  * recovery; see bug 16774 for details.
4481  *
4482  * \retval zero the lock can't be canceled
4483  * \retval other ok to cancel
4484  */
4485 static int osc_cancel_for_recovery(struct ldlm_lock *lock)
4486 {
4487         check_res_locked(lock->l_resource);
4488
4489         /*
4490          * Cancel all unused extent locks granted in LCK_PR or LCK_CR mode.
4491          *
4492          * XXX as a future improvement, we can also cancel unused write lock
4493          * if it doesn't have dirty data and active mmaps.
4494          */
4495         if (lock->l_resource->lr_type == LDLM_EXTENT &&
4496             (lock->l_granted_mode == LCK_PR ||
4497              lock->l_granted_mode == LCK_CR) &&
4498             (osc_dlm_lock_pageref(lock) == 0))
4499                 RETURN(1);
4500
4501         RETURN(0);
4502 }
4503
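/* Set up an OSC device: take a ptlrpcd reference, create the writeback
 * work item, register lprocfs entries, initialize the object creator and
 * the request pool, and install the cancel-for-recovery policy on the
 * namespace. */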
4504 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
4505 {
4506         struct client_obd *cli = &obd->u.cli;
4507         int rc;
4508         ENTRY;
4509
4511         rc = ptlrpcd_addref();
4512         if (rc)
4513                 RETURN(rc);
4514
4515         rc = client_obd_setup(obd, lcfg);
4516         if (rc == 0) {
4517                 void *handler;
4518                 handler = ptlrpcd_alloc_work(cli->cl_import,
4519                                              brw_queue_work, cli);
4520                 if (!IS_ERR(handler))
4521                         cli->cl_writeback_work = handler;
4522                 else
4523                         rc = PTR_ERR(handler);
4524         }
4525
4526         if (rc == 0) {
4527                 struct lprocfs_static_vars lvars = { 0 };
4528
4529                 cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
4530                 lprocfs_osc_init_vars(&lvars);
4531                 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0) {
4532                         lproc_osc_attach_seqstat(obd);
4533                         sptlrpc_lprocfs_cliobd_attach(obd);
4534                         ptlrpc_lprocfs_register_obd(obd);
4535                 }
4536
4537                 oscc_init(obd);
4538                 /* We need to allocate a few more requests, because
4539                    brw_interpret tries to create new requests before freeing
4540                    previous ones. Ideally we want 2x max_rpcs_in_flight
4541                    reserved, but that might waste too much RAM in practice,
4542                    so +2 is just a guess that should still work. */
4543                 cli->cl_import->imp_rq_pool =
4544                         ptlrpc_init_rq_pool(cli->cl_max_rpcs_in_flight + 2,
4545                                             OST_MAXREQSIZE,
4546                                             ptlrpc_add_rqs_to_pool);
4547
4548                 CFS_INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
4549
4550                 ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
4551         }
4552
4553         if (rc)
4554                 ptlrpcd_decref();
4555         RETURN(rc);
4556 }
4557
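/* Staged pre-cleanup: OBD_CLEANUP_EARLY deactivates the import and stops
 * pinging it; OBD_CLEANUP_EXPORTS destroys the writeback work item, the
 * client import, the lprocfs entries and the llog contexts. */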
4558 static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
4559 {
4560         int rc = 0;
4561         ENTRY;
4562
4563         switch (stage) {
4564         case OBD_CLEANUP_EARLY: {
4565                 struct obd_import *imp;
4566                 imp = obd->u.cli.cl_import;
4567                 CDEBUG(D_HA, "Deactivating import %s\n", obd->obd_name);
4568                 /* ptlrpc_abort_inflight to stop an mds_lov_synchronize */
4569                 ptlrpc_deactivate_import(imp);
4570                 cfs_spin_lock(&imp->imp_lock);
4571                 imp->imp_pingable = 0;
4572                 cfs_spin_unlock(&imp->imp_lock);
4573                 break;
4574         }
4575         case OBD_CLEANUP_EXPORTS: {
4576                 struct client_obd *cli = &obd->u.cli;
4577                 /* LU-464
4578                  * for echo client, export may be on zombie list, wait for
4579                  * zombie thread to cull it, because cli.cl_import will be
4580                  * cleared in client_disconnect_export():
4581                  *   class_export_destroy() -> obd_cleanup() ->
4582                  *   echo_device_free() -> echo_client_cleanup() ->
4583                  *   obd_disconnect() -> osc_disconnect() ->
4584                  *   client_disconnect_export()
4585                  */
4586                 obd_zombie_barrier();
4587                 if (cli->cl_writeback_work) {
4588                         ptlrpcd_destroy_work(cli->cl_writeback_work);
4589                         cli->cl_writeback_work = NULL;
4590                 }
4591                 obd_cleanup_client_import(obd);
4592                 ptlrpc_lprocfs_unregister_obd(obd);
4593                 lprocfs_obd_cleanup(obd);
4594                 rc = obd_llog_finish(obd, 0);
4595                 if (rc != 0)
4596                         CERROR("failed to clean up llogging subsystems\n");
4597                 break;
4598         }
4599         }
4600         RETURN(rc);
4601 }
4602
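/* Final cleanup: release the quota cache, the generic client state and our
 * ptlrpcd reference. */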
4603 int osc_cleanup(struct obd_device *obd)
4604 {
4605         int rc;
4606
4607         ENTRY;
4608
4609         /* free memory of osc quota cache */
4610         /* free the memory used by the osc quota cache */
4611
4612         rc = client_obd_cleanup(obd);
4613
4614         ptlrpcd_decref();
4615         RETURN(rc);
4616 }
4617
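/* Process a configuration command; anything not handled explicitly is
 * treated as an lprocfs parameter update. */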
4618 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
4619 {
4620         struct lprocfs_static_vars lvars = { 0 };
4621         int rc = 0;
4622
4623         lprocfs_osc_init_vars(&lvars);
4624
4625         switch (lcfg->lcfg_command) {
4626         default:
4627                 rc = class_process_proc_param(PARAM_OSC, lvars.obd_vars,
4628                                               lcfg, obd);
4629                 if (rc > 0)
4630                         rc = 0;
4631                 break;
4632         }
4633
4634         return(rc);
4635 }
4636
4637 static int osc_process_config(struct obd_device *obd, obd_count len, void *buf)
4638 {
4639         return osc_process_config_base(obd, buf);
4640 }
4641
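/* Method table registered with the obd class; generic client_* helpers are
 * used wherever no OSC-specific behaviour is needed. */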
4642 struct obd_ops osc_obd_ops = {
4643         .o_owner                = THIS_MODULE,
4644         .o_setup                = osc_setup,
4645         .o_precleanup           = osc_precleanup,
4646         .o_cleanup              = osc_cleanup,
4647         .o_add_conn             = client_import_add_conn,
4648         .o_del_conn             = client_import_del_conn,
4649         .o_connect              = client_connect_import,
4650         .o_reconnect            = osc_reconnect,
4651         .o_disconnect           = osc_disconnect,
4652         .o_statfs               = osc_statfs,
4653         .o_statfs_async         = osc_statfs_async,
4654         .o_packmd               = osc_packmd,
4655         .o_unpackmd             = osc_unpackmd,
4656         .o_precreate            = osc_precreate,
4657         .o_create               = osc_create,
4658         .o_create_async         = osc_create_async,
4659         .o_destroy              = osc_destroy,
4660         .o_getattr              = osc_getattr,
4661         .o_getattr_async        = osc_getattr_async,
4662         .o_setattr              = osc_setattr,
4663         .o_setattr_async        = osc_setattr_async,
4664         .o_brw                  = osc_brw,
4665         .o_punch                = osc_punch,
4666         .o_sync                 = osc_sync,
4667         .o_enqueue              = osc_enqueue,
4668         .o_change_cbdata        = osc_change_cbdata,
4669         .o_find_cbdata          = osc_find_cbdata,
4670         .o_cancel               = osc_cancel,
4671         .o_cancel_unused        = osc_cancel_unused,
4672         .o_iocontrol            = osc_iocontrol,
4673         .o_get_info             = osc_get_info,
4674         .o_set_info_async       = osc_set_info_async,
4675         .o_import_event         = osc_import_event,
4676         .o_llog_init            = osc_llog_init,
4677         .o_llog_finish          = osc_llog_finish,
4678         .o_process_config       = osc_process_config,
4679         .o_quotactl             = osc_quotactl,
4680         .o_quotacheck           = osc_quotacheck,
4681         .o_quota_adjust_qunit   = osc_quota_adjust_qunit,
4682 };
4683
4684 extern struct lu_kmem_descr osc_caches[];
4685 extern cfs_spinlock_t       osc_ast_guard;
4686 extern cfs_lock_class_key_t osc_ast_guard_class;
4687
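/* Module entry point: set up caches and quota state, register the obd
 * type, and derive the originator llog operations from the generic lvfs
 * ones. */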
4688 int __init osc_init(void)
4689 {
4690         struct lprocfs_static_vars lvars = { 0 };
4691         int rc;
4692         ENTRY;
4693
4694         /* Print the address of _any_ initialized kernel symbol from this
4695          * module, to allow debugging with a gdb that doesn't support data
4696          * symbols from modules. */
4697         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
4698
4699         rc = lu_kmem_init(osc_caches);
4700         if (rc)
4701                 RETURN(rc);
4702
4701         lprocfs_osc_init_vars(&lvars);
4702
4703         osc_quota_init();
4704         rc = class_register_type(&osc_obd_ops, NULL, lvars.module_vars,
4705                                  LUSTRE_OSC_NAME, &osc_device_type);
4706         if (rc) {
4707                 lu_kmem_fini(osc_caches);
4708                 RETURN(rc);
4709         }
4710
4711         cfs_spin_lock_init(&osc_ast_guard);
4712         cfs_lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
4713
4714         osc_mds_ost_orig_logops = llog_lvfs_ops;
4715         osc_mds_ost_orig_logops.lop_setup = llog_obd_origin_setup;
4716         osc_mds_ost_orig_logops.lop_cleanup = llog_obd_origin_cleanup;
4717         osc_mds_ost_orig_logops.lop_add = llog_obd_origin_add;
4718         osc_mds_ost_orig_logops.lop_connect = llog_origin_connect;
4719
4720         RETURN(rc);
4721 }
4722
4723 #ifdef __KERNEL__
4724 static void /*__exit*/ osc_exit(void)
4725 {
4726         lu_device_type_fini(&osc_device_type);
4727
4728         osc_quota_exit();
4729         class_unregister_type(LUSTRE_OSC_NAME);
4730         lu_kmem_fini(osc_caches);
4731 }
4732
4733 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
4734 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
4735 MODULE_LICENSE("GPL");
4736
4737 cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);
4738 #endif