lustre/osc/osc_request.c (fs/lustre-release.git, commit 6b56631fee32873b8b29fc0e42703069b60bbc0f)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <lprocfs_status.h>
#include <lustre_debug.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <uapi/linux/lustre/lustre_param.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);

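/* Pack @oa into the request body of @req, converting the in-memory obdo
 * to its wire format based on the features negotiated with the peer. */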
void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

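/* Fetch the current attributes of the object named by @oa from the OST,
 * waiting synchronously for the reply. */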
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

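/* Synchronously push the attributes in @oa to the OST and copy the
 * server's view of the object back into @oa on success. */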
static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        RETURN(rc);
}

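/* Reply callback for asynchronous setattr: unpack the returned attributes
 * into sa_oa and invoke the caller's upcall with the final status. */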
static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 struct osc_setattr_args *sa, int rc)
{
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

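/* Send an OST_SETATTR request without blocking the caller.  If @rqset is
 * NULL the request is handed to ptlrpcd and the reply is ignored; otherwise
 * @upcall is invoked with @cookie when the reply is interpreted. */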
int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply =
                        (ptlrpc_interpterer_t)osc_setattr_interpret;

                CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
                sa = ptlrpc_req_async_args(req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                if (rqset == PTLRPCD_SET)
                        ptlrpcd_add_req(req);
                else
                        ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}

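/* Reply callback for OST_LADVISE: copy the returned obdo back to the
 * caller and run the upcall with the final status. */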
static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for the response. The upcall and cookie
 * may also be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        CLASSERT(sizeof(*la) <= sizeof(req->rq_async_args));
        la = ptlrpc_req_async_args(req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

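/* Create an object on the OST.  The assertions restrict this path to
 * echo-client object sequences. */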
static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

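/* Send an OST_PUNCH request for the object in @oa via ptlrpcd without
 * waiting for the reply; @upcall is invoked with @cookie when the reply
 * is interpreted. */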
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
        CLASSERT(sizeof(*sa) <= sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

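/* Reply callback for OST_SYNC: update the object's blocks attribute from
 * the reply and invoke the caller's upcall. */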
static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req,
                              void *arg, int rc)
{
        struct osc_fsync_args   *fa = arg;
        struct ost_body         *body;
        struct cl_attr          *attr = &osc_env_info(env)->oti_attr;
        unsigned long           valid = 0;
        struct cl_object        *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

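/* Flush the object's dirty data on the OST; the start/end of the range to
 * sync are carried in the size/blocks fields of @oa (see below). */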
int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args));
        fa = ptlrpc_req_async_args(req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        if (rqset == PTLRPCD_SET)
                ptlrpcd_add_req(req);
        else
                ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

/* Find and cancel locally the locks matched by @mode in the resource found
 * by @objid. Found locks are added into the @cancels list. Returns the
 * number of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes from a case when ELC is not supported originally,
         * when we still want to cancel locks in advance and just cancel them
         * locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

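/* Reply callback for OST_DESTROY: release the in-flight slot and wake any
 * waiters throttled in osc_destroy(). */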
static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *data,
                                 int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);
        return 0;
}

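/* Try to take a destroy RPC slot, returning 1 on success.  The number of
 * concurrent destroy RPCs is capped at cl_max_rpcs_in_flight. */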
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

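/* Destroy an OST object.  Matching local locks are cancelled in advance
 * (ELC) and the request is sent asynchronously, throttled by
 * osc_can_send_destroy(). */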
static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct list_head       cancels = LIST_HEAD_INIT(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);

                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * under max_rpc_in_flight
                 */
                rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
                                            osc_can_send_destroy(cli), &lwi);
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(rc);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

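/* Fill the dirty accounting fields of @oa (o_dirty, o_undirty, o_grant,
 * o_dropped) so the server can see our cache usage and grant state. */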
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data, GRANT_PARAM))
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
                     cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu - %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages, cli->cl_dirty_transit,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) -
                            atomic_long_read(&obd_dirty_transit_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() calls and the atomic_inc() updates they
                 * observe are not covered by a lock, thus they may safely race
                 * and trip this CERROR() unless we add in a small fudge
                 * factor (+1). */
                CERROR("%s: dirty %ld - %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       atomic_long_read(&obd_dirty_transit_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data,
                                 GRANT_PARAM)) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT -
                                    (PTLRPC_MAX_BRW_PAGES << PAGE_SHIFT)*4UL);
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        oa->o_dropped = cli->cl_lost_grant;
        cli->cl_lost_grant = 0;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
               oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

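/* Reply callback for a grant shrink request: on failure give the grant we
 * tried to return back to the local available pool. */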
static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *aa, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct obdo *oa = ((struct osc_grant_args *)aa)->aa_oa;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
        return rc;
}

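/* Give a quarter of the locally available grant back to the server by
 * piggybacking it on an outgoing BRW request (see osc_brw_prep_request). */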
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

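/* Release any grant above @target_bytes back to the server via a
 * KEY_GRANT_SHRINK set_info RPC. */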
int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already above or below the desired limit.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}

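/* Decide whether it is time to return unused grant to the server: only
 * when the import is fully connected, supports GRANT_SHRINK, and holds
 * more grant than a single full BRW needs. */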
static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if ((client->cl_import->imp_connect_data.ocd_connect_flags &
             OBD_CONNECT_GRANT_SHRINK) == 0)
                return 0;

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

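/* Periodic work that walks the registered clients, shrinks grant where
 * appropriate (at most GRANT_SHRINK_RPC_BATCH RPCs per pass), and
 * reschedules itself for the earliest upcoming shrink deadline. */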
static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (++rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli))
                        osc_shrink_grant(cli);

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds())
                schedule_delayed_work(&work, msecs_to_jiffies(
                                        (next_shrink - ktime_get_seconds()) *
                                        MSEC_PER_SEC));
        else
                schedule_work(&work.work);
}

/**
 * Start grant work for returning grant to the server for idle clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we're expected to hold: if we've
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as inflight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we're evicted, but imp_state already
         * left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                cli->cl_avail_grant -= cli->cl_reserved_grant;
                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        cli->cl_avail_grant -= cli->cl_dirty_grant;
                else
                        cli->cl_avail_grant -=
                                        cli->cl_dirty_pages << PAGE_SHIFT;
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = size >> PAGE_SHIFT;
                if (cli->cl_max_extent_pages == 0)
                        cli->cl_max_extent_pages = 1;
        } else {
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. "
                "chunk bits: %d cl_max_extent_pages: %d\n",
                cli_name(cli),
                cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
                cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT (page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

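/* Verify the per-niobuf return codes and the transferred byte count in a
 * BRW_WRITE reply; returns 0 if the whole write was applied cleanly. */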
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return(-EPROTO);
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0)
                        return(remote_rcs[i]);

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return(-EPROTO);
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return(-EPROTO);
        }

        return (0);
}

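/* Two brw_pages may share a single remote niobuf only if they are
 * byte-contiguous and their flags do not differ in unsafe ways. */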
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

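/* Compute a T10-PI style checksum over the pages: generate per-sector DIF
 * guard tags with @fn, then hash the collected tags into *check_sum. */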
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        unsigned int bufsize;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        int rc = 0;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The left guard number should be able to hold checksums of a
                 * whole page
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (rc)
                        break;

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        *check_sum = cksum;
out:
        __free_page(__page);
        return rc;
}

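/* Compute a plain bulk checksum of @nob bytes across the page array using
 * the hash algorithm selected by @cksum_type. */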
static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}

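/* Dispatch to the T10-PI or plain bulk checksum routine depending on
 * whether @cksum_type maps to a DIF checksum function. */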
static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

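/* Build an OST_READ/OST_WRITE request for @page_count pages: merge
 * contiguous pages into niobufs, set up the bulk descriptor (or a short
 * i/o buffer for small transfers), and fill in grant and checksum data. */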
static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
                     u32 page_count, struct brw_page **pga,
                     struct ptlrpc_request **reqp, int resend)
{
        struct ptlrpc_request   *req;
        struct ptlrpc_bulk_desc *desc;
        struct ost_body         *body;
        struct obd_ioobj        *ioobj;
        struct niobuf_remote    *niobuf;
        int niocount, i, requested_nob, opc, rc, short_io_size = 0;
        struct osc_brw_async_args *aa;
        struct req_capsule      *pill;
        struct brw_page *pg_prev;
        void *short_io_buf;
        const char *obd_name = cli->cl_import->imp_obd->obd_name;

        ENTRY;
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
                RETURN(-ENOMEM); /* Recoverable */
        if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
                RETURN(-EINVAL); /* Fatal */

        if ((cmd & OBD_BRW_WRITE) != 0) {
                opc = OST_WRITE;
                req = ptlrpc_request_alloc_pool(cli->cl_import,
                                                osc_rq_pool,
                                                &RQF_OST_BRW_WRITE);
        } else {
                opc = OST_READ;
                req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
        }
        if (req == NULL)
                RETURN(-ENOMEM);

        for (niocount = i = 1; i < page_count; i++) {
                if (!can_merge_pages(pga[i - 1], pga[i]))
                        niocount++;
        }

        pill = &req->rq_pill;
        req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
                             sizeof(*ioobj));
        req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
                             niocount * sizeof(*niobuf));

        for (i = 0; i < page_count; i++)
                short_io_size += pga[i]->count;

        /* Check if read/write is small enough to be a short io. */
        if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
            !imp_connect_shortio(cli->cl_import))
                short_io_size = 0;

        req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
                             opc == OST_READ ? 0 : short_io_size);
        if (opc == OST_READ)
                req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
                                     short_io_size);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);
        /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
         * retry logic */
        req->rq_no_retry_einprogress = 1;

        if (short_io_size != 0) {
                desc = NULL;
                short_io_buf = NULL;
                goto no_bulk;
        }

        desc = ptlrpc_prep_bulk_imp(req, page_count,
                cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
                (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
                        PTLRPC_BULK_PUT_SINK) |
                        PTLRPC_BULK_BUF_KIOV,
                OST_BULK_PORTAL,
                &ptlrpc_bulk_kiov_pin_ops);

        if (desc == NULL)
                GOTO(out, rc = -ENOMEM);
        /* NB request now owns desc and will free it when it gets freed */
no_bulk:
        body = req_capsule_client_get(pill, &RMF_OST_BODY);
        ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
        niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
        LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
         * and from_kgid(), because they are asynchronous. Fortunately, the
         * variable oa contains valid o_uid and o_gid in these two operations.
         * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
         * OBD_MD_FLUID and OBD_MD_FLGID are not set in order to avoid breaking
         * other process logic. */
        body->oa.o_uid = oa->o_uid;
        body->oa.o_gid = oa->o_gid;

        obdo_to_ioobj(oa, ioobj);
        ioobj->ioo_bufcnt = niocount;
        /* The high bits of ioo_max_brw tell the server the _maximum_ number of
         * bulks that might be sent for this request.  The actual number is
         * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
         * sends "max - 1" for old client compatibility sending "0", and also
         * so the actual maximum is a power-of-two number, not one less.
         * LU-1431 */
1391         if (desc != NULL)
1392                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1393         else /* short io */
1394                 ioobj_max_brw_set(ioobj, 0);
1395
1396         if (short_io_size != 0) {
1397                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1398                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1399                         body->oa.o_flags = 0;
1400                 }
1401                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1402                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1403                        short_io_size);
1404                 if (opc == OST_WRITE) {
1405                         short_io_buf = req_capsule_client_get(pill,
1406                                                               &RMF_SHORT_IO);
1407                         LASSERT(short_io_buf != NULL);
1408                 }
1409         }
1410
1411         LASSERT(page_count > 0);
1412         pg_prev = pga[0];
1413         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1414                 struct brw_page *pg = pga[i];
1415                 int poff = pg->off & ~PAGE_MASK;
1416
1417                 LASSERT(pg->count > 0);
1418                 /* make sure there is no gap in the middle of page array */
1419                 LASSERTF(page_count == 1 ||
1420                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1421                           ergo(i > 0 && i < page_count - 1,
1422                                poff == 0 && pg->count == PAGE_SIZE)   &&
1423                           ergo(i == page_count - 1, poff == 0)),
1424                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1425                          i, page_count, pg, pg->off, pg->count);
1426                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1427                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1428                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1429                          i, page_count,
1430                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1431                          pg_prev->pg, page_private(pg_prev->pg),
1432                          pg_prev->pg->index, pg_prev->off);
1433                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1434                         (pg->flag & OBD_BRW_SRVLOCK));
1435                 if (short_io_size != 0 && opc == OST_WRITE) {
1436                         unsigned char *ptr = ll_kmap_atomic(pg->pg, KM_USER0);
1437
1438                         LASSERT(short_io_size >= requested_nob + pg->count);
1439                         memcpy(short_io_buf + requested_nob,
1440                                ptr + poff,
1441                                pg->count);
1442                         ll_kunmap_atomic(ptr, KM_USER0);
1443                 } else if (short_io_size == 0) {
1444                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1445                                                          pg->count);
1446                 }
1447                 requested_nob += pg->count;
1448
1449                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1450                         niobuf--;
1451                         niobuf->rnb_len += pg->count;
1452                 } else {
1453                         niobuf->rnb_offset = pg->off;
1454                         niobuf->rnb_len    = pg->count;
1455                         niobuf->rnb_flags  = pg->flag;
1456                 }
1457                 pg_prev = pg;
1458         }
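        /*
         * Worked example (illustrative): two file-contiguous 4KiB pages
         * at offsets 0 and 4096 with identical brw flags take the
         * can_merge_pages() branch above and collapse into one remote
         * niobuf { rnb_offset = 0, rnb_len = 8192 }, so the server sees
         * a single contiguous I/O region.
         */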
1459
1460         LASSERTF((void *)(niobuf - niocount) ==
1461                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1462                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1463                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1464
1465         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1466         if (resend) {
1467                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1468                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1469                         body->oa.o_flags = 0;
1470                 }
1471                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1472         }
1473
1474         if (osc_should_shrink_grant(cli))
1475                 osc_shrink_grant_local(cli, &body->oa);
1476
1477         /* size[REQ_REC_OFF] still sizeof (*body) */
1478         if (opc == OST_WRITE) {
1479                 if (cli->cl_checksum &&
1480                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1481                         /* store cl_cksum_type in a local variable since
1482                          * it can be changed via lprocfs */
1483                         enum cksum_types cksum_type = cli->cl_cksum_type;
1484
1485                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1486                                 body->oa.o_flags = 0;
1487
1488                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1489                                                                 cksum_type);
1490                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1491
1492                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1493                                                   requested_nob, page_count,
1494                                                   pga, OST_WRITE,
1495                                                   &body->oa.o_cksum);
1496                         if (rc < 0) {
1497                                 CDEBUG(D_PAGE, "failed to checksum, rc = %d\n",
1498                                        rc);
1499                                 GOTO(out, rc);
1500                         }
1501                         CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1502                                body->oa.o_cksum);
1503
1504                         /* save this in 'oa', too, for later checking */
1505                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1506                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1507                                                            cksum_type);
1508                 } else {
1509                         /* clear out the checksum flag, in case this is a
1510                          * resend but cl_checksum is no longer set. b=11238 */
1511                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1512                 }
1513                 oa->o_cksum = body->oa.o_cksum;
1514                 /* 1 RC per niobuf */
1515                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1516                                      sizeof(__u32) * niocount);
1517         } else {
1518                 if (cli->cl_checksum &&
1519                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1520                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1521                                 body->oa.o_flags = 0;
1522                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1523                                 cli->cl_cksum_type);
1524                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1525                 }
1526
1527                 /* The client cksum has already been copied to the wire obdo by
1528                  * the earlier lustre_set_wire_obdo(); if a bulk read is being
1529                  * resent due to a cksum error, this lets the server check and
1530                  * dump the pages on its side. */
1531         }
1532         ptlrpc_request_set_replen(req);
1533
1534         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1535         aa = ptlrpc_req_async_args(req);
1536         aa->aa_oa = oa;
1537         aa->aa_requested_nob = requested_nob;
1538         aa->aa_nio_count = niocount;
1539         aa->aa_page_count = page_count;
1540         aa->aa_resends = 0;
1541         aa->aa_ppga = pga;
1542         aa->aa_cli = cli;
1543         INIT_LIST_HEAD(&aa->aa_oaps);
1544
1545         *reqp = req;
1546         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1547         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1548                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1549                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1550         RETURN(0);
1551
1552  out:
1553         ptlrpc_req_finished(req);
1554         RETURN(rc);
1555 }
1556
1557 char dbgcksum_file_name[PATH_MAX];
1558
1559 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1560                                 struct brw_page **pga, __u32 server_cksum,
1561                                 __u32 client_cksum)
1562 {
1563         struct file *filp;
1564         int rc, i;
1565         unsigned int len;
1566         char *buf;
1567
1568         /* only keep a dump of the pages on the first error for a given range
1569          * in the file/FID, not during the resends/retries. */
1570         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1571                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1572                  (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0 ?
1573                   libcfs_debug_file_path_arr :
1574                   LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1575                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1576                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1577                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1578                  pga[0]->off,
1579                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1580                  client_cksum, server_cksum);
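        /* e.g. with hypothetical values and the default debug path:
         * /tmp/lustre-log-checksum_dump-osc-[0x200000401:0x12:0x0]:
         * [0-1048575]-a1b2c3d4-e5f6a7b8 (parent FID, byte range, then
         * client and server checksums) */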
1581         filp = filp_open(dbgcksum_file_name,
1582                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1583         if (IS_ERR(filp)) {
1584                 rc = PTR_ERR(filp);
1585                 if (rc == -EEXIST)
1586                         CDEBUG(D_INFO, "%s: can't open file to dump pages with"
1587                                " checksum error: rc = %d\n", dbgcksum_file_name,
1588                                rc);
1589                 else
1590                         CERROR("%s: can't open file to dump pages with "
1591                                "checksum error: rc = %d\n", dbgcksum_file_name, rc);
1592                 return;
1593         }
1594
1595         for (i = 0; i < page_count; i++) {
1596                 len = pga[i]->count;
1597                 buf = kmap(pga[i]->pg);
1598                 while (len != 0) {
1599                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1600                         if (rc < 0) {
1601                                 CERROR("%s: wanted to write %u bytes but got "
1602                                        "error %d\n", dbgcksum_file_name, len, rc);
1603                                 break;
1604                         }
1605                         len -= rc;
1606                         buf += rc;
1607                         CDEBUG(D_INFO, "%s: wrote %d bytes\n",
1608                                dbgcksum_file_name, rc);
1609                 }
1610                 kunmap(pga[i]->pg);
1611         }
1612
1613         rc = ll_vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1614         if (rc)
1615                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1616         filp_close(filp, NULL);
1617         return;
1618 }
1619
1620 static int
1621 check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
1622                      __u32 client_cksum, __u32 server_cksum,
1623                      struct osc_brw_async_args *aa)
1624 {
1625         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1626         enum cksum_types cksum_type;
1627         obd_dif_csum_fn *fn = NULL;
1628         int sector_size = 0;
1629         bool t10pi = false;
1630         __u32 new_cksum;
1631         char *msg;
1632         int rc;
1633
1634         if (server_cksum == client_cksum) {
1635                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1636                 return 0;
1637         }
1638
1639         if (aa->aa_cli->cl_checksum_dump)
1640                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1641                                     server_cksum, client_cksum);
1642
1643         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1644                                            oa->o_flags : 0);
1645
1646         switch (cksum_type) {
1647         case OBD_CKSUM_T10IP512:
1648                 t10pi = true;
1649                 fn = obd_dif_ip_fn;
1650                 sector_size = 512;
1651                 break;
1652         case OBD_CKSUM_T10IP4K:
1653                 t10pi = true;
1654                 fn = obd_dif_ip_fn;
1655                 sector_size = 4096;
1656                 break;
1657         case OBD_CKSUM_T10CRC512:
1658                 t10pi = true;
1659                 fn = obd_dif_crc_fn;
1660                 sector_size = 512;
1661                 break;
1662         case OBD_CKSUM_T10CRC4K:
1663                 t10pi = true;
1664                 fn = obd_dif_crc_fn;
1665                 sector_size = 4096;
1666                 break;
1667         default:
1668                 break;
1669         }
1670
1671         if (t10pi)
1672                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1673                                              aa->aa_page_count,
1674                                              aa->aa_ppga,
1675                                              OST_WRITE,
1676                                              fn,
1677                                              sector_size,
1678                                              &new_cksum);
1679         else
1680                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1681                                        aa->aa_ppga, OST_WRITE, cksum_type,
1682                                        &new_cksum);
1683
1684         if (rc < 0)
1685                 msg = "failed to calculate the client write checksum";
1686         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1687                 msg = "the server did not use the checksum type specified in "
1688                       "the original request - likely a protocol problem";
1689         else if (new_cksum == server_cksum)
1690                 msg = "changed on the client after we checksummed it - "
1691                       "likely false positive due to mmap IO (bug 11742)";
1692         else if (new_cksum == client_cksum)
1693                 msg = "changed in transit before arrival at OST";
1694         else
1695                 msg = "changed in transit AND doesn't match the original - "
1696                       "likely false positive due to mmap IO (bug 11742)";
1697
1698         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1699                            DFID " object "DOSTID" extent [%llu-%llu], original "
1700                            "client csum %x (type %x), server csum %x (type %x),"
1701                            " client csum now %x\n",
1702                            obd_name, msg, libcfs_nid2str(peer->nid),
1703                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1704                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1705                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1706                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1707                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1708                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1709                            client_cksum,
1710                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1711                            server_cksum, cksum_type, new_cksum);
1712         return 1;
1713 }
1714
1715 /* Note: rc enters this function as the number of bytes transferred */
1716 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1717 {
1718         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1719         struct client_obd *cli = aa->aa_cli;
1720         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1721         const struct lnet_process_id *peer =
1722                 &req->rq_import->imp_connection->c_peer;
1723         struct ost_body *body;
1724         u32 client_cksum = 0;
1725         ENTRY;
1726
1727         if (rc < 0 && rc != -EDQUOT) {
1728                 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
1729                 RETURN(rc);
1730         }
1731
1732         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1733         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1734         if (body == NULL) {
1735                 DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
1736                 RETURN(-EPROTO);
1737         }
1738
1739         /* set/clear over quota flag for a uid/gid/projid */
1740         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1741             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1742                 unsigned qid[LL_MAXQUOTAS] = {
1743                                          body->oa.o_uid, body->oa.o_gid,
1744                                          body->oa.o_projid };
1745                 CDEBUG(D_QUOTA, "setdq for [%u %u %u] with valid %#llx, flags %x\n",
1746                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
1747                        body->oa.o_valid, body->oa.o_flags);
1748                 osc_quota_setdq(cli, qid, body->oa.o_valid,
1749                                 body->oa.o_flags);
1750         }
1751
1752         osc_update_grant(cli, body);
1753
1754         if (rc < 0)
1755                 RETURN(rc);
1756
1757         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1758                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1759
1760         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1761                 if (rc > 0) {
1762                         CERROR("Unexpected +ve rc %d\n", rc);
1763                         RETURN(-EPROTO);
1764                 }
1765
1766                 if (req->rq_bulk != NULL &&
1767                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1768                         RETURN(-EAGAIN);
1769
1770                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1771                     check_write_checksum(&body->oa, peer, client_cksum,
1772                                          body->oa.o_cksum, aa))
1773                         RETURN(-EAGAIN);
1774
1775                 rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
1776                                      aa->aa_page_count, aa->aa_ppga);
1777                 GOTO(out, rc);
1778         }
1779
1780         /* The rest of this function executes only for OST_READs */
1781
1782         if (req->rq_bulk == NULL) {
1783                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
1784                                           RCL_SERVER);
1785                 LASSERT(rc == req->rq_status);
1786         } else {
1787                 /* if unwrap_bulk failed, return -EAGAIN to retry */
1788                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1789         }
1790         if (rc < 0)
1791                 GOTO(out, rc = -EAGAIN);
1792
1793         if (rc > aa->aa_requested_nob) {
1794                 CERROR("Unexpected rc %d (%d requested)\n", rc,
1795                        aa->aa_requested_nob);
1796                 RETURN(-EPROTO);
1797         }
1798
1799         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
1800                 CERROR("Unexpected rc %d (%d transferred)\n",
1801                        rc, req->rq_bulk->bd_nob_transferred);
1802                 RETURN(-EPROTO);
1803         }
1804
1805         if (req->rq_bulk == NULL) {
1806                 /* short io */
1807                 int nob, pg_count, i = 0;
1808                 unsigned char *buf;
1809
1810                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
1811                 pg_count = aa->aa_page_count;
1812                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
1813                                                    rc);
1814                 nob = rc;
1815                 while (nob > 0 && pg_count > 0) {
1816                         unsigned char *ptr;
1817                         int count = aa->aa_ppga[i]->count > nob ?
1818                                     nob : aa->aa_ppga[i]->count;
1819
1820                         CDEBUG(D_CACHE, "page %p count %d\n",
1821                                aa->aa_ppga[i]->pg, count);
1822                         ptr = ll_kmap_atomic(aa->aa_ppga[i]->pg, KM_USER0);
1823                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
1824                                count);
1825                         ll_kunmap_atomic((void *) ptr, KM_USER0);
1826
1827                         buf += count;
1828                         nob -= count;
1829                         i++;
1830                         pg_count--;
1831                 }
1832         }
1833
1834         if (rc < aa->aa_requested_nob)
1835                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1836
1837         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1838                 static int cksum_counter;
1839                 u32        server_cksum = body->oa.o_cksum;
1840                 char      *via = "";
1841                 char      *router = "";
1842                 enum cksum_types cksum_type;
1843                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
1844                         body->oa.o_flags : 0;
1845
1846                 cksum_type = obd_cksum_type_unpack(o_flags);
1847                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, rc,
1848                                           aa->aa_page_count, aa->aa_ppga,
1849                                           OST_READ, &client_cksum);
1850                 if (rc < 0)
1851                         GOTO(out, rc);
1852
1853                 if (req->rq_bulk != NULL &&
1854                     peer->nid != req->rq_bulk->bd_sender) {
1855                         via = " via ";
1856                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
1857                 }
1858
1859                 if (server_cksum != client_cksum) {
1860                         struct ost_body *clbody;
1861                         u32 page_count = aa->aa_page_count;
1862
1863                         clbody = req_capsule_client_get(&req->rq_pill,
1864                                                         &RMF_OST_BODY);
1865                         if (cli->cl_checksum_dump)
1866                                 dump_all_bulk_pages(&clbody->oa, page_count,
1867                                                     aa->aa_ppga, server_cksum,
1868                                                     client_cksum);
1869
1870                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
1871                                            "%s%s%s inode "DFID" object "DOSTID
1872                                            " extent [%llu-%llu], client %x, "
1873                                            "server %x, cksum_type %x\n",
1874                                            obd_name,
1875                                            libcfs_nid2str(peer->nid),
1876                                            via, router,
1877                                            clbody->oa.o_valid & OBD_MD_FLFID ?
1878                                                 clbody->oa.o_parent_seq : 0ULL,
1879                                            clbody->oa.o_valid & OBD_MD_FLFID ?
1880                                                 clbody->oa.o_parent_oid : 0,
1881                                            clbody->oa.o_valid & OBD_MD_FLFID ?
1882                                                 clbody->oa.o_parent_ver : 0,
1883                                            POSTID(&body->oa.o_oi),
1884                                            aa->aa_ppga[0]->off,
1885                                            aa->aa_ppga[page_count-1]->off +
1886                                            aa->aa_ppga[page_count-1]->count - 1,
1887                                            client_cksum, server_cksum,
1888                                            cksum_type);
1889                         cksum_counter = 0;
1890                         aa->aa_oa->o_cksum = client_cksum;
1891                         rc = -EAGAIN;
1892                 } else {
1893                         cksum_counter++;
1894                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1895                         rc = 0;
1896                 }
1897         } else if (unlikely(client_cksum)) {
1898                 static int cksum_missed;
1899
1900                 cksum_missed++;
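                /* (x & -x) == x only when x is a power of two, so this
                 * error is logged at 1, 2, 4, 8, ... misses rather than
                 * flooding the console */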
1901                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
1902                         CERROR("Checksum %u requested from %s but not sent\n",
1903                                cksum_missed, libcfs_nid2str(peer->nid));
1904         } else {
1905                 rc = 0;
1906         }
1907 out:
1908         if (rc >= 0)
1909                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
1910                                      aa->aa_oa, &body->oa);
1911
1912         RETURN(rc);
1913 }
1914
1915 static int osc_brw_redo_request(struct ptlrpc_request *request,
1916                                 struct osc_brw_async_args *aa, int rc)
1917 {
1918         struct ptlrpc_request *new_req;
1919         struct osc_brw_async_args *new_aa;
1920         struct osc_async_page *oap;
1921         ENTRY;
1922
1923         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
1924                   "redo for recoverable error %d", rc);
1925
1926         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
1927                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
1928                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
1929                                   aa->aa_ppga, &new_req, 1);
1930         if (rc)
1931                 RETURN(rc);
1932
1933         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
1934                 if (oap->oap_request != NULL) {
1935                         LASSERTF(request == oap->oap_request,
1936                                  "request %p != oap_request %p\n",
1937                                  request, oap->oap_request);
1938                         if (oap->oap_interrupted) {
1939                                 ptlrpc_req_finished(new_req);
1940                                 RETURN(-EINTR);
1941                         }
1942                 }
1943         }
1944         /* New request takes over pga and oaps from old request.
1945          * Note that copying a list_head doesn't work, need to move it... */
1946         aa->aa_resends++;
1947         new_req->rq_interpret_reply = request->rq_interpret_reply;
1948         new_req->rq_async_args = request->rq_async_args;
1949         new_req->rq_commit_cb = request->rq_commit_cb;
1950         /* cap the resend delay to the current request timeout; this is
1951          * similar to what ptlrpc does (see after_reply()) */
1952         if (aa->aa_resends > new_req->rq_timeout)
1953                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
1954         else
1955                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
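        /* e.g. (illustrative) with rq_timeout == 30s the 1st resend is
         * delayed 1s, the 2nd 2s, and so on, capped at 30s once
         * aa_resends exceeds the timeout */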
1956         new_req->rq_generation_set = 1;
1957         new_req->rq_import_generation = request->rq_import_generation;
1958
1959         new_aa = ptlrpc_req_async_args(new_req);
1960
1961         INIT_LIST_HEAD(&new_aa->aa_oaps);
1962         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
1963         INIT_LIST_HEAD(&new_aa->aa_exts);
1964         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
1965         new_aa->aa_resends = aa->aa_resends;
1966
1967         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
1968                 if (oap->oap_request) {
1969                         ptlrpc_req_finished(oap->oap_request);
1970                         oap->oap_request = ptlrpc_request_addref(new_req);
1971                 }
1972         }
1973
1974         /* XXX: This code will run into problems if we ever support adding
1975          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
1976          * waiting for all of them to finish. We should inherit the request
1977          * set from the old request. */
1978         ptlrpcd_add_req(new_req);
1979
1980         DEBUG_REQ(D_INFO, new_req, "new request");
1981         RETURN(0);
1982 }
1983
1984 /*
1985  * We want disk allocation on the target to happen in offset order, so we
1986  * follow Sedgewick's advice and stick to the dead-simple shellsort -- it'll
1987  * do fine for our small page arrays and doesn't require allocation.  It's an
1988  * insertion sort that swaps elements that are strides apart, shrinking the
1989  * stride down until it's 1 and the array is sorted.
1990  */
1991 static void sort_brw_pages(struct brw_page **array, int num)
1992 {
1993         int stride, i, j;
1994         struct brw_page *tmp;
1995
1996         if (num == 1)
1997                 return;
1998         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
1999                 ;
2000
2001         do {
2002                 stride /= 3;
2003                 for (i = stride ; i < num ; i++) {
2004                         tmp = array[i];
2005                         j = i;
2006                         while (j >= stride && array[j - stride]->off > tmp->off) {
2007                                 array[j] = array[j - stride];
2008                                 j -= stride;
2009                         }
2010                         array[j] = tmp;
2011                 }
2012         } while (stride > 1);
2013 }
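/*
 * Illustrative trace (not part of the driver): for num == 100 the seed loop
 * above walks 1, 4, 13, 40, 121 and leaves stride == 121, so the do/while
 * pass sorts with gaps 40, 13, 4 and finally 1 (Knuth's 3h + 1 sequence);
 * the last pass is a plain insertion sort over an almost-sorted array.
 */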
2014
2015 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2016 {
2017         LASSERT(ppga != NULL);
2018         OBD_FREE(ppga, sizeof(*ppga) * count);
2019 }
2020
2021 static int brw_interpret(const struct lu_env *env,
2022                          struct ptlrpc_request *req, void *data, int rc)
2023 {
2024         struct osc_brw_async_args *aa = data;
2025         struct osc_extent *ext;
2026         struct osc_extent *tmp;
2027         struct client_obd *cli = aa->aa_cli;
2028         unsigned long           transferred = 0;
2029         ENTRY;
2030
2031         rc = osc_brw_fini_request(req, rc);
2032         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2033         /* When the server returns -EINPROGRESS, the client should always
2034          * retry regardless of how many times the bulk was already resent. */
2035         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2036                 if (req->rq_import_generation !=
2037                     req->rq_import->imp_generation) {
2038                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2039                                ""DOSTID", rc = %d.\n",
2040                                req->rq_import->imp_obd->obd_name,
2041                                POSTID(&aa->aa_oa->o_oi), rc);
2042                 } else if (rc == -EINPROGRESS ||
2043                     client_should_resend(aa->aa_resends, aa->aa_cli)) {
2044                         rc = osc_brw_redo_request(req, aa, rc);
2045                 } else {
2046                         CERROR("%s: too many resend retries for object: "
2047                                "%llu:%llu, rc = %d.\n",
2048                                req->rq_import->imp_obd->obd_name,
2049                                POSTID(&aa->aa_oa->o_oi), rc);
2050                 }
2051
2052                 if (rc == 0)
2053                         RETURN(0);
2054                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2055                         rc = -EIO;
2056         }
2057
2058         if (rc == 0) {
2059                 struct obdo *oa = aa->aa_oa;
2060                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2061                 unsigned long valid = 0;
2062                 struct cl_object *obj;
2063                 struct osc_async_page *last;
2064
2065                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2066                 obj = osc2cl(last->oap_obj);
2067
2068                 cl_object_attr_lock(obj);
2069                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2070                         attr->cat_blocks = oa->o_blocks;
2071                         valid |= CAT_BLOCKS;
2072                 }
2073                 if (oa->o_valid & OBD_MD_FLMTIME) {
2074                         attr->cat_mtime = oa->o_mtime;
2075                         valid |= CAT_MTIME;
2076                 }
2077                 if (oa->o_valid & OBD_MD_FLATIME) {
2078                         attr->cat_atime = oa->o_atime;
2079                         valid |= CAT_ATIME;
2080                 }
2081                 if (oa->o_valid & OBD_MD_FLCTIME) {
2082                         attr->cat_ctime = oa->o_ctime;
2083                         valid |= CAT_CTIME;
2084                 }
2085
2086                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2087                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2088                         loff_t last_off = last->oap_count + last->oap_obj_off +
2089                                 last->oap_page_off;
2090
2091                         /* Change the file size if this is an out-of-quota or
2092                          * direct I/O write and it extends the file size */
2093                         if (loi->loi_lvb.lvb_size < last_off) {
2094                                 attr->cat_size = last_off;
2095                                 valid |= CAT_SIZE;
2096                         }
2097                         /* Extend KMS if it's not a lockless write */
2098                         if (loi->loi_kms < last_off &&
2099                             oap2osc_page(last)->ops_srvlock == 0) {
2100                                 attr->cat_kms = last_off;
2101                                 valid |= CAT_KMS;
2102                         }
2103                 }
2104
2105                 if (valid != 0)
2106                         cl_object_attr_update(env, obj, attr, valid);
2107                 cl_object_attr_unlock(obj);
2108         }
2109         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2110
2111         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2112                 osc_inc_unstable_pages(req);
2113
2114         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2115                 list_del_init(&ext->oe_link);
2116                 osc_extent_finish(env, ext, 1,
2117                                   rc && req->rq_no_delay ? -EWOULDBLOCK : rc);
2118         }
2119         LASSERT(list_empty(&aa->aa_exts));
2120         LASSERT(list_empty(&aa->aa_oaps));
2121
2122         transferred = (req->rq_bulk == NULL ? /* short io */
2123                        aa->aa_requested_nob :
2124                        req->rq_bulk->bd_nob_transferred);
2125
2126         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2127         ptlrpc_lprocfs_brw(req, transferred);
2128
2129         spin_lock(&cli->cl_loi_list_lock);
2130         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2131          * is called so we know whether to go to sync BRWs or wait for more
2132          * RPCs to complete */
2133         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2134                 cli->cl_w_in_flight--;
2135         else
2136                 cli->cl_r_in_flight--;
2137         osc_wake_cache_waiters(cli);
2138         spin_unlock(&cli->cl_loi_list_lock);
2139
2140         osc_io_unplug(env, cli, NULL);
2141         RETURN(rc);
2142 }
2143
2144 static void brw_commit(struct ptlrpc_request *req)
2145 {
2146         /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2147          * this function, called via rq_commit_cb, we need to ensure
2148          * osc_dec_unstable_pages is still called. Otherwise unstable
2149          * pages may be leaked. */
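        /*
         * Interleaving sketch (an assumption based on the comment above,
         * not a guarantee from this function alone): if brw_interpret()
         * incremented first, rq_unstable is set and the branch below
         * decrements; if this callback runs first, rq_committed is set so
         * the later increment path can notice it and back itself out.
         */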
2150         spin_lock(&req->rq_lock);
2151         if (likely(req->rq_unstable)) {
2152                 req->rq_unstable = 0;
2153                 spin_unlock(&req->rq_lock);
2154
2155                 osc_dec_unstable_pages(req);
2156         } else {
2157                 req->rq_committed = 1;
2158                 spin_unlock(&req->rq_lock);
2159         }
2160 }
2161
2162 /**
2163  * Build an RPC from the list of extents @ext_list. The caller must
2164  * ensure that the total number of pages does not exceed the max per RPC.
2165  * Extents in the list must be in OES_RPC state.
2166  */
2167 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2168                   struct list_head *ext_list, int cmd)
2169 {
2170         struct ptlrpc_request           *req = NULL;
2171         struct osc_extent               *ext;
2172         struct brw_page                 **pga = NULL;
2173         struct osc_brw_async_args       *aa = NULL;
2174         struct obdo                     *oa = NULL;
2175         struct osc_async_page           *oap;
2176         struct osc_object               *obj = NULL;
2177         struct cl_req_attr              *crattr = NULL;
2178         loff_t                          starting_offset = OBD_OBJECT_EOF;
2179         loff_t                          ending_offset = 0;
2180         int                             mpflag = 0;
2181         int                             mem_tight = 0;
2182         int                             page_count = 0;
2183         bool                            soft_sync = false;
2184         bool                            interrupted = false;
2185         bool                            ndelay = false;
2186         int                             i;
2187         int                             grant = 0;
2188         int                             rc;
2189         __u32                           layout_version = 0;
2190         struct list_head                rpc_list = LIST_HEAD_INIT(rpc_list);
2191         struct ost_body                 *body;
2192         ENTRY;
2193         LASSERT(!list_empty(ext_list));
2194
2195         /* add pages into rpc_list to build BRW rpc */
2196         list_for_each_entry(ext, ext_list, oe_link) {
2197                 LASSERT(ext->oe_state == OES_RPC);
2198                 mem_tight |= ext->oe_memalloc;
2199                 grant += ext->oe_grants;
2200                 page_count += ext->oe_nr_pages;
2201                 layout_version = MAX(layout_version, ext->oe_layout_version);
2202                 if (obj == NULL)
2203                         obj = ext->oe_obj;
2204         }
2205
2206         soft_sync = osc_over_unstable_soft_limit(cli);
2207         if (mem_tight)
2208                 mpflag = cfs_memory_pressure_get_and_set();
2209
2210         OBD_ALLOC(pga, sizeof(*pga) * page_count);
2211         if (pga == NULL)
2212                 GOTO(out, rc = -ENOMEM);
2213
2214         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2215         if (oa == NULL)
2216                 GOTO(out, rc = -ENOMEM);
2217
2218         i = 0;
2219         list_for_each_entry(ext, ext_list, oe_link) {
2220                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2221                         if (mem_tight)
2222                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2223                         if (soft_sync)
2224                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2225                         pga[i] = &oap->oap_brw_page;
2226                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2227                         i++;
2228
2229                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2230                         if (starting_offset == OBD_OBJECT_EOF ||
2231                             starting_offset > oap->oap_obj_off)
2232                                 starting_offset = oap->oap_obj_off;
2233                         else
2234                                 LASSERT(oap->oap_page_off == 0);
2235                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2236                                 ending_offset = oap->oap_obj_off +
2237                                                 oap->oap_count;
2238                         else
2239                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2240                                         PAGE_SIZE);
2241                         if (oap->oap_interrupted)
2242                                 interrupted = true;
2243                 }
2244                 if (ext->oe_ndelay)
2245                         ndelay = true;
2246         }
2247
2248         /* first page in the list */
2249         oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
2250
2251         crattr = &osc_env_info(env)->oti_req_attr;
2252         memset(crattr, 0, sizeof(*crattr));
2253         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2254         crattr->cra_flags = ~0ULL;
2255         crattr->cra_page = oap2cl_page(oap);
2256         crattr->cra_oa = oa;
2257         cl_req_attr_set(env, osc2cl(obj), crattr);
2258
2259         if (cmd == OBD_BRW_WRITE) {
2260                 oa->o_grant_used = grant;
2261                 if (layout_version > 0) {
2262                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2263                                PFID(&oa->o_oi.oi_fid), layout_version);
2264
2265                         oa->o_layout_version = layout_version;
2266                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2267                 }
2268         }
2269
2270         sort_brw_pages(pga, page_count);
2271         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2272         if (rc != 0) {
2273                 CERROR("prep_req failed: %d\n", rc);
2274                 GOTO(out, rc);
2275         }
2276
2277         req->rq_commit_cb = brw_commit;
2278         req->rq_interpret_reply = brw_interpret;
2279         req->rq_memalloc = mem_tight != 0;
2280         oap->oap_request = ptlrpc_request_addref(req);
2281         if (interrupted && !req->rq_intr)
2282                 ptlrpc_mark_interrupted(req);
2283         if (ndelay) {
2284                 req->rq_no_resend = req->rq_no_delay = 1;
2285                 /* We should probably set a shorter timeout value here
2286                  * to handle ETIMEDOUT in brw_interpret() correctly. */
2287                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2288         }
2289
2290         /* Need to update the timestamps after the request is built in case
2291          * we race with setattr (locally or in queue at the OST).  If the OST
2292          * gets the later setattr before the earlier BRW (as determined by the
2293          * request xid), the OST will not use the BRW timestamps.  Sadly, there
2294          * is no obvious way to do this in a single call.  bug 10150 */
2295         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2296         crattr->cra_oa = &body->oa;
2297         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2298         cl_req_attr_set(env, osc2cl(obj), crattr);
2299         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2300
2301         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2302         aa = ptlrpc_req_async_args(req);
2303         INIT_LIST_HEAD(&aa->aa_oaps);
2304         list_splice_init(&rpc_list, &aa->aa_oaps);
2305         INIT_LIST_HEAD(&aa->aa_exts);
2306         list_splice_init(ext_list, &aa->aa_exts);
2307
2308         spin_lock(&cli->cl_loi_list_lock);
2309         starting_offset >>= PAGE_SHIFT;
2310         if (cmd == OBD_BRW_READ) {
2311                 cli->cl_r_in_flight++;
2312                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2313                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2314                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2315                                       starting_offset + 1);
2316         } else {
2317                 cli->cl_w_in_flight++;
2318                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2319                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2320                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2321                                       starting_offset + 1);
2322         }
2323         spin_unlock(&cli->cl_loi_list_lock);
2324
2325         DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%uw in flight",
2326                   page_count, aa, cli->cl_r_in_flight,
2327                   cli->cl_w_in_flight);
2328         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2329
2330         ptlrpcd_add_req(req);
2331         rc = 0;
2332         EXIT;
2333
2334 out:
2335         if (mem_tight != 0)
2336                 cfs_memory_pressure_restore(mpflag);
2337
2338         if (rc != 0) {
2339                 LASSERT(req == NULL);
2340
2341                 if (oa)
2342                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2343                 if (pga)
2344                         OBD_FREE(pga, sizeof(*pga) * page_count);
2345                 /* this should happen rarely and is pretty bad; it makes the
2346                  * pending list not follow the dirty order */
2347                 while (!list_empty(ext_list)) {
2348                         ext = list_entry(ext_list->next, struct osc_extent,
2349                                          oe_link);
2350                         list_del_init(&ext->oe_link);
2351                         osc_extent_finish(env, ext, 0, rc);
2352                 }
2353         }
2354         RETURN(rc);
2355 }
2356
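/* Claim @lock for @data under the lock's resource lock: returns 1 if
 * l_ast_data was unset (it is now @data) or already equals @data, and 0
 * if the lock is already claimed by a different pointer. */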
2357 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2358 {
2359         int set = 0;
2360
2361         LASSERT(lock != NULL);
2362
2363         lock_res_and_lock(lock);
2364
2365         if (lock->l_ast_data == NULL)
2366                 lock->l_ast_data = data;
2367         if (lock->l_ast_data == data)
2368                 set = 1;
2369
2370         unlock_res_and_lock(lock);
2371
2372         return set;
2373 }
2374
2375 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2376                      void *cookie, struct lustre_handle *lockh,
2377                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2378                      int errcode)
2379 {
2380         bool intent = *flags & LDLM_FL_HAS_INTENT;
2381         int rc;
2382         ENTRY;
2383
2384         /* The request was created before ldlm_cli_enqueue call. */
2385         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2386                 struct ldlm_reply *rep;
2387
2388                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2389                 LASSERT(rep != NULL);
2390
2391                 rep->lock_policy_res1 =
2392                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2393                 if (rep->lock_policy_res1)
2394                         errcode = rep->lock_policy_res1;
2395                 if (!speculative)
2396                         *flags |= LDLM_FL_LVB_READY;
2397         } else if (errcode == ELDLM_OK) {
2398                 *flags |= LDLM_FL_LVB_READY;
2399         }
2400
2401         /* Call the update callback. */
2402         rc = (*upcall)(cookie, lockh, errcode);
2403
2404         /* release the reference taken in ldlm_cli_enqueue() */
2405         if (errcode == ELDLM_LOCK_MATCHED)
2406                 errcode = ELDLM_OK;
2407         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2408                 ldlm_lock_decref(lockh, mode);
2409
2410         RETURN(rc);
2411 }
2412
2413 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2414                           struct osc_enqueue_args *aa, int rc)
2415 {
2416         struct ldlm_lock *lock;
2417         struct lustre_handle *lockh = &aa->oa_lockh;
2418         enum ldlm_mode mode = aa->oa_mode;
2419         struct ost_lvb *lvb = aa->oa_lvb;
2420         __u32 lvb_len = sizeof(*lvb);
2421         __u64 flags = 0;
2422
2423         ENTRY;
2424
2425         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2426          * be valid. */
2427         lock = ldlm_handle2lock(lockh);
2428         LASSERTF(lock != NULL,
2429                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2430                  lockh->cookie, req, aa);
2431
2432         /* Take an additional reference so that a blocking AST that
2433          * ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
2434          * to arrive after an upcall has been executed by
2435          * osc_enqueue_fini(). */
2436         ldlm_lock_addref(lockh, mode);
2437
2438         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2439         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2440
2441         /* Let CP AST to grant the lock first. */
2442         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2443
2444         if (aa->oa_speculative) {
2445                 LASSERT(aa->oa_lvb == NULL);
2446                 LASSERT(aa->oa_flags == NULL);
2447                 aa->oa_flags = &flags;
2448         }
2449
2450         /* Complete obtaining the lock procedure. */
2451         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
2452                                    aa->oa_mode, aa->oa_flags, lvb, lvb_len,
2453                                    lockh, rc);
2454         /* Complete osc stuff. */
2455         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2456                               aa->oa_flags, aa->oa_speculative, rc);
2457
2458         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2459
2460         ldlm_lock_decref(lockh, mode);
2461         LDLM_LOCK_PUT(lock);
2462         RETURN(rc);
2463 }
2464
2465 struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
2466
2467 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
2468  * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
2469  * with other synchronous requests; however, holding some locks while trying to
2470  * obtain others may take a considerable amount of time in case of OST failure,
2471  * and when other sync requests cannot get a lock released by a client, that
2472  * client is evicted from the cluster -- such scenarios make life difficult, so
2473  * release locks just after they are obtained. */
2474 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2475                      __u64 *flags, union ldlm_policy_data *policy,
2476                      struct ost_lvb *lvb, int kms_valid,
2477                      osc_enqueue_upcall_f upcall, void *cookie,
2478                      struct ldlm_enqueue_info *einfo,
2479                      struct ptlrpc_request_set *rqset, int async,
2480                      bool speculative)
2481 {
2482         struct obd_device *obd = exp->exp_obd;
2483         struct lustre_handle lockh = { 0 };
2484         struct ptlrpc_request *req = NULL;
2485         int intent = *flags & LDLM_FL_HAS_INTENT;
2486         __u64 match_flags = *flags;
2487         enum ldlm_mode mode;
2488         int rc;
2489         ENTRY;
2490
2491         /* Filesystem lock extents are extended to page boundaries so that
2492          * dealing with the page cache is a little smoother.  */
2493         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2494         policy->l_extent.end |= ~PAGE_MASK;
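        /* Worked example (illustrative, PAGE_SIZE == 4096): a request for
         * bytes [5000, 9000] is widened to [4096, 12287], i.e. the start
         * is rounded down and the end rounded up to page boundaries. */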
2495
2496         /*
2497          * kms is not valid when either object is completely fresh (so that no
2498          * locks are cached), or object was evicted. In the latter case cached
2499          * lock cannot be used, because it would prime inode state with
2500          * potentially stale LVB.
2501          */
2502         if (!kms_valid)
2503                 goto no_match;
2504
2505         /* Next, search for already existing extent locks that will cover us */
2506         /* If we're trying to read, we also search for an existing PW lock.  The
2507          * VFS and page cache already protect us locally, so lots of readers/
2508          * writers can share a single PW lock.
2509          *
2510          * There are problems with conversion deadlocks, so instead of
2511          * converting a read lock to a write lock, we'll just enqueue a new
2512          * one.
2513          *
2514          * At some point we should cancel the read lock instead of making them
2515          * send us a blocking callback, but there are problems with canceling
2516          * locks out from other users right now, too. */
2517         mode = einfo->ei_mode;
2518         if (einfo->ei_mode == LCK_PR)
2519                 mode |= LCK_PW;
2520         /* Normal lock requests must wait for the LVB to be ready before
2521          * matching a lock; speculative lock requests do not need to,
2522          * because they will not actually use the lock. */
2523         if (!speculative)
2524                 match_flags |= LDLM_FL_LVB_READY;
2525         if (intent != 0)
2526                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2527         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2528                                einfo->ei_type, policy, mode, &lockh, 0);
2529         if (mode) {
2530                 struct ldlm_lock *matched;
2531
2532                 if (*flags & LDLM_FL_TEST_LOCK)
2533                         RETURN(ELDLM_OK);
2534
2535                 matched = ldlm_handle2lock(&lockh);
2536                 if (speculative) {
2537                         /* This DLM lock request is speculative, and does not
2538                          * have an associated IO request. Therefore if there
2539                          * is already a DLM lock, it will just inform the
2540                          * caller to cancel the request for this stripe. */
2541                         lock_res_and_lock(matched);
2542                         if (ldlm_extent_equal(&policy->l_extent,
2543                             &matched->l_policy_data.l_extent))
2544                                 rc = -EEXIST;
2545                         else
2546                                 rc = -ECANCELED;
2547                         unlock_res_and_lock(matched);
2548
2549                         ldlm_lock_decref(&lockh, mode);
2550                         LDLM_LOCK_PUT(matched);
2551                         RETURN(rc);
2552                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2553                         *flags |= LDLM_FL_LVB_READY;
2554
2555                         /* We already have a lock, and it's referenced. */
2556                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2557
2558                         ldlm_lock_decref(&lockh, mode);
2559                         LDLM_LOCK_PUT(matched);
2560                         RETURN(ELDLM_OK);
2561                 } else {
2562                         ldlm_lock_decref(&lockh, mode);
2563                         LDLM_LOCK_PUT(matched);
2564                 }
2565         }
2566
2567 no_match:
2568         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2569                 RETURN(-ENOLCK);
2570
2571         if (intent) {
2572                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2573                                            &RQF_LDLM_ENQUEUE_LVB);
2574                 if (req == NULL)
2575                         RETURN(-ENOMEM);
2576
2577                 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
2578                 if (rc) {
2579                         ptlrpc_request_free(req);
2580                         RETURN(rc);
2581                 }
2582
2583                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
2584                                      sizeof(*lvb));
2585                 ptlrpc_request_set_replen(req);
2586         }
2587
2588         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2589         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2590
2591         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2592                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2593         if (async) {
2594                 if (!rc) {
2595                         struct osc_enqueue_args *aa;
2596                         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2597                         aa = ptlrpc_req_async_args(req);
2598                         aa->oa_exp         = exp;
2599                         aa->oa_mode        = einfo->ei_mode;
2600                         aa->oa_type        = einfo->ei_type;
2601                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2602                         aa->oa_upcall      = upcall;
2603                         aa->oa_cookie      = cookie;
2604                         aa->oa_speculative = speculative;
2605                         if (!speculative) {
2606                                 aa->oa_flags  = flags;
2607                                 aa->oa_lvb    = lvb;
2608                         } else {
2609                                 /* speculative locks essentially enqueue
2610                                  * a DLM lock in advance, so we don't care
2611                                  * about the result of the enqueue. */
2612                                 aa->oa_lvb    = NULL;
2613                                 aa->oa_flags  = NULL;
2614                         }
2615
2616                         req->rq_interpret_reply =
2617                                 (ptlrpc_interpterer_t)osc_enqueue_interpret;
2618                         if (rqset == PTLRPCD_SET)
2619                                 ptlrpcd_add_req(req);
2620                         else
2621                                 ptlrpc_set_add_req(rqset, req);
2622                 } else if (intent) {
2623                         ptlrpc_req_finished(req);
2624                 }
2625                 RETURN(rc);
2626         }
2627
2628         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2629                               flags, speculative, rc);
2630         if (intent)
2631                 ptlrpc_req_finished(req);
2632
2633         RETURN(rc);
2634 }
2635
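/**
 * Look up an already granted extent lock covering \a policy without
 * enqueueing a new one.  The extent is first widened to page boundaries,
 * and a PR request may be satisfied by an existing PW lock since the VFS
 * and page cache already serialize access locally.  If \a data is given
 * but cannot be attached to the matched lock, the match reference is
 * dropped and 0 is returned.
 */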
2636 int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2637                    enum ldlm_type type, union ldlm_policy_data *policy,
2638                    enum ldlm_mode mode, __u64 *flags, void *data,
2639                    struct lustre_handle *lockh, int unref)
2640 {
2641         struct obd_device *obd = exp->exp_obd;
2642         __u64 lflags = *flags;
2643         enum ldlm_mode rc;
2644         ENTRY;
2645
2646         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2647                 RETURN(-EIO);
2648
2649         /* Filesystem lock extents are extended to page boundaries so that
2650          * dealing with the page cache is a little smoother */
2651         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2652         policy->l_extent.end |= ~PAGE_MASK;
2653
2654         /* Next, search for already existing extent locks that will cover us */
2655         /* If we're trying to read, we also search for an existing PW lock.  The
2656          * VFS and page cache already protect us locally, so lots of readers/
2657          * writers can share a single PW lock. */
2658         rc = mode;
2659         if (mode == LCK_PR)
2660                 rc |= LCK_PW;
2661         rc = ldlm_lock_match(obd->obd_namespace, lflags,
2662                              res_id, type, policy, rc, lockh, unref);
2663         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2664                 RETURN(rc);
2665
2666         if (data != NULL) {
2667                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2668
2669                 LASSERT(lock != NULL);
2670                 if (!osc_set_lock_data(lock, data)) {
2671                         ldlm_lock_decref(lockh, rc);
2672                         rc = 0;
2673                 }
2674                 LDLM_LOCK_PUT(lock);
2675         }
2676         RETURN(rc);
2677 }
2678
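/* Reply callback for OST_STATFS: -EBADR means the request was never sent
 * because of a failure at a higher level (LOV) and is passed straight back;
 * -ENOTCONN/-EAGAIN are swallowed for OBD_STATFS_NODELAY requests; on
 * success the reply is copied out before the oi_cb_up() upcall runs. */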
2679 static int osc_statfs_interpret(const struct lu_env *env,
2680                                 struct ptlrpc_request *req,
2681                                 struct osc_async_args *aa, int rc)
2682 {
2683         struct obd_statfs *msfs;
2684         ENTRY;
2685
2686         if (rc == -EBADR)
2687                 /* The request has in fact never been sent
2688                  * due to issues at a higher level (LOV).
2689                  * Exit immediately since the caller is
2690                  * aware of the problem and takes care
2691                  * of the cleanup. */
2692                 RETURN(rc);
2693
2694         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2695             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2696                 GOTO(out, rc = 0);
2697
2698         if (rc != 0)
2699                 GOTO(out, rc);
2700
2701         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2702         if (msfs == NULL)
2703                 GOTO(out, rc = -EPROTO);
2705
2706         *aa->aa_oi->oi_osfs = *msfs;
2707 out:
2708         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2709         RETURN(rc);
2710 }
2711
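/* Issue an OST_STATFS request asynchronously through \a rqset; the reply is
 * consumed by osc_statfs_interpret().  Note \a max_age is currently not
 * packed into the request -- see the comment below. */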
2712 static int osc_statfs_async(struct obd_export *exp,
2713                             struct obd_info *oinfo, time64_t max_age,
2714                             struct ptlrpc_request_set *rqset)
2715 {
2716         struct obd_device     *obd = class_exp2obd(exp);
2717         struct ptlrpc_request *req;
2718         struct osc_async_args *aa;
2719         int rc;
2720         ENTRY;
2721
2722         /* We could possibly pass max_age in the request (as an absolute
2723          * timestamp or a "seconds.usec ago") so the target can avoid doing
2724          * extra calls into the filesystem if that isn't necessary (e.g.
2725          * during mount that would help a bit).  Having relative timestamps
2726          * is not so great if request processing is slow, while absolute
2727          * timestamps are not ideal because they need time synchronization. */
2728         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2729         if (req == NULL)
2730                 RETURN(-ENOMEM);
2731
2732         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2733         if (rc) {
2734                 ptlrpc_request_free(req);
2735                 RETURN(rc);
2736         }
2737         ptlrpc_request_set_replen(req);
2738         req->rq_request_portal = OST_CREATE_PORTAL;
2739         ptlrpc_at_set_req_timeout(req);
2740
2741         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
2742                 /* procfs requests should not wait for statfs to avoid deadlock */
2743                 req->rq_no_resend = 1;
2744                 req->rq_no_delay = 1;
2745         }
2746
2747         req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
2748         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2749         aa = ptlrpc_req_async_args(req);
2750         aa->aa_oi = oinfo;
2751
2752         ptlrpc_set_add_req(rqset, req);
2753         RETURN(0);
2754 }
2755
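/* Synchronous statfs.  The import is referenced under cl_sem to synchronize
 * with client_disconnect_export(), then an OST_STATFS request is queued and
 * waited for, and the reply is copied into \a osfs. */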
2756 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
2757                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
2758 {
2759         struct obd_device     *obd = class_exp2obd(exp);
2760         struct obd_statfs     *msfs;
2761         struct ptlrpc_request *req;
2762         struct obd_import     *imp = NULL;
2763         int rc;
2764         ENTRY;
2765
2766         /* Since the request might also come from lprocfs, we need to
2767          * synchronize this with client_disconnect_export(). Bug 15684. */
2769         down_read(&obd->u.cli.cl_sem);
2770         if (obd->u.cli.cl_import)
2771                 imp = class_import_get(obd->u.cli.cl_import);
2772         up_read(&obd->u.cli.cl_sem);
2773         if (!imp)
2774                 RETURN(-ENODEV);
2775
2776         /* We could possibly pass max_age in the request (as an absolute
2777          * timestamp or a "seconds.usec ago") so the target can avoid doing
2778          * extra calls into the filesystem if that isn't necessary (e.g.
2779          * during mount that would help a bit).  Having relative timestamps
2780          * is not so great if request processing is slow, while absolute
2781          * timestamps are not ideal because they need time synchronization. */
2782         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
2783
2784         class_import_put(imp);
2785
2786         if (req == NULL)
2787                 RETURN(-ENOMEM);
2788
2789         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2790         if (rc) {
2791                 ptlrpc_request_free(req);
2792                 RETURN(rc);
2793         }
2794         ptlrpc_request_set_replen(req);
2795         req->rq_request_portal = OST_CREATE_PORTAL;
2796         ptlrpc_at_set_req_timeout(req);
2797
2798         if (flags & OBD_STATFS_NODELAY) {
2799                 /* procfs requests should not wait for statfs to avoid deadlock */
2800                 req->rq_no_resend = 1;
2801                 req->rq_no_delay = 1;
2802         }
2803
2804         rc = ptlrpc_queue_wait(req);
2805         if (rc)
2806                 GOTO(out, rc);
2807
2808         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2809         if (msfs == NULL)
2810                 GOTO(out, rc = -EPROTO);
2811
2812         *osfs = *msfs;
2813
2814         EXIT;
2815 out:
2816         ptlrpc_req_finished(req);
2817         return rc;
2818 }
2819
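/* OSC-specific ioctls.  A reference on this module is held for the duration
 * of the call so it cannot be unloaded mid-ioctl; unrecognised commands
 * return -ENOTTY. */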
2820 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2821                          void *karg, void __user *uarg)
2822 {
2823         struct obd_device *obd = exp->exp_obd;
2824         struct obd_ioctl_data *data = karg;
2825         int err = 0;
2826         ENTRY;
2827
2828         if (!try_module_get(THIS_MODULE)) {
2829                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
2830                        module_name(THIS_MODULE));
2831                 return -EINVAL;
2832         }
2833         switch (cmd) {
2834         case OBD_IOC_CLIENT_RECOVER:
2835                 err = ptlrpc_recover_import(obd->u.cli.cl_import,
2836                                             data->ioc_inlbuf1, 0);
2837                 if (err > 0)
2838                         err = 0;
2839                 GOTO(out, err);
2840         case IOC_OSC_SET_ACTIVE:
2841                 err = ptlrpc_set_import_active(obd->u.cli.cl_import,
2842                                                data->ioc_offset);
2843                 GOTO(out, err);
2844         case OBD_IOC_PING_TARGET:
2845                 err = ptlrpc_obd_ping(obd);
2846                 GOTO(out, err);
2847         default:
2848                 CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
2849                        cmd, current_comm());
2850                 GOTO(out, err = -ENOTTY);
2851         }
2852 out:
2853         module_put(THIS_MODULE);
2854         return err;
2855 }
2856
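/**
 * Handle obd_set_info_async() keys for the OSC layer.  Checksum, sptlrpc
 * and client-cache keys are processed locally; any other key is packed into
 * an OST_SET_INFO RPC (or a grant-shrink RPC for KEY_GRANT_SHRINK) and sent
 * to the OST.
 */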
2857 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
2858                        u32 keylen, void *key, u32 vallen, void *val,
2859                        struct ptlrpc_request_set *set)
2860 {
2861         struct ptlrpc_request *req;
2862         struct obd_device     *obd = exp->exp_obd;
2863         struct obd_import     *imp = class_exp2cliimp(exp);
2864         char                  *tmp;
2865         int                    rc;
2866         ENTRY;
2867
2868         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
2869
2870         if (KEY_IS(KEY_CHECKSUM)) {
2871                 if (vallen != sizeof(int))
2872                         RETURN(-EINVAL);
2873                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
2874                 RETURN(0);
2875         }
2876
2877         if (KEY_IS(KEY_SPTLRPC_CONF)) {
2878                 sptlrpc_conf_client_adapt(obd);
2879                 RETURN(0);
2880         }
2881
2882         if (KEY_IS(KEY_FLUSH_CTX)) {
2883                 sptlrpc_import_flush_my_ctx(imp);
2884                 RETURN(0);
2885         }
2886
2887         if (KEY_IS(KEY_CACHE_SET)) {
2888                 struct client_obd *cli = &obd->u.cli;
2889
2890                 LASSERT(cli->cl_cache == NULL); /* only once */
2891                 cli->cl_cache = (struct cl_client_cache *)val;
2892                 cl_cache_incref(cli->cl_cache);
2893                 cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
2894
2895                 /* add this osc into entity list */
2896                 LASSERT(list_empty(&cli->cl_lru_osc));
2897                 spin_lock(&cli->cl_cache->ccc_lru_lock);
2898                 list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
2899                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
2900
2901                 RETURN(0);
2902         }
2903
2904         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
2905                 struct client_obd *cli = &obd->u.cli;
2906                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
2907                 long target = *(long *)val;
2908
2909                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
2910                 *(long *)val -= nr;
2911                 RETURN(0);
2912         }
2913
2914         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
2915                 RETURN(-EINVAL);
2916
2917         /* We pass all other commands directly to OST.  Since nobody calls
2918          * osc methods directly and everybody is supposed to go through
2919          * LOV, we assume LOV checked invalid values for us.  The only
2920          * recognised values so far are evict_by_nid and mds_conn.  Even if
2921          * something bad goes through, we'd get a -EINVAL from OST
2922          * anyway. */
2923
2924         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
2925                                                 &RQF_OST_SET_GRANT_INFO :
2926                                                 &RQF_OBD_SET_INFO);
2927         if (req == NULL)
2928                 RETURN(-ENOMEM);
2929
2930         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
2931                              RCL_CLIENT, keylen);
2932         if (!KEY_IS(KEY_GRANT_SHRINK))
2933                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
2934                                      RCL_CLIENT, vallen);
2935         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
2936         if (rc) {
2937                 ptlrpc_request_free(req);
2938                 RETURN(rc);
2939         }
2940
2941         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
2942         memcpy(tmp, key, keylen);
2943         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
2944                                                         &RMF_OST_BODY :
2945                                                         &RMF_SETINFO_VAL);
2946         memcpy(tmp, val, vallen);
2947
2948         if (KEY_IS(KEY_GRANT_SHRINK)) {
2949                 struct osc_grant_args *aa;
2950                 struct obdo *oa;
2951
2952                 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
2953                 aa = ptlrpc_req_async_args(req);
2954                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2955                 if (!oa) {
2956                         ptlrpc_req_finished(req);
2957                         RETURN(-ENOMEM);
2958                 }
2959                 *oa = ((struct ost_body *)val)->oa;
2960                 aa->aa_oa = oa;
2961                 req->rq_interpret_reply = osc_shrink_grant_interpret;
2962         }
2963
2964         ptlrpc_request_set_replen(req);
2965         if (!KEY_IS(KEY_GRANT_SHRINK)) {
2966                 LASSERT(set != NULL);
2967                 ptlrpc_set_add_req(set, req);
2968                 ptlrpc_check_set(NULL, set);
2969         } else {
2970                 ptlrpcd_add_req(req);
2971         }
2972
2973         RETURN(0);
2974 }
2975 EXPORT_SYMBOL(osc_set_info_async);
2976
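/**
 * Recompute the grant to request at reconnect time: the grant still held
 * locally (available + reserved + dirty), or twice the BRW size if none is
 * held.  Any grant lost while disconnected is logged and cleared.
 */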
2977 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
2978                   struct obd_device *obd, struct obd_uuid *cluuid,
2979                   struct obd_connect_data *data, void *localdata)
2980 {
2981         struct client_obd *cli = &obd->u.cli;
2982
2983         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
2984                 long lost_grant;
2985                 long grant;
2986
2987                 spin_lock(&cli->cl_loi_list_lock);
2988                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
2989                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM)
2990                         grant += cli->cl_dirty_grant;
2991                 else
2992                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
2993                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
2994                 lost_grant = cli->cl_lost_grant;
2995                 cli->cl_lost_grant = 0;
2996                 spin_unlock(&cli->cl_loi_list_lock);
2997
2998                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
2999                        " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3000                        data->ocd_version, data->ocd_grant, lost_grant);
3001         }
3002
3003         RETURN(0);
3004 }
3005 EXPORT_SYMBOL(osc_reconnect);
3006
3007 int osc_disconnect(struct obd_export *exp)
3008 {
3009         struct obd_device *obd = class_exp2obd(exp);
3010         int rc;
3011
3012         rc = client_disconnect_export(exp);
3013         /**
3014          * Initially we put del_shrink_grant before disconnect_export, but it
3015          * causes the following problem if setup (connect) and cleanup
3016          * (disconnect) are tangled together.
3017          *      connect p1                     disconnect p2
3018          *   ptlrpc_connect_import
3019          *     ...............               class_manual_cleanup
3020          *                                     osc_disconnect
3021          *                                     del_shrink_grant
3022          *   ptlrpc_connect_interpret
3023          *     osc_init_grant
3024          *   add this client to shrink list
3025          *                                      cleanup_osc
3026          * Bang! the grant shrink thread triggers the shrink. Bug 18662
3027          */
3028         osc_del_grant_list(&obd->u.cli);
3029         return rc;
3030 }
3031 EXPORT_SYMBOL(osc_disconnect);
3032
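/**
 * cfs_hash iterator callback used on import invalidation: take the
 * osc_object attached to the resource's granted locks (if any), clear
 * LDLM_FL_CLEANED on every granted lock so the second
 * ldlm_namespace_cleanup() pass in osc_import_event() cancels them, then
 * invalidate the object.
 */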
3033 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3034                                  struct hlist_node *hnode, void *arg)
3035 {
3036         struct lu_env *env = arg;
3037         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3038         struct ldlm_lock *lock;
3039         struct osc_object *osc = NULL;
3040         ENTRY;
3041
3042         lock_res(res);
3043         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3044                 if (lock->l_ast_data != NULL && osc == NULL) {
3045                         osc = lock->l_ast_data;
3046                         cl_object_get(osc2cl(osc));
3047                 }
3048
3049                 /* clear the LDLM_FL_CLEANED flag to make sure the lock will
3050                  * be canceled by the 2nd round of ldlm_namespace_cleanup()
3051                  * in osc_import_event(). */
3052                 ldlm_clear_cleaned(lock);
3053         }
3054         unlock_res(res);
3055
3056         if (osc != NULL) {
3057                 osc_object_invalidate(env, osc);
3058                 cl_object_put(env, osc2cl(osc));
3059         }
3060
3061         RETURN(0);
3062 }
3063 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3064
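/* Dispatch import state-change events: drop grants on disconnect, flush
 * cached pages and clean the lock namespace on invalidation, (re)initialize
 * grant state and the request portal when connect data arrives, and forward
 * (in)activity events to the observer (typically the LOV). */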
3065 static int osc_import_event(struct obd_device *obd,
3066                             struct obd_import *imp,
3067                             enum obd_import_event event)
3068 {
3069         struct client_obd *cli;
3070         int rc = 0;
3071
3072         ENTRY;
3073         LASSERT(imp->imp_obd == obd);
3074
3075         switch (event) {
3076         case IMP_EVENT_DISCON: {
3077                 cli = &obd->u.cli;
3078                 spin_lock(&cli->cl_loi_list_lock);
3079                 cli->cl_avail_grant = 0;
3080                 cli->cl_lost_grant = 0;
3081                 spin_unlock(&cli->cl_loi_list_lock);
3082                 break;
3083         }
3084         case IMP_EVENT_INACTIVE: {
3085                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3086                 break;
3087         }
3088         case IMP_EVENT_INVALIDATE: {
3089                 struct ldlm_namespace *ns = obd->obd_namespace;
3090                 struct lu_env         *env;
3091                 __u16                  refcheck;
3092
3093                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3094
3095                 env = cl_env_get(&refcheck);
3096                 if (!IS_ERR(env)) {
3097                         osc_io_unplug(env, &obd->u.cli, NULL);
3098
3099                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3100                                                  osc_ldlm_resource_invalidate,
3101                                                  env, 0);
3102                         cl_env_put(env, &refcheck);
3103
3104                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3105                 } else
3106                         rc = PTR_ERR(env);
3107                 break;
3108         }
3109         case IMP_EVENT_ACTIVE: {
3110                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3111                 break;
3112         }
3113         case IMP_EVENT_OCD: {
3114                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3115
3116                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3117                         osc_init_grant(&obd->u.cli, ocd);
3118
3119                 /* See bug 7198 */
3120                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3121                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3122
3123                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3124                 break;
3125         }
3126         case IMP_EVENT_DEACTIVATE: {
3127                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3128                 break;
3129         }
3130         case IMP_EVENT_ACTIVATE: {
3131                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3132                 break;
3133         }
3134         default:
3135                 CERROR("Unknown import event %d\n", event);
3136                 LBUG();
3137         }
3138         RETURN(rc);
3139 }
3140
3141 /**
3142  * Determine whether the lock can be canceled before replaying the lock
3143  * during recovery, see bug16774 for detailed information.
3144  *
3145  * \retval zero the lock can't be canceled
3146  * \retval other ok to cancel
3147  */
3148 static int osc_cancel_weight(struct ldlm_lock *lock)
3149 {
3150         /*
3151          * Cancel all unused, granted extent locks.
3152          */
3153         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3154             lock->l_granted_mode == lock->l_req_mode &&
3155             osc_ldlm_weigh_ast(lock) == 0)
3156                 RETURN(1);
3157
3158         RETURN(0);
3159 }
3160
3161 static int brw_queue_work(const struct lu_env *env, void *data)
3162 {
3163         struct client_obd *cli = data;
3164
3165         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3166
3167         osc_io_unplug(env, cli, NULL);
3168         RETURN(0);
3169 }
3170
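/**
 * Common setup shared by OSC-like devices: take a ptlrpcd reference, do the
 * generic client setup, then allocate the ptlrpcd work items used for
 * writeback and LRU reclaim as well as the quota state.  Errors are unwound
 * in reverse order.
 */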
3171 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3172 {
3173         struct client_obd *cli = &obd->u.cli;
3174         void *handler;
3175         int rc;
3176
3177         ENTRY;
3178
3179         rc = ptlrpcd_addref();
3180         if (rc)
3181                 RETURN(rc);
3182
3183         rc = client_obd_setup(obd, lcfg);
3184         if (rc)
3185                 GOTO(out_ptlrpcd, rc);
3186
3188         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3189         if (IS_ERR(handler))
3190                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3191         cli->cl_writeback_work = handler;
3192
3193         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3194         if (IS_ERR(handler))
3195                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3196         cli->cl_lru_work = handler;
3197
3198         rc = osc_quota_setup(obd);
3199         if (rc)
3200                 GOTO(out_ptlrpcd_work, rc);
3201
3202         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3203         osc_update_next_shrink(cli);
3204
3205         RETURN(rc);
3206
3207 out_ptlrpcd_work:
3208         if (cli->cl_writeback_work != NULL) {
3209                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3210                 cli->cl_writeback_work = NULL;
3211         }
3212         if (cli->cl_lru_work != NULL) {
3213                 ptlrpcd_destroy_work(cli->cl_lru_work);
3214                 cli->cl_lru_work = NULL;
3215         }
3216         client_obd_cleanup(obd);
3217 out_ptlrpcd:
3218         ptlrpcd_decref();
3219         RETURN(rc);
3220 }
3221 EXPORT_SYMBOL(osc_setup_common);
3222
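/**
 * Full OSC device setup on top of osc_setup_common(): register tunables,
 * grow the shared request pool by cl_max_rpcs_in_flight + 2 requests (the
 * unlocked read/add of osc_pool_req_count means osc_reqpool_maxreqcount is
 * only an approximate cap), register the cancel-weight callback and join
 * the global grant-shrink list.
 */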
3223 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3224 {
3225         struct client_obd *cli = &obd->u.cli;
3226         int                adding;
3227         int                added;
3228         int                req_count;
3229         int                rc;
3230
3231         ENTRY;
3232
3233         rc = osc_setup_common(obd, lcfg);
3234         if (rc < 0)
3235                 RETURN(rc);
3236
3237         rc = osc_tunables_init(obd);
3238         if (rc)
3239                 RETURN(rc);
3240
3241         /*
3242          * We try to control the total number of requests with an upper limit,
3243          * osc_reqpool_maxreqcount. There might be a race that causes an
3244          * over-limit allocation, but it is fine.
3245          */
3246         req_count = atomic_read(&osc_pool_req_count);
3247         if (req_count < osc_reqpool_maxreqcount) {
3248                 adding = cli->cl_max_rpcs_in_flight + 2;
3249                 if (req_count + adding > osc_reqpool_maxreqcount)
3250                         adding = osc_reqpool_maxreqcount - req_count;
3251
3252                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3253                 atomic_add(added, &osc_pool_req_count);
3254         }
3255
3256         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3257
3258         spin_lock(&osc_shrink_lock);
3259         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3260         spin_unlock(&osc_shrink_lock);
3261         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3262         cli->cl_import->imp_idle_debug = D_HA;
3263
3264         RETURN(0);
3265 }
3266
3267 int osc_precleanup_common(struct obd_device *obd)
3268 {
3269         struct client_obd *cli = &obd->u.cli;
3270         ENTRY;
3271
3272         /* LU-464
3273          * for echo client, export may be on zombie list, wait for
3274          * zombie thread to cull it, because cli.cl_import will be
3275          * cleared in client_disconnect_export():
3276          *   class_export_destroy() -> obd_cleanup() ->
3277          *   echo_device_free() -> echo_client_cleanup() ->
3278          *   obd_disconnect() -> osc_disconnect() ->
3279          *   client_disconnect_export()
3280          */
3281         obd_zombie_barrier();
3282         if (cli->cl_writeback_work) {
3283                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3284                 cli->cl_writeback_work = NULL;
3285         }
3286
3287         if (cli->cl_lru_work) {
3288                 ptlrpcd_destroy_work(cli->cl_lru_work);
3289                 cli->cl_lru_work = NULL;
3290         }
3291
3292         obd_cleanup_client_import(obd);
3293         RETURN(0);
3294 }
3295 EXPORT_SYMBOL(osc_precleanup_common);
3296
3297 static int osc_precleanup(struct obd_device *obd)
3298 {
3299         ENTRY;
3300
3301         osc_precleanup_common(obd);
3302
3303         ptlrpc_lprocfs_unregister_obd(obd);
3304         RETURN(0);
3305 }
3306
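/**
 * Undo osc_setup()/osc_setup_common(): leave the grant-shrink list, detach
 * from the shared client cache LRU, release the quota cache and the generic
 * client state, then drop the ptlrpcd reference.
 */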
3307 int osc_cleanup_common(struct obd_device *obd)
3308 {
3309         struct client_obd *cli = &obd->u.cli;
3310         int rc;
3311
3312         ENTRY;
3313
3314         spin_lock(&osc_shrink_lock);
3315         list_del(&cli->cl_shrink_list);
3316         spin_unlock(&osc_shrink_lock);
3317
3318         /* lru cleanup */
3319         if (cli->cl_cache != NULL) {
3320                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3321                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3322                 list_del_init(&cli->cl_lru_osc);
3323                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3324                 cli->cl_lru_left = NULL;
3325                 cl_cache_decref(cli->cl_cache);
3326                 cli->cl_cache = NULL;
3327         }
3328
3329         /* free memory of osc quota cache */
3330         osc_quota_cleanup(obd);
3331
3332         rc = client_obd_cleanup(obd);
3333
3334         ptlrpcd_decref();
3335         RETURN(rc);
3336 }
3337 EXPORT_SYMBOL(osc_cleanup_common);
3338
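/* Apply a PARAM_OSC parameter update by writing the matching sysfs
 * attribute; class_modify_config() returns the number of parameters it
 * handled, which collapses to 0 on success here. */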
3339 int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
3340 {
3341         ssize_t count = class_modify_config(lcfg, PARAM_OSC,
3342                                              &obd->obd_kset.kobj);
3343         return count > 0 ? 0 : count;
3344 }
3345
3346 static int osc_process_config(struct obd_device *obd, size_t len, void *buf)
3347 {
3348         return osc_process_config_base(obd, buf);
3349 }
3350
3351 static struct obd_ops osc_obd_ops = {
3352         .o_owner                = THIS_MODULE,
3353         .o_setup                = osc_setup,
3354         .o_precleanup           = osc_precleanup,
3355         .o_cleanup              = osc_cleanup_common,
3356         .o_add_conn             = client_import_add_conn,
3357         .o_del_conn             = client_import_del_conn,
3358         .o_connect              = client_connect_import,
3359         .o_reconnect            = osc_reconnect,
3360         .o_disconnect           = osc_disconnect,
3361         .o_statfs               = osc_statfs,
3362         .o_statfs_async         = osc_statfs_async,
3363         .o_create               = osc_create,
3364         .o_destroy              = osc_destroy,
3365         .o_getattr              = osc_getattr,
3366         .o_setattr              = osc_setattr,
3367         .o_iocontrol            = osc_iocontrol,
3368         .o_set_info_async       = osc_set_info_async,
3369         .o_import_event         = osc_import_event,
3370         .o_process_config       = osc_process_config,
3371         .o_quotactl             = osc_quotactl,
3372 };
3373
3374 static struct shrinker *osc_cache_shrinker;
3375 struct list_head osc_shrink_list = LIST_HEAD_INIT(osc_shrink_list);
3376 DEFINE_SPINLOCK(osc_shrink_lock);
3377
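/* Compatibility shim for kernels that predate the split count/scan shrinker
 * API: emulate the old single ->shrink() entry point with the new
 * callbacks. */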
3378 #ifndef HAVE_SHRINKER_COUNT
3379 static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
3380 {
3381         struct shrink_control scv = {
3382                 .nr_to_scan = shrink_param(sc, nr_to_scan),
3383                 .gfp_mask   = shrink_param(sc, gfp_mask)
3384         };
3385 #if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
3386         struct shrinker *shrinker = NULL;
3387 #endif
3388
3389         (void)osc_cache_shrink_scan(shrinker, &scv);
3390
3391         return osc_cache_shrink_count(shrinker, &scv);
3392 }
3393 #endif
3394
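/**
 * Module init: register the OSC device type, hook up the page cache
 * shrinker and size the shared request pool.  The per-request buffer size
 * is OST_IO_MAXREQSIZE rounded up to a power of two, so the pool holds
 * roughly (osc_reqpool_mem_max << 20) / reqsize requests.  Grant-shrink
 * work is started last.
 */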
3395 static int __init osc_init(void)
3396 {
3397         bool enable_proc = true;
3398         struct obd_type *type;
3399         unsigned int reqpool_size;
3400         unsigned int reqsize;
3401         int rc;
3402         DEF_SHRINKER_VAR(osc_shvar, osc_cache_shrink,
3403                          osc_cache_shrink_count, osc_cache_shrink_scan);
3404         ENTRY;
3405
3406         /* Print the address of _any_ initialized kernel symbol from this
3407          * module, to allow debugging with gdb that doesn't support data
3408          * symbols from modules. */
3409         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3410
3411         rc = lu_kmem_init(osc_caches);
3412         if (rc)
3413                 RETURN(rc);
3414
3415         type = class_search_type(LUSTRE_OSP_NAME);
3416         if (type != NULL && type->typ_procsym != NULL)
3417                 enable_proc = false;
3418
3419         rc = class_register_type(&osc_obd_ops, NULL, enable_proc, NULL,
3420                                  LUSTRE_OSC_NAME, &osc_device_type);
3421         if (rc)
3422                 GOTO(out_kmem, rc);
3423
3424         osc_cache_shrinker = set_shrinker(DEFAULT_SEEKS, &osc_shvar);
3425
3426         /* This is obviously too much memory; we only prevent overflow here. */
3427         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3428                 GOTO(out_type, rc = -EINVAL);
3429
3430         reqpool_size = osc_reqpool_mem_max << 20;
3431
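        /* round the request buffer size up to the next power of two at or
         * above OST_IO_MAXREQSIZE */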
3432         reqsize = 1;
3433         while (reqsize < OST_IO_MAXREQSIZE)
3434                 reqsize = reqsize << 1;
3435
3436         /*
3437          * We don't enlarge the request count in the OSC pool according to
3438          * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3439          * after a normal allocation fails, so a small OSC pool won't
3440          * cause much performance degradation in most cases.
3441          */
3442         osc_reqpool_maxreqcount = reqpool_size / reqsize;
3443
3444         atomic_set(&osc_pool_req_count, 0);
3445         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3446                                           ptlrpc_add_rqs_to_pool);
3447
3448         if (osc_rq_pool == NULL)
3449                 GOTO(out_type, rc = -ENOMEM);
3450
3451         rc = osc_start_grant_work();
3452         if (rc != 0)
3453                 GOTO(out_req_pool, rc);
3454
3455         RETURN(rc);
3456
3457 out_req_pool:
3458         ptlrpc_free_rq_pool(osc_rq_pool);
3459 out_type:
3460         class_unregister_type(LUSTRE_OSC_NAME);
3461 out_kmem:
3462         lu_kmem_fini(osc_caches);
3463
3464         RETURN(rc);
3465 }
3466
3467 static void __exit osc_exit(void)
3468 {
3469         osc_stop_grant_work();
3470         remove_shrinker(osc_cache_shrinker);
3471         class_unregister_type(LUSTRE_OSC_NAME);
3472         lu_kmem_fini(osc_caches);
3473         ptlrpc_free_rq_pool(osc_rq_pool);
3474 }
3475
3476 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3477 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3478 MODULE_VERSION(LUSTRE_VERSION_STRING);
3479 MODULE_LICENSE("GPL");
3480
3481 module_init(osc_init);
3482 module_exit(osc_exit);