lustre/osc/osc_request.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static unsigned int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);

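/* Pack @oa into the request buffer, converting the local obdo into the
 * on-the-wire format negotiated in the import's connect data. */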
void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

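/* Synchronous getattr: send an OST_GETATTR RPC, wait for the reply, and
 * copy the attributes returned by the server back into @oa. */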
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

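/* Synchronous setattr: push the attributes in @oa to the server and wait
 * for the reply.  The object group must already be set in @oa. */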
static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

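/* Asynchronous setattr.  If @rqset is NULL the request is handed to ptlrpcd
 * and no reply is waited for; otherwise it is added to @rqset and @upcall
 * is invoked with @cookie once the reply arrives. */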
int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply = osc_setattr_interpret;

                sa = ptlrpc_req_async_args(sa, req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * Send an OST_LADVISE RPC carrying the hints in @ladvise_hdr.  If @rqset is
 * NULL, do not wait for a response; @upcall and @cookie may also be NULL in
 * that case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

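/* Synchronous object create.  In this code path only echo objects are
 * created: the FID sequence in @oa is asserted to be an echo sequence. */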
static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

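/* Send an OST_PUNCH RPC asynchronously via ptlrpcd; @upcall is invoked with
 * @cookie when the reply is interpreted. */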
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:        Export structure
 * @oa:         Attributes passed to OSS from client (obdo structure)
 * @upcall:     Completion callback invoked when the request finishes
 * @cookie:     Opaque data passed to @upcall
 * @mode:       Operation done on the given range
 *
 * Only block allocation (the standard preallocate operation) is supported
 * currently; other mode flags are not supported yet.  ftruncate(2) and
 * truncate(2) are handled via a SETATTR request instead.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);
        int rc;
        ENTRY;

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp),
                                   &RQF_OST_FALLOCATE);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_fallocate_base);

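/* Reply handler for OST_SYNC: copy the returned attributes into the caller's
 * obdo, refresh the osc object's cached blocks attribute, then invoke the
 * caller's upcall with the final status. */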
static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

/* Find and cancel locally locks matched by @mode in the resource named by
 * @oa's object id.  Found locks are added to the @cancels list.  Returns
 * the number of locks added. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes it from the case where ELC is not supported
         * at all, in which case we still want to cancel locks in advance,
         * just cancel them locally without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

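/* Reply handler for OST_DESTROY: release the in-flight slot taken in
 * osc_can_send_destroy() and wake anyone throttled on the waitqueue. */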
static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

        return 0;
}

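/* Try to reserve a slot for a destroy RPC, throttled by
 * cl_max_rpcs_in_flight.  The counter is incremented optimistically; on
 * failure the increment is undone, and waiters are woken if another
 * thread's decrement raced in between the two atomic operations. */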
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below cl_max_rpcs_in_flight.
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(-EINTR);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

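/* Fill @oa with the client's dirty page and grant accounting (o_dirty,
 * o_undirty, o_grant, o_dropped) under cl_loi_list_lock, so the server can
 * see the current cache state and adjust the grant accordingly. */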
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and the atomic_inc() are not covered
                 * by a lock, thus they may safely race and trip this
                 * CERROR() unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (cli->cl_ocd_grant_param) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CDEBUG(D_CACHE,
                      "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                      cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;
        }
        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
               " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
               oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}
EXPORT_SYMBOL(osc_update_next_shrink);

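/* Credit @grant bytes to the grant we believe we hold locally;
 * osc_update_grant() applies any extra grant the server returned in an
 * RPC reply. */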
static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

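/* Reply handler for a grant shrink RPC: on failure, return the grant we
 * tried to give back to cl_avail_grant; on success, absorb any grant the
 * server handed back.  The obdo allocated by the sender is freed either
 * way. */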
static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
{
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, aa->aa_oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
        aa->aa_oa = NULL;

        return rc;
}

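/* Carve a quarter of the locally available grant out of cl_avail_grant and
 * stash it in @oa for return to the server, flagging the obdo with
 * OBD_FL_SHRINK_GRANT and rearming the next shrink deadline. */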
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if we are already at or below the target.  Also never
         * shrink below a single RPC's worth of grant, as that would
         * negatively impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}

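/* Decide whether this client should send a grant shrink RPC now: only when
 * grant shrinking is supported and enabled, the shrink deadline has (almost)
 * arrived, the import is fully connected, and we hold more grant than a
 * single RPC would consume. */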
static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);
                return 0;
        }

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

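/* Periodic worker that walks the global client list, sends at most
 * GRANT_SHRINK_RPC_BATCH shrink RPCs per pass, and reschedules itself for
 * the earliest pending shrink deadline among all clients. */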
static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);
                        rpc_sent++;
                }

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }
}

void osc_schedule_grant_work(void)
{
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);
}
EXPORT_SYMBOL(osc_schedule_grant_work);

/**
 * Start grant work for returning grant to the server for idle clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it is the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as in-flight RPCs fail out; otherwise, it is
         * avail_grant + dirty.
         *
         * The race is tolerable here: if we are evicted, but imp_state has
         * already left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

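/* Validate the per-niobuf return codes in a BRW_WRITE reply: fail on the
 * first negative rc, treat any other non-zero rc as a protocol error, and
 * verify the bulk transfer moved exactly the number of bytes requested. */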
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];
                }

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

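/* Decide whether two brw_pages can be merged into a single niobuf: they must
 * be contiguous in the file and agree on all flags outside the known-safe
 * set below. */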
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC |
                                  OBD_BRW_SYS_RESOURCE);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

#if IS_ENABLED(CONFIG_CRC_T10DIF)
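/* T10-PI bulk checksum: generate per-sector DIF guard tags for each page
 * with @fn, accumulate the tags in a scratch page, and hash the tags
 * (rather than the payload itself) with the T10 top-level algorithm to
 * produce a single 32-bit checksum. */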
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum, bool resend)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        unsigned int bufsize;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        int rc = 0;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        CDEBUG(D_PAGE | (resend ? D_HA : 0),
               "GRD tags per page=%u, resend=%u, bytes=%u, pages=%zu\n",
               guard_number, resend, nob, pg_count);

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The number of guard slots left in the buffer should be
                 * enough to hold the checksums of a whole page
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (unlikely(resend))
                        CDEBUG(D_PAGE | D_HA,
                               "pga[%u]: used %u off %llu+%u gen checksum: %*phN\n",
                               i, used, pga[i]->off & ~PAGE_MASK, count,
                               (int)(used * sizeof(*guard_start)),
                               guard_start + used_number);
                if (rc)
                        break;

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        *check_sum = cksum;
out:
        __free_page(__page);
        return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum, re) \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

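/* Plain bulk checksum: hash the page payloads directly with the negotiated
 * algorithm, clipping the last page to the remaining byte count. */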
static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}

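/* Select the proper bulk checksum method for @cksum_type: use the T10-PI
 * path when a DIF checksum function is configured, otherwise fall back to
 * the plain page-data checksum. */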
static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum, bool resend)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum,
                                             resend);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

static inline void osc_release_bounce_pages(struct brw_page **pga,
                                            u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
        int i;

        for (i = 0; i < page_count; i++) {
                /* Bounce pages allocated by a call to
                 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
                 * are identified thanks to the PageChecked flag.
                 */
                if (PageChecked(pga[i]->pg))
                        llcrypt_finalize_bounce_page(&pga[i]->pg);
                pga[i]->count -= pga[i]->bp_count_diff;
                pga[i]->off += pga[i]->bp_off_diff;
        }
#endif
}

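/* Build a BRW (bulk read/write) RPC: writes allocate from the preallocated
 * osc_rq_pool while reads allocate normally; pages of fscrypt-encrypted
 * files are first encrypted into bounce pages before being attached to the
 * request. */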
1391 static int
1392 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
1393                      u32 page_count, struct brw_page **pga,
1394                      struct ptlrpc_request **reqp, int resend)
1395 {
1396         struct ptlrpc_request *req;
1397         struct ptlrpc_bulk_desc *desc;
1398         struct ost_body *body;
1399         struct obd_ioobj *ioobj;
1400         struct niobuf_remote *niobuf;
1401         int niocount, i, requested_nob, opc, rc, short_io_size = 0;
1402         struct osc_brw_async_args *aa;
1403         struct req_capsule *pill;
1404         struct brw_page *pg_prev;
1405         void *short_io_buf;
1406         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1407         struct inode *inode = NULL;
1408         bool directio = false;
1409         bool gpu = 0;
1410         bool enable_checksum = true;
1411         struct cl_page *clpage;
1412
1413         ENTRY;
1414         if (pga[0]->pg) {
1415                 clpage = oap2cl_page(brw_page2oap(pga[0]));
1416                 inode = clpage->cp_inode;
1417                 if (clpage->cp_type == CPT_TRANSIENT)
1418                         directio = true;
1419         }
1420         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1421                 RETURN(-ENOMEM); /* Recoverable */
1422         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1423                 RETURN(-EINVAL); /* Fatal */
1424
1425         if ((cmd & OBD_BRW_WRITE) != 0) {
1426                 opc = OST_WRITE;
1427                 req = ptlrpc_request_alloc_pool(cli->cl_import,
1428                                                 osc_rq_pool,
1429                                                 &RQF_OST_BRW_WRITE);
1430         } else {
1431                 opc = OST_READ;
1432                 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1433         }
1434         if (req == NULL)
1435                 RETURN(-ENOMEM);
1436
1437         if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode) &&
1438             llcrypt_has_encryption_key(inode)) {
1439                 for (i = 0; i < page_count; i++) {
1440                         struct brw_page *brwpg = pga[i];
1441                         struct page *data_page = NULL;
1442                         bool retried = false;
1443                         bool lockedbymyself;
1444                         u32 nunits = (brwpg->off & ~PAGE_MASK) + brwpg->count;
1445                         struct address_space *map_orig = NULL;
1446                         pgoff_t index_orig;
1447
1448 retry_encrypt:
1449                         nunits = round_up(nunits, LUSTRE_ENCRYPTION_UNIT_SIZE);
1450                         /* The page can already be locked when we arrive here.
1451                          * This is possible when cl_page_assume/vvp_page_assume
1452                          * is stuck on wait_on_page_writeback with page lock
1453                          * held. In this case there is no risk for the lock to
1454                          * be released while we are doing our encryption
1455                          * processing, because writeback against that page will
1456                          * end in vvp_page_completion_write/cl_page_completion,
1457                          * which means only once the page is fully processed.
1458                          */
1459                         lockedbymyself = trylock_page(brwpg->pg);
1460                         if (directio) {
1461                                 map_orig = brwpg->pg->mapping;
1462                                 brwpg->pg->mapping = inode->i_mapping;
1463                                 index_orig = brwpg->pg->index;
1464                                 clpage = oap2cl_page(brw_page2oap(brwpg));
1465                                 brwpg->pg->index = clpage->cp_page_index;
1466                         }
1467                         data_page =
1468                                 llcrypt_encrypt_pagecache_blocks(brwpg->pg,
1469                                                                  nunits, 0,
1470                                                                  GFP_NOFS);
1471                         if (directio) {
1472                                 brwpg->pg->mapping = map_orig;
1473                                 brwpg->pg->index = index_orig;
1474                         }
1475                         if (lockedbymyself)
1476                                 unlock_page(brwpg->pg);
1477                         if (IS_ERR(data_page)) {
1478                                 rc = PTR_ERR(data_page);
1479                                 if (rc == -ENOMEM && !retried) {
1480                                         retried = true;
1481                                         rc = 0;
1482                                         goto retry_encrypt;
1483                                 }
1484                                 ptlrpc_request_free(req);
1485                                 RETURN(rc);
1486                         }
1487                         /* Set PageChecked flag on bounce page for
1488                          * disambiguation in osc_release_bounce_pages().
1489                          */
1490                         SetPageChecked(data_page);
1491                         brwpg->pg = data_page;
1492                         /* there should be no gap in the middle of the page array */
1493                         if (i == page_count - 1) {
1494                                 struct osc_async_page *oap =
1495                                         brw_page2oap(brwpg);
1496
1497                                 oa->o_size = oap->oap_count +
1498                                         oap->oap_obj_off + oap->oap_page_off;
1499                         }
1500                         /* len is forced to nunits and the relative offset
1501                          * to 0, so store the old, clear text info
1502                          */
1503                         brwpg->bp_count_diff = nunits - brwpg->count;
1504                         brwpg->count = nunits;
1505                         brwpg->bp_off_diff = brwpg->off & ~PAGE_MASK;
1506                         brwpg->off = brwpg->off & PAGE_MASK;
1507                 }
1508         } else if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
1509                 struct osc_async_page *oap = brw_page2oap(pga[0]);
1510                 struct cl_page *clpage = oap2cl_page(oap);
1511                 struct cl_object *clobj = clpage->cp_obj;
1512                 struct cl_attr attr = { 0 };
1513                 struct lu_env *env;
1514                 __u16 refcheck;
1515
1516                 env = cl_env_get(&refcheck);
1517                 if (IS_ERR(env)) {
1518                         rc = PTR_ERR(env);
1519                         ptlrpc_request_free(req);
1520                         RETURN(rc);
1521                 }
1522
1523                 cl_object_attr_lock(clobj);
1524                 rc = cl_object_attr_get(env, clobj, &attr);
1525                 cl_object_attr_unlock(clobj);
1526                 cl_env_put(env, &refcheck);
1527                 if (rc != 0) {
1528                         ptlrpc_request_free(req);
1529                         RETURN(rc);
1530                 }
1531                 if (attr.cat_size)
1532                         oa->o_size = attr.cat_size;
1533         } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode) &&
1534                    llcrypt_has_encryption_key(inode)) {
1535                 for (i = 0; i < page_count; i++) {
1536                         struct brw_page *pg = pga[i];
1537                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1538
1539                         nunits = round_up(nunits, LUSTRE_ENCRYPTION_UNIT_SIZE);
1540                         /* count/off are forced to cover the whole encryption
1541                          * unit size so that all encrypted data is stored on
1542                          * the OST; adjust bp_{count,off}_diff for the size of
1543                          * the clear text.
1544                          */
1545                         pg->bp_count_diff = nunits - pg->count;
1546                         pg->count = nunits;
1547                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1548                         pg->off = pg->off & PAGE_MASK;
1549                 }
1550         }
1551
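             /* Pages contiguous in file offset (and with compatible flags)
              * are merged into a single remote niobuf; e.g. eight contiguous
              * 4KiB pages need only one 32KiB niobuf descriptor.
              */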
1552         for (niocount = i = 1; i < page_count; i++) {
1553                 if (!can_merge_pages(pga[i - 1], pga[i]))
1554                         niocount++;
1555         }
1556
1557         pill = &req->rq_pill;
1558         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1559                              sizeof(*ioobj));
1560         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1561                              niocount * sizeof(*niobuf));
1562
1563         for (i = 0; i < page_count; i++) {
1564                 short_io_size += pga[i]->count;
1565                 if (!inode || !IS_ENCRYPTED(inode) ||
1566                     !llcrypt_has_encryption_key(inode)) {
1567                         pga[i]->bp_count_diff = 0;
1568                         pga[i]->bp_off_diff = 0;
1569                 }
1570         }
1571
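             /* RDMA-only (e.g. GPU) pages cannot be accessed through a
              * kernel mapping, so neither checksumming nor short-io
              * inlining is possible for them.
              */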
1572         if (brw_page2oap(pga[0])->oap_brw_flags & OBD_BRW_RDMA_ONLY) {
1573                 enable_checksum = false;
1574                 short_io_size = 0;
1575                 gpu = true;
1576         }
1577
1578         /* Check if read/write is small enough to be a short io. */
1579         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1580             !imp_connect_shortio(cli->cl_import))
1581                 short_io_size = 0;
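             /* e.g. with a 16KiB cl_max_short_io_bytes limit, a single
              * contiguous 4KiB read or write is inlined in the request or
              * reply itself, on servers supporting it, instead of being set
              * up as a separate bulk transfer.
              */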
1582
1583         /* If this is an empty RPC to an old server, just ignore it */
1584         if (!short_io_size && !pga[0]->pg) {
1585                 ptlrpc_request_free(req);
1586                 RETURN(-ENODATA);
1587         }
1588
1589         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1590                              opc == OST_READ ? 0 : short_io_size);
1591         if (opc == OST_READ)
1592                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1593                                      short_io_size);
1594
1595         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1596         if (rc) {
1597                 ptlrpc_request_free(req);
1598                 RETURN(rc);
1599         }
1600         osc_set_io_portal(req);
1601
1602         ptlrpc_at_set_req_timeout(req);
1603         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1604          * retry logic */
1605         req->rq_no_retry_einprogress = 1;
1606
1607         if (short_io_size != 0) {
1608                 desc = NULL;
1609                 short_io_buf = NULL;
1610                 goto no_bulk;
1611         }
1612
1613         desc = ptlrpc_prep_bulk_imp(req, page_count,
1614                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1615                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1616                         PTLRPC_BULK_PUT_SINK),
1617                 OST_BULK_PORTAL,
1618                 &ptlrpc_bulk_kiov_pin_ops);
1619
1620         if (desc == NULL)
1621                 GOTO(out, rc = -ENOMEM);
1622         /* NB request now owns desc and will free it when it gets freed */
1623         desc->bd_is_rdma = gpu;
1624 no_bulk:
1625         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1626         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1627         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1628         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1629
1630         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1631
1632         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1633          * and from_kgid(), because they are asynchronous. Fortunately, variable
1634          * oa contains valid o_uid and o_gid in these two operations.
1635          * Besides, filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
1636          * OBD_MD_FLUID and OBD_MD_FLGID are not set in order to avoid
1637          * breaking other processing logic */
1638         body->oa.o_uid = oa->o_uid;
1639         body->oa.o_gid = oa->o_gid;
1640
1641         obdo_to_ioobj(oa, ioobj);
1642         ioobj->ioo_bufcnt = niocount;
1643         /* The high bits of ioo_max_brw tell the server the _maximum_ number
1644          * of bulks that might be sent for this request.  The actual number is
1645          * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
1646          * sends "max - 1" for compatibility with old clients sending "0", and
1647          * also so that the actual maximum is a power-of-two, not one less. LU-1431 */
1648         if (desc != NULL)
1649                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1650         else /* short io */
1651                 ioobj_max_brw_set(ioobj, 0);
1652
1653         if (short_io_size != 0) {
1654                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1655                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1656                         body->oa.o_flags = 0;
1657                 }
1658                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1659                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1660                        short_io_size);
1661                 if (opc == OST_WRITE) {
1662                         short_io_buf = req_capsule_client_get(pill,
1663                                                               &RMF_SHORT_IO);
1664                         LASSERT(short_io_buf != NULL);
1665                 }
1666         }
1667
1668         LASSERT(page_count > 0);
1669         pg_prev = pga[0];
1670         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1671                 struct brw_page *pg = pga[i];
1672                 int poff = pg->off & ~PAGE_MASK;
1673
1674                 LASSERT(pg->count > 0);
1675                 /* make sure there is no gap in the middle of the page array */
1676                 LASSERTF(page_count == 1 ||
1677                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1678                           ergo(i > 0 && i < page_count - 1,
1679                                poff == 0 && pg->count == PAGE_SIZE)   &&
1680                           ergo(i == page_count - 1, poff == 0)),
1681                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1682                          i, page_count, pg, pg->off, pg->count);
1683                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1684                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1685                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1686                          i, page_count,
1687                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1688                          pg_prev->pg, page_private(pg_prev->pg),
1689                          pg_prev->pg->index, pg_prev->off);
1690                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1691                         (pg->flag & OBD_BRW_SRVLOCK));
1692                 if (short_io_size != 0 && opc == OST_WRITE) {
1693                         unsigned char *ptr = kmap_atomic(pg->pg);
1694
1695                         LASSERT(short_io_size >= requested_nob + pg->count);
1696                         memcpy(short_io_buf + requested_nob,
1697                                ptr + poff,
1698                                pg->count);
1699                         kunmap_atomic(ptr);
1700                 } else if (short_io_size == 0) {
1701                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1702                                                          pg->count);
1703                 }
1704                 requested_nob += pg->count;
1705
1706                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1707                         niobuf--;
1708                         niobuf->rnb_len += pg->count;
1709                 } else {
1710                         niobuf->rnb_offset = pg->off;
1711                         niobuf->rnb_len    = pg->count;
1712                         niobuf->rnb_flags  = pg->flag;
1713                 }
1714                 pg_prev = pg;
1715         }
1716
1717         LASSERTF((void *)(niobuf - niocount) ==
1718                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1719                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1720                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1721
1722         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1723         if (resend) {
1724                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1725                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1726                         body->oa.o_flags = 0;
1727                 }
1728                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1729         }
1730
1731         if (osc_should_shrink_grant(cli))
1732                 osc_shrink_grant_local(cli, &body->oa);
1733
1734         if (!cli->cl_checksum || sptlrpc_flavor_has_bulk(&req->rq_flvr))
1735                 enable_checksum = false;
1736
1737         /* size[REQ_REC_OFF] still sizeof (*body) */
1738         if (opc == OST_WRITE) {
1739                 if (enable_checksum) {
1740                         /* store cl_cksum_type in a local variable since
1741                          * it can be changed via lprocfs */
1742                         enum cksum_types cksum_type = cli->cl_cksum_type;
1743
1744                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1745                                 body->oa.o_flags = 0;
1746
1747                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1748                                                                 cksum_type);
1749                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1750
1751                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1752                                                   requested_nob, page_count,
1753                                                   pga, OST_WRITE,
1754                                                   &body->oa.o_cksum, resend);
1755                         if (rc < 0) {
1756                                 CDEBUG(D_PAGE, "failed to checksum: rc = %d\n",
1757                                        rc);
1758                                 GOTO(out, rc);
1759                         }
1760                         CDEBUG(D_PAGE | (resend ? D_HA : 0),
1761                                "checksum at write origin: %x (%x)\n",
1762                                body->oa.o_cksum, cksum_type);
1763
1764                         /* save this in 'oa', too, for later checking */
1765                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1766                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1767                                                            cksum_type);
1768                 } else {
1769                         /* clear out the checksum flag, in case this is a
1770                          * resend but cl_checksum is no longer set. b=11238 */
1771                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1772                 }
1773                 oa->o_cksum = body->oa.o_cksum;
1774                 /* 1 RC per niobuf */
1775                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1776                                      sizeof(__u32) * niocount);
1777         } else {
1778                 if (enable_checksum) {
1779                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1780                                 body->oa.o_flags = 0;
1781                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1782                                 cli->cl_cksum_type);
1783                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1784                 }
1785
1786                 /* The client cksum has already been copied to the wire obdo
1787                  * in the previous lustre_set_wire_obdo(); in case a bulk read
1788                  * is being resent due to a cksum error, this allows the
1789                  * server to check+dump pages on its side */
1790         }
1791         ptlrpc_request_set_replen(req);
1792
1793         aa = ptlrpc_req_async_args(aa, req);
1794         aa->aa_oa = oa;
1795         aa->aa_requested_nob = requested_nob;
1796         aa->aa_nio_count = niocount;
1797         aa->aa_page_count = page_count;
1798         aa->aa_resends = 0;
1799         aa->aa_ppga = pga;
1800         aa->aa_cli = cli;
1801         INIT_LIST_HEAD(&aa->aa_oaps);
1802
1803         *reqp = req;
1804         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1805         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1806                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1807                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1808         RETURN(0);
1809
1810  out:
1811         ptlrpc_req_finished(req);
1812         RETURN(rc);
1813 }
1814
1815 char dbgcksum_file_name[PATH_MAX];
1816
1817 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1818                                 struct brw_page **pga, __u32 server_cksum,
1819                                 __u32 client_cksum)
1820 {
1821         struct file *filp;
1822         int rc, i;
1823         unsigned int len;
1824         char *buf;
1825
1826         /* only keep a dump of pages on the first error for a given range
1827          * in the file/fid, not during resends/retries. */
1828         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1829                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1830                  (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1831                   libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1832                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1833                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1834                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1835                  pga[0]->off,
1836                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1837                  client_cksum, server_cksum);
1838         CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
1839         filp = filp_open(dbgcksum_file_name,
1840                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1841         if (IS_ERR(filp)) {
1842                 rc = PTR_ERR(filp);
1843                 if (rc == -EEXIST)
1844                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1845                                "checksum error: rc = %d\n", dbgcksum_file_name,
1846                                rc);
1847                 else
1848                         CERROR("%s: can't open to dump pages with checksum "
1849                                "error: rc = %d\n", dbgcksum_file_name, rc);
1850                 return;
1851         }
1852
1853         for (i = 0; i < page_count; i++) {
1854                 len = pga[i]->count;
1855                 buf = kmap(pga[i]->pg);
1856                 while (len != 0) {
1857                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1858                         if (rc < 0) {
1859                                 CERROR("%s: wanted to write %u but got %d "
1860                                        "error\n", dbgcksum_file_name, len, rc);
1861                                 break;
1862                         }
1863                         len -= rc;
1864                         buf += rc;
1865                 }
1866                 kunmap(pga[i]->pg);
1867         }
1868
1869         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1870         if (rc)
1871                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1872         filp_close(filp, NULL);
1873
1874         libcfs_debug_dumplog();
1875 }
1876
1877 static int
1878 check_write_checksum(struct obdo *oa, const struct lnet_processid *peer,
1879                      __u32 client_cksum, __u32 server_cksum,
1880                      struct osc_brw_async_args *aa)
1881 {
1882         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1883         enum cksum_types cksum_type;
1884         obd_dif_csum_fn *fn = NULL;
1885         int sector_size = 0;
1886         __u32 new_cksum;
1887         char *msg;
1888         int rc;
1889
1890         if (server_cksum == client_cksum) {
1891                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1892                 return 0;
1893         }
1894
1895         if (aa->aa_cli->cl_checksum_dump)
1896                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1897                                     server_cksum, client_cksum);
1898
1899         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1900                                            oa->o_flags : 0);
1901
1902         switch (cksum_type) {
1903         case OBD_CKSUM_T10IP512:
1904                 fn = obd_dif_ip_fn;
1905                 sector_size = 512;
1906                 break;
1907         case OBD_CKSUM_T10IP4K:
1908                 fn = obd_dif_ip_fn;
1909                 sector_size = 4096;
1910                 break;
1911         case OBD_CKSUM_T10CRC512:
1912                 fn = obd_dif_crc_fn;
1913                 sector_size = 512;
1914                 break;
1915         case OBD_CKSUM_T10CRC4K:
1916                 fn = obd_dif_crc_fn;
1917                 sector_size = 4096;
1918                 break;
1919         default:
1920                 break;
1921         }
1922
1923         if (fn)
1924                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1925                                              aa->aa_page_count, aa->aa_ppga,
1926                                              OST_WRITE, fn, sector_size,
1927                                              &new_cksum, true);
1928         else
1929                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1930                                        aa->aa_ppga, OST_WRITE, cksum_type,
1931                                        &new_cksum);
1932
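             /* Recompute the checksum over the local pages: matching the
              * server's value means the data changed on the client after the
              * original checksum was taken; matching the original client
              * value means the data was corrupted in transit.
              */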
1933         if (rc < 0)
1934                 msg = "failed to calculate the client write checksum";
1935         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1936                 msg = "the server did not use the checksum type specified in "
1937                       "the original request - likely a protocol problem";
1938         else if (new_cksum == server_cksum)
1939                 msg = "changed on the client after we checksummed it - "
1940                       "likely false positive due to mmap IO (bug 11742)";
1941         else if (new_cksum == client_cksum)
1942                 msg = "changed in transit before arrival at OST";
1943         else
1944                 msg = "changed in transit AND doesn't match the original - "
1945                       "likely false positive due to mmap IO (bug 11742)";
1946
1947         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1948                            DFID " object "DOSTID" extent [%llu-%llu], original "
1949                            "client csum %x (type %x), server csum %x (type %x),"
1950                            " client csum now %x\n",
1951                            obd_name, msg, libcfs_nidstr(&peer->nid),
1952                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1953                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1954                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1955                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1956                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1957                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1958                            client_cksum,
1959                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1960                            server_cksum, cksum_type, new_cksum);
1961         return 1;
1962 }
1963
1964 /* Note: rc enters this function as the number of bytes transferred */
1965 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1966 {
1967         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1968         struct client_obd *cli = aa->aa_cli;
1969         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1970         const struct lnet_processid *peer =
1971                 &req->rq_import->imp_connection->c_peer;
1972         struct ost_body *body;
1973         u32 client_cksum = 0;
1974         struct inode *inode = NULL;
1975         unsigned int blockbits = 0, blocksize = 0;
1976         struct cl_page *clpage;
1977
1978         ENTRY;
1979
1980         if (rc < 0 && rc != -EDQUOT) {
1981                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
1982                 RETURN(rc);
1983         }
1984
1985         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1986         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1987         if (body == NULL) {
1988                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
1989                 RETURN(-EPROTO);
1990         }
1991
1992         /* set/clear over quota flag for a uid/gid/projid */
1993         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1994             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1995                 unsigned qid[LL_MAXQUOTAS] = {
1996                                          body->oa.o_uid, body->oa.o_gid,
1997                                          body->oa.o_projid };
1998                 CDEBUG(D_QUOTA,
1999                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
2000                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
2001                        body->oa.o_valid, body->oa.o_flags);
2002                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
2003                                 body->oa.o_flags);
2004         }
2005
2006         osc_update_grant(cli, body);
2007
2008         if (rc < 0)
2009                 RETURN(rc);
2010
2011         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
2012                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
2013
2014         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2015                 if (rc > 0) {
2016                         CERROR("%s: unexpected positive size %d\n",
2017                                obd_name, rc);
2018                         RETURN(-EPROTO);
2019                 }
2020
2021                 if (req->rq_bulk != NULL &&
2022                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
2023                         RETURN(-EAGAIN);
2024
2025                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
2026                     check_write_checksum(&body->oa, peer, client_cksum,
2027                                          body->oa.o_cksum, aa))
2028                         RETURN(-EAGAIN);
2029
2030                 rc = check_write_rcs(req, aa->aa_requested_nob,
2031                                      aa->aa_nio_count, aa->aa_page_count,
2032                                      aa->aa_ppga);
2033                 GOTO(out, rc);
2034         }
2035
2036         /* The rest of this function executes only for OST_READs */
2037
2038         if (req->rq_bulk == NULL) {
2039                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
2040                                           RCL_SERVER);
2041                 LASSERT(rc == req->rq_status);
2042         } else {
2043                 /* if unwrap_bulk failed, return -EAGAIN to retry */
2044                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
2045         }
2046         if (rc < 0)
2047                 GOTO(out, rc = -EAGAIN);
2048
2049         if (rc > aa->aa_requested_nob) {
2050                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
2051                        rc, aa->aa_requested_nob);
2052                 RETURN(-EPROTO);
2053         }
2054
2055         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
2056                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
2057                        rc, req->rq_bulk->bd_nob_transferred);
2058                 RETURN(-EPROTO);
2059         }
2060
2061         if (req->rq_bulk == NULL) {
2062                 /* short io */
2063                 int nob, pg_count, i = 0;
2064                 unsigned char *buf;
2065
2066                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
2067                 pg_count = aa->aa_page_count;
2068                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
2069                                                    rc);
2070                 nob = rc;
2071                 while (nob > 0 && pg_count > 0) {
2072                         unsigned char *ptr;
2073                         int count = aa->aa_ppga[i]->count > nob ?
2074                                     nob : aa->aa_ppga[i]->count;
2075
2076                         CDEBUG(D_CACHE, "page %p count %d\n",
2077                                aa->aa_ppga[i]->pg, count);
2078                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
2079                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
2080                                count);
2081                         kunmap_atomic((void *) ptr);
2082
2083                         buf += count;
2084                         nob -= count;
2085                         i++;
2086                         pg_count--;
2087                 }
2088         }
2089
2090         if (rc < aa->aa_requested_nob)
2091                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
2092
2093         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2094                 static int cksum_counter;
2095                 u32 server_cksum = body->oa.o_cksum;
2096                 int nob = rc;
2097                 char *via = "";
2098                 char *router = "";
2099                 enum cksum_types cksum_type;
2100                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
2101                         body->oa.o_flags : 0;
2102
2103                 cksum_type = obd_cksum_type_unpack(o_flags);
2104                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2105                                           aa->aa_page_count, aa->aa_ppga,
2106                                           OST_READ, &client_cksum, false);
2107                 if (rc < 0)
2108                         GOTO(out, rc);
2109
2110                 if (req->rq_bulk != NULL &&
2111                     lnet_nid_to_nid4(&peer->nid) != req->rq_bulk->bd_sender) {
2112                         via = " via ";
2113                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
2114                 }
2115
2116                 if (server_cksum != client_cksum) {
2117                         struct ost_body *clbody;
2118                         __u32 client_cksum2;
2119                         u32 page_count = aa->aa_page_count;
2120
2121                         osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2122                                              page_count, aa->aa_ppga,
2123                                              OST_READ, &client_cksum2, true);
2124                         clbody = req_capsule_client_get(&req->rq_pill,
2125                                                         &RMF_OST_BODY);
2126                         if (cli->cl_checksum_dump)
2127                                 dump_all_bulk_pages(&clbody->oa, page_count,
2128                                                     aa->aa_ppga, server_cksum,
2129                                                     client_cksum);
2130
2131                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2132                                            "%s%s%s inode "DFID" object "DOSTID
2133                                            " extent [%llu-%llu], client %x/%x, "
2134                                            "server %x, cksum_type %x\n",
2135                                            obd_name,
2136                                            libcfs_nidstr(&peer->nid),
2137                                            via, router,
2138                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2139                                                 clbody->oa.o_parent_seq : 0ULL,
2140                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2141                                                 clbody->oa.o_parent_oid : 0,
2142                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2143                                                 clbody->oa.o_parent_ver : 0,
2144                                            POSTID(&body->oa.o_oi),
2145                                            aa->aa_ppga[0]->off,
2146                                            aa->aa_ppga[page_count-1]->off +
2147                                            aa->aa_ppga[page_count-1]->count - 1,
2148                                            client_cksum, client_cksum2,
2149                                            server_cksum, cksum_type);
2150                         cksum_counter = 0;
2151                         aa->aa_oa->o_cksum = client_cksum;
2152                         rc = -EAGAIN;
2153                 } else {
2154                         cksum_counter++;
2155                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2156                         rc = 0;
2157                 }
2158         } else if (unlikely(client_cksum)) {
2159                 static int cksum_missed;
2160
2161                 cksum_missed++;
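                     /* (x & -x) == x only for powers of two, so this logs on
                      * the 1st, 2nd, 4th, 8th, ... miss as a crude rate limit.
                      */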
2162                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2163                         CERROR("%s: checksum %u requested from %s but not sent\n",
2164                                obd_name, cksum_missed,
2165                                libcfs_nidstr(&peer->nid));
2166         } else {
2167                 rc = 0;
2168         }
2169
2170         /* get the inode from the first cl_page */
2171         clpage = oap2cl_page(brw_page2oap(aa->aa_ppga[0]));
2172         inode = clpage->cp_inode;
2173         if (clpage->cp_type == CPT_TRANSIENT && inode) {
2174                 blockbits = inode->i_blkbits;
2175                 blocksize = 1 << blockbits;
2176         }
2177         if (inode && IS_ENCRYPTED(inode)) {
2178                 int idx;
2179
2180                 if (!llcrypt_has_encryption_key(inode)) {
2181                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2182                         GOTO(out, rc);
2183                 }
2184                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2185                         struct brw_page *brwpg = aa->aa_ppga[idx];
2186                         unsigned int offs = 0;
2187
2188                         while (offs < PAGE_SIZE) {
2189                                 /* do not decrypt if page is all 0s */
2190                                 if (memchr_inv(page_address(brwpg->pg) + offs,
2191                                       0, LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
2192                                         /* if the page is empty, forward this
2193                                          * info to upper layers (ll_io_zero_page)
2194                                          * by clearing PagePrivate2
2195                                          */
2196                                         if (!offs)
2197                                                 ClearPagePrivate2(brwpg->pg);
2198                                         break;
2199                                 }
2200
2201                                 if (blockbits) {
2202                                         /* This is the direct I/O case: call
2203                                          * the decrypt function that takes the
2204                                          * inode as an input parameter. The
2205                                          * page does not need to be locked.
2206                                          */
2207                                         u64 lblk_num;
2208                                         unsigned int i;
2209
2210                                         clpage =
2211                                                oap2cl_page(brw_page2oap(brwpg));
2212                                         lblk_num =
2213                                                 ((u64)(clpage->cp_page_index) <<
2214                                                 (PAGE_SHIFT - blockbits)) +
2215                                                 (offs >> blockbits);
2216                                         for (i = offs;
2217                                              i < offs +
2218                                                     LUSTRE_ENCRYPTION_UNIT_SIZE;
2219                                              i += blocksize, lblk_num++) {
2220                                                 rc =
2221                                                   llcrypt_decrypt_block_inplace(
2222                                                           inode, brwpg->pg,
2223                                                           blocksize, i,
2224                                                           lblk_num);
2225                                                 if (rc)
2226                                                         break;
2227                                         }
2228                                 } else {
2229                                         rc = llcrypt_decrypt_pagecache_blocks(
2230                                                 brwpg->pg,
2231                                                 LUSTRE_ENCRYPTION_UNIT_SIZE,
2232                                                 offs);
2233                                 }
2234                                 if (rc)
2235                                         GOTO(out, rc);
2236
2237                                 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2238                         }
2239                 }
2240         }
2241
2242 out:
2243         if (rc >= 0)
2244                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2245                                      aa->aa_oa, &body->oa);
2246
2247         RETURN(rc);
2248 }
2249
2250 static int osc_brw_redo_request(struct ptlrpc_request *request,
2251                                 struct osc_brw_async_args *aa, int rc)
2252 {
2253         struct ptlrpc_request *new_req;
2254         struct osc_brw_async_args *new_aa;
2255         struct osc_async_page *oap;
2256         ENTRY;
2257
2258         /* The below message is checked in replay-ost-single.sh test_8ae */
2259         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2260                   "redo for recoverable error %d", rc);
2261
2262         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2263                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2264                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2265                                   aa->aa_ppga, &new_req, 1);
2266         if (rc)
2267                 RETURN(rc);
2268
2269         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2270                 if (oap->oap_request != NULL) {
2271                         LASSERTF(request == oap->oap_request,
2272                                  "request %p != oap_request %p\n",
2273                                  request, oap->oap_request);
2274                 }
2275         }
2276         /*
2277          * New request takes over pga and oaps from old request.
2278          * Note that copying a list_head doesn't work, need to move it...
2279          */
2280         aa->aa_resends++;
2281         new_req->rq_interpret_reply = request->rq_interpret_reply;
2282         new_req->rq_async_args = request->rq_async_args;
2283         new_req->rq_commit_cb = request->rq_commit_cb;
2284         /* cap the resend delay to the current request timeout; this is
2285          * similar to what ptlrpc does (see after_reply()) */
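             /* e.g. the 3rd resend is deferred by 3 seconds, the 10th by 10,
              * but never by more than one full request timeout.
              */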
2286         if (aa->aa_resends > new_req->rq_timeout)
2287                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2288         else
2289                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
2290         new_req->rq_generation_set = 1;
2291         new_req->rq_import_generation = request->rq_import_generation;
2292
2293         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2294
2295         INIT_LIST_HEAD(&new_aa->aa_oaps);
2296         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2297         INIT_LIST_HEAD(&new_aa->aa_exts);
2298         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2299         new_aa->aa_resends = aa->aa_resends;
2300
2301         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2302                 if (oap->oap_request) {
2303                         ptlrpc_req_finished(oap->oap_request);
2304                         oap->oap_request = ptlrpc_request_addref(new_req);
2305                 }
2306         }
2307
2308         /* XXX: This code will run into problems if we ever support adding
2309          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
2310          * waiting for all of them to finish. We should inherit the request
2311          * set from the old request. */
2312         ptlrpcd_add_req(new_req);
2313
2314         DEBUG_REQ(D_INFO, new_req, "new request");
2315         RETURN(0);
2316 }
2317
2318 /*
2319  * We want disk allocation on the target to happen in offset order, so we'll
2320  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2321  * fine for our small page arrays and doesn't require allocation.  It's an
2322  * insertion sort that swaps elements that are strides apart, shrinking the
2323  * stride down until it's 1 and the array is sorted.
2324  */
2325 static void sort_brw_pages(struct brw_page **array, int num)
2326 {
2327         int stride, i, j;
2328         struct brw_page *tmp;
2329
2330         if (num == 1)
2331                 return;
2332         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2333                 ;
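             /* strides follow Knuth's 3h+1 sequence: 1, 4, 13, 40, 121, ... */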
2334
2335         do {
2336                 stride /= 3;
2337                 for (i = stride ; i < num ; i++) {
2338                         tmp = array[i];
2339                         j = i;
2340                         while (j >= stride && array[j - stride]->off > tmp->off) {
2341                                 array[j] = array[j - stride];
2342                                 j -= stride;
2343                         }
2344                         array[j] = tmp;
2345                 }
2346         } while (stride > 1);
2347 }
2348
2349 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2350 {
2351         LASSERT(ppga != NULL);
2352         OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2353 }
2354
2355 static int brw_interpret(const struct lu_env *env,
2356                          struct ptlrpc_request *req, void *args, int rc)
2357 {
2358         struct osc_brw_async_args *aa = args;
2359         struct osc_extent *ext;
2360         struct osc_extent *tmp;
2361         struct client_obd *cli = aa->aa_cli;
2362         unsigned long transferred = 0;
2363
2364         ENTRY;
2365
2366         rc = osc_brw_fini_request(req, rc);
2367         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2368
2369         /* restore clear text pages */
2370         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2371
2372         /*
2373          * When server returns -EINPROGRESS, client should always retry
2374          * regardless of the number of times the bulk was resent already.
2375          */
2376         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2377                 if (req->rq_import_generation !=
2378                     req->rq_import->imp_generation) {
2379                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2380                                ""DOSTID", rc = %d.\n",
2381                                req->rq_import->imp_obd->obd_name,
2382                                POSTID(&aa->aa_oa->o_oi), rc);
2383                 } else if (rc == -EINPROGRESS ||
2384                            client_should_resend(aa->aa_resends, aa->aa_cli)) {
2385                         rc = osc_brw_redo_request(req, aa, rc);
2386                 } else {
2387                         CERROR("%s: too many resent retries for object: "
2388                                "%llu:%llu, rc = %d.\n",
2389                                req->rq_import->imp_obd->obd_name,
2390                                POSTID(&aa->aa_oa->o_oi), rc);
2391                 }
2392
2393                 if (rc == 0)
2394                         RETURN(0);
2395                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2396                         rc = -EIO;
2397         }
2398
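             /* On success, fold the attributes returned by the OST (blocks,
              * [amc]time, and possibly size/KMS for writes) back into the
              * cached cl_object attributes under the attribute lock.
              */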
2399         if (rc == 0) {
2400                 struct obdo *oa = aa->aa_oa;
2401                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2402                 unsigned long valid = 0;
2403                 struct cl_object *obj;
2404                 struct osc_async_page *last;
2405
2406                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2407                 obj = osc2cl(last->oap_obj);
2408
2409                 cl_object_attr_lock(obj);
2410                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2411                         attr->cat_blocks = oa->o_blocks;
2412                         valid |= CAT_BLOCKS;
2413                 }
2414                 if (oa->o_valid & OBD_MD_FLMTIME) {
2415                         attr->cat_mtime = oa->o_mtime;
2416                         valid |= CAT_MTIME;
2417                 }
2418                 if (oa->o_valid & OBD_MD_FLATIME) {
2419                         attr->cat_atime = oa->o_atime;
2420                         valid |= CAT_ATIME;
2421                 }
2422                 if (oa->o_valid & OBD_MD_FLCTIME) {
2423                         attr->cat_ctime = oa->o_ctime;
2424                         valid |= CAT_CTIME;
2425                 }
2426
2427                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2428                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2429                         loff_t last_off = last->oap_count + last->oap_obj_off +
2430                                 last->oap_page_off;
2431
2432                         /* Change the file size if this is an out-of-quota or
2433                          * direct I/O write and it extends the file size */
2434                         if (loi->loi_lvb.lvb_size < last_off) {
2435                                 attr->cat_size = last_off;
2436                                 valid |= CAT_SIZE;
2437                         }
2438                         /* Extend KMS if it's not a lockless write */
2439                         if (loi->loi_kms < last_off &&
2440                             oap2osc_page(last)->ops_srvlock == 0) {
2441                                 attr->cat_kms = last_off;
2442                                 valid |= CAT_KMS;
2443                         }
2444                 }
2445
2446                 if (valid != 0)
2447                         cl_object_attr_update(env, obj, attr, valid);
2448                 cl_object_attr_unlock(obj);
2449         }
2450         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2451         aa->aa_oa = NULL;
2452
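             /* Pages from a successful write stay accounted as "unstable"
              * until the OST commits the transaction; brw_commit() drops the
              * count once the commit is reported back.
              */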
2453         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2454                 osc_inc_unstable_pages(req);
2455
2456         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2457                 list_del_init(&ext->oe_link);
2458                 osc_extent_finish(env, ext, 1,
2459                                   rc && req->rq_no_delay ? -EAGAIN : rc);
2460         }
2461         LASSERT(list_empty(&aa->aa_exts));
2462         LASSERT(list_empty(&aa->aa_oaps));
2463
2464         transferred = (req->rq_bulk == NULL ? /* short io */
2465                        aa->aa_requested_nob :
2466                        req->rq_bulk->bd_nob_transferred);
2467
2468         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2469         ptlrpc_lprocfs_brw(req, transferred);
2470
2471         spin_lock(&cli->cl_loi_list_lock);
2472         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2473          * is called so we know whether to go to sync BRWs or wait for more
2474          * RPCs to complete */
2475         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2476                 cli->cl_w_in_flight--;
2477         else
2478                 cli->cl_r_in_flight--;
2479         osc_wake_cache_waiters(cli);
2480         spin_unlock(&cli->cl_loi_list_lock);
2481
2482         osc_io_unplug(env, cli, NULL);
2483         RETURN(rc);
2484 }
2485
2486 static void brw_commit(struct ptlrpc_request *req)
2487 {
2488         /* If osc_inc_unstable_pages (via osc_extent_finish) races with this
2489          * callback being invoked via rq_commit_cb, we need to ensure
2490          * osc_dec_unstable_pages is still called. Otherwise unstable
2491          * pages may be leaked. */
2492         spin_lock(&req->rq_lock);
2493         if (likely(req->rq_unstable)) {
2494                 req->rq_unstable = 0;
2495                 spin_unlock(&req->rq_lock);
2496
2497                 osc_dec_unstable_pages(req);
2498         } else {
2499                 req->rq_committed = 1;
2500                 spin_unlock(&req->rq_lock);
2501         }
2502 }
2503
2504 /**
2505  * Build an RPC from the list of extents @ext_list. The caller must ensure
2506  * that the total number of pages in this list is NOT over the max pages per RPC.
2507  * Extents in the list must be in OES_RPC state.
2508  */
2509 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2510                   struct list_head *ext_list, int cmd)
2511 {
2512         struct ptlrpc_request           *req = NULL;
2513         struct osc_extent               *ext;
2514         struct brw_page                 **pga = NULL;
2515         struct osc_brw_async_args       *aa = NULL;
2516         struct obdo                     *oa = NULL;
2517         struct osc_async_page           *oap;
2518         struct osc_object               *obj = NULL;
2519         struct cl_req_attr              *crattr = NULL;
2520         loff_t                          starting_offset = OBD_OBJECT_EOF;
2521         loff_t                          ending_offset = 0;
2522         /* '1' for consistency with code that checks !mpflag to restore */
2523         int mpflag = 1;
2524         int                             mem_tight = 0;
2525         int                             page_count = 0;
2526         bool                            soft_sync = false;
2527         bool                            ndelay = false;
2528         int                             i;
2529         int                             grant = 0;
2530         int                             rc;
2531         __u32                           layout_version = 0;
2532         LIST_HEAD(rpc_list);
2533         struct ost_body                 *body;
2534         ENTRY;
2535         LASSERT(!list_empty(ext_list));
2536
2537         /* add pages into rpc_list to build the BRW RPC */
2538         list_for_each_entry(ext, ext_list, oe_link) {
2539                 LASSERT(ext->oe_state == OES_RPC);
2540                 mem_tight |= ext->oe_memalloc;
2541                 grant += ext->oe_grants;
2542                 page_count += ext->oe_nr_pages;
2543                 layout_version = max(layout_version, ext->oe_layout_version);
2544                 if (obj == NULL)
2545                         obj = ext->oe_obj;
2546         }
2547
2548         soft_sync = osc_over_unstable_soft_limit(cli);
2549         if (mem_tight)
2550                 mpflag = memalloc_noreclaim_save();
2551
2552         OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2553         if (pga == NULL)
2554                 GOTO(out, rc = -ENOMEM);
2555
2556         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2557         if (oa == NULL)
2558                 GOTO(out, rc = -ENOMEM);
2559
2560         i = 0;
2561         list_for_each_entry(ext, ext_list, oe_link) {
2562                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2563                         if (mem_tight)
2564                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2565                         if (soft_sync)
2566                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2567                         pga[i] = &oap->oap_brw_page;
2568                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2569                         i++;
2570
2571                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2572                         if (starting_offset == OBD_OBJECT_EOF ||
2573                             starting_offset > oap->oap_obj_off)
2574                                 starting_offset = oap->oap_obj_off;
2575                         else
2576                                 LASSERT(oap->oap_page_off == 0);
2577                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2578                                 ending_offset = oap->oap_obj_off +
2579                                                 oap->oap_count;
2580                         else
2581                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2582                                         PAGE_SIZE);
2583                 }
2584                 if (ext->oe_ndelay)
2585                         ndelay = true;
2586         }
2587
2588         /* first page in the list */
2589         oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
2590
2591         crattr = &osc_env_info(env)->oti_req_attr;
2592         memset(crattr, 0, sizeof(*crattr));
2593         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2594         crattr->cra_flags = ~0ULL;
2595         crattr->cra_page = oap2cl_page(oap);
2596         crattr->cra_oa = oa;
2597         cl_req_attr_set(env, osc2cl(obj), crattr);
2598
2599         if (cmd == OBD_BRW_WRITE) {
2600                 oa->o_grant_used = grant;
2601                 if (layout_version > 0) {
2602                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2603                                PFID(&oa->o_oi.oi_fid), layout_version);
2604
2605                         oa->o_layout_version = layout_version;
2606                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2607                 }
2608         }
2609
2610         sort_brw_pages(pga, page_count);
2611         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2612         if (rc != 0) {
2613                 CERROR("prep_req failed: %d\n", rc);
2614                 GOTO(out, rc);
2615         }
2616
2617         req->rq_commit_cb = brw_commit;
2618         req->rq_interpret_reply = brw_interpret;
2619         req->rq_memalloc = mem_tight != 0;
2620         oap->oap_request = ptlrpc_request_addref(req);
2621         if (ndelay) {
2622                 req->rq_no_resend = req->rq_no_delay = 1;
2623                 /* we should probably set a shorter timeout value here,
2624                  * to handle ETIMEDOUT in brw_interpret() correctly. */
2625                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2626         }
2627
2628         /* Need to update the timestamps after the request is built in case
2629          * we race with setattr (locally or in queue at OST).  If OST gets
2630          * later setattr before earlier BRW (as determined by the request xid),
2631          * the OST will not use BRW timestamps.  Sadly, there is no obvious
2632          * way to do this in a single call.  bug 10150 */
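        /*
         * Illustration of the xid rule above (hypothetical xids): if a
         * setattr with xid 101 is executed by the OST before a BRW with
         * xid 100 arrives, the OST will not apply the BRW timestamps,
         * because the setattr is newer by xid (101 > 100).
         */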
2633         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2634         crattr->cra_oa = &body->oa;
2635         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2636         cl_req_attr_set(env, osc2cl(obj), crattr);
2637         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2638
2639         aa = ptlrpc_req_async_args(aa, req);
2640         INIT_LIST_HEAD(&aa->aa_oaps);
2641         list_splice_init(&rpc_list, &aa->aa_oaps);
2642         INIT_LIST_HEAD(&aa->aa_exts);
2643         list_splice_init(ext_list, &aa->aa_exts);
2644
2645         spin_lock(&cli->cl_loi_list_lock);
2646         starting_offset >>= PAGE_SHIFT;
2647         ending_offset >>= PAGE_SHIFT;
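        /* starting/ending offsets are page indexes from here on, as
         * tallied by the lproc offset histograms below */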
2648         if (cmd == OBD_BRW_READ) {
2649                 cli->cl_r_in_flight++;
2650                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2651                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2652                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2653                                       starting_offset + 1);
2654         } else {
2655                 cli->cl_w_in_flight++;
2656                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2657                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2658                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2659                                       starting_offset + 1);
2660         }
2661         spin_unlock(&cli->cl_loi_list_lock);
2662
2663         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2664                   page_count, aa, cli->cl_r_in_flight, cli->cl_w_in_flight);
2665         if (libcfs_debug & D_IOTRACE) {
2666                 struct lu_fid fid;
2667
2668                 fid.f_seq = crattr->cra_oa->o_parent_seq;
2669                 fid.f_oid = crattr->cra_oa->o_parent_oid;
2670                 fid.f_ver = crattr->cra_oa->o_parent_ver;
2671                 CDEBUG(D_IOTRACE,
2672                        DFID": %d %s pages, start %lld, end %lld, now %ur/%uw in flight\n",
2673                        PFID(&fid), page_count,
2674                        cmd == OBD_BRW_READ ? "read" : "write", starting_offset,
2675                        ending_offset, cli->cl_r_in_flight, cli->cl_w_in_flight);
2676         }
2677         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2678
2679         ptlrpcd_add_req(req);
2680         rc = 0;
2681         EXIT;
2682
2683 out:
2684         if (mem_tight)
2685                 memalloc_noreclaim_restore(mpflag);
2686
2687         if (rc != 0) {
2688                 LASSERT(req == NULL);
2689
2690                 if (oa)
2691                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2692                 if (pga) {
2693                         osc_release_bounce_pages(pga, page_count);
2694                         osc_release_ppga(pga, page_count);
2695                 }
2696                 /* This should happen rarely and is pretty bad; it makes the
2697                  * pending list not follow the dirty order.
2698                  */
2699                 while ((ext = list_first_entry_or_null(ext_list,
2700                                                        struct osc_extent,
2701                                                        oe_link)) != NULL) {
2702                         list_del_init(&ext->oe_link);
2703                         osc_extent_finish(env, ext, 0, rc);
2704                 }
2705         }
2706         RETURN(rc);
2707 }
2708
2709 /* This is to refresh our lock in the face of no RPCs. */
2710 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2711 {
2712         struct ptlrpc_request *req;
2713         struct obdo oa;
2714         struct brw_page bpg = { .off = start, .count = 1};
2715         struct brw_page *pga = &bpg;
2716         int rc;
2717
2718         memset(&oa, 0, sizeof(oa));
2719         oa.o_oi = osc->oo_oinfo->loi_oi;
2720         oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2721         /* For updated servers - don't do a read */
2722         oa.o_flags = OBD_FL_NORPC;
2723
2724         rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2725                                   &req, 0);
2726
2727         /* If we succeeded, ship it off; if not, there's no point in doing
2728          * anything. Also no resends, no interpret callback, and no commit
2729          * callback.
2730          */
2731         if (!rc) {
2732                 req->rq_no_resend = 1;
2733                 ptlrpcd_add_req(req);
2734         }
2735 }
2736
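/*
 * Attach @data to @lock->l_ast_data unless some other data is already
 * attached.  Return 1 if l_ast_data ends up pointing at @data (it was
 * either unset or already equal to @data), and 0 if the lock already
 * carries different ast data.
 */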
2737 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2738 {
2739         int set = 0;
2740
2741         LASSERT(lock != NULL);
2742
2743         lock_res_and_lock(lock);
2744
2745         if (lock->l_ast_data == NULL)
2746                 lock->l_ast_data = data;
2747         if (lock->l_ast_data == data)
2748                 set = 1;
2749
2750         unlock_res_and_lock(lock);
2751
2752         return set;
2753 }
2754
2755 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2756                      void *cookie, struct lustre_handle *lockh,
2757                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2758                      int errcode)
2759 {
2760         bool intent = *flags & LDLM_FL_HAS_INTENT;
2761         int rc;
2762         ENTRY;
2763
2764         /* The request was created before the ldlm_cli_enqueue() call. */
2765         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2766                 struct ldlm_reply *rep;
2767
2768                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2769                 LASSERT(rep != NULL);
2770
2771                 rep->lock_policy_res1 =
2772                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2773                 if (rep->lock_policy_res1)
2774                         errcode = rep->lock_policy_res1;
2775                 if (!speculative)
2776                         *flags |= LDLM_FL_LVB_READY;
2777         } else if (errcode == ELDLM_OK) {
2778                 *flags |= LDLM_FL_LVB_READY;
2779         }
2780
2781         /* Call the update callback. */
2782         rc = (*upcall)(cookie, lockh, errcode);
2783
2784         /* release the reference taken in ldlm_cli_enqueue() */
2785         if (errcode == ELDLM_LOCK_MATCHED)
2786                 errcode = ELDLM_OK;
2787         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2788                 ldlm_lock_decref(lockh, mode);
2789
2790         RETURN(rc);
2791 }
2792
2793 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2794                           void *args, int rc)
2795 {
2796         struct osc_enqueue_args *aa = args;
2797         struct ldlm_lock *lock;
2798         struct lustre_handle *lockh = &aa->oa_lockh;
2799         enum ldlm_mode mode = aa->oa_mode;
2800         struct ost_lvb *lvb = aa->oa_lvb;
2801         __u32 lvb_len = sizeof(*lvb);
2802         __u64 flags = 0;
2803         struct ldlm_enqueue_info einfo = {
2804                 .ei_type = aa->oa_type,
2805                 .ei_mode = mode,
2806         };
2807
2808         ENTRY;
2809
2810         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2811          * be valid. */
2812         lock = ldlm_handle2lock(lockh);
2813         LASSERTF(lock != NULL,
2814                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2815                  lockh->cookie, req, aa);
2816
2817         /* Take an additional reference so that a blocking AST that
2818          * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2819          * to arrive after the upcall has been executed by
2820          * osc_enqueue_fini(). */
2821         ldlm_lock_addref(lockh, mode);
2822
2823         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2824         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2825
2826         /* Let the CP AST grant the lock first. */
2827         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2828
2829         if (aa->oa_speculative) {
2830                 LASSERT(aa->oa_lvb == NULL);
2831                 LASSERT(aa->oa_flags == NULL);
2832                 aa->oa_flags = &flags;
2833         }
2834
2835         /* Complete the procedure for obtaining the lock. */
2836         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2837                                    lvb, lvb_len, lockh, rc, false);
2838         /* Complete osc stuff. */
2839         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2840                               aa->oa_flags, aa->oa_speculative, rc);
2841
2842         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2843
2844         ldlm_lock_decref(lockh, mode);
2845         LDLM_LOCK_PUT(lock);
2846         RETURN(rc);
2847 }
2848
2849 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
2850  * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
2851  * with other synchronous requests; however, holding some locks while trying to
2852  * obtain others may take a considerable amount of time in the case of OST
2853  * failure, and when a client does not release locks that other sync requests
2854  * are waiting for, that client is evicted from the cluster -- such scenarios
2855  * make life difficult, so release locks just after they are obtained. */
2856 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2857                      __u64 *flags, union ldlm_policy_data *policy,
2858                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2859                      void *cookie, struct ldlm_enqueue_info *einfo,
2860                      struct ptlrpc_request_set *rqset, int async,
2861                      bool speculative)
2862 {
2863         struct obd_device *obd = exp->exp_obd;
2864         struct lustre_handle lockh = { 0 };
2865         struct ptlrpc_request *req = NULL;
2866         int intent = *flags & LDLM_FL_HAS_INTENT;
2867         __u64 match_flags = *flags;
2868         enum ldlm_mode mode;
2869         int rc;
2870         ENTRY;
2871
2872         /* Filesystem lock extents are extended to page boundaries so that
2873          * dealing with the page cache is a little smoother.  */
2874         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2875         policy->l_extent.end |= ~PAGE_MASK;
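        /*
         * Worked example (illustrative, assuming 4 KiB pages so that
         * ~PAGE_MASK == 0xfff):
         *   start = 5000: 5000 - (5000 & 0xfff) = 5000 - 904 = 4096
         *   end   = 6000: 6000 | 0xfff = 8191
         * i.e. the byte range [5000, 6000] is widened to the page-aligned
         * range [4096, 8191].
         */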
2876
2877         /* Next, search for already existing extent locks that will cover us */
2878         /* If we're trying to read, we also search for an existing PW lock.  The
2879          * VFS and page cache already protect us locally, so lots of readers/
2880          * writers can share a single PW lock.
2881          *
2882          * There are problems with conversion deadlocks, so instead of
2883          * converting a read lock to a write lock, we'll just enqueue a new
2884          * one.
2885          *
2886          * At some point we should cancel the read lock instead of making them
2887          * send us a blocking callback, but there are problems with canceling
2888          * locks out from other users right now, too. */
2889         mode = einfo->ei_mode;
2890         if (einfo->ei_mode == LCK_PR)
2891                 mode |= LCK_PW;
2892         /* Normal lock requests must wait for the LVB to be ready before
2893          * matching a lock; speculative lock requests do not need to,
2894          * because they will not actually use the lock. */
2895         if (!speculative)
2896                 match_flags |= LDLM_FL_LVB_READY;
2897         if (intent != 0)
2898                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2899         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2900                                einfo->ei_type, policy, mode, &lockh);
2901         if (mode) {
2902                 struct ldlm_lock *matched;
2903
2904                 if (*flags & LDLM_FL_TEST_LOCK)
2905                         RETURN(ELDLM_OK);
2906
2907                 matched = ldlm_handle2lock(&lockh);
2908                 if (speculative) {
2909                         /* This DLM lock request is speculative, and does not
2910                          * have an associated IO request. Therefore if there
2911                          * is already a DLM lock, it will just inform the
2912                          * caller to cancel the request for this stripe. */
2913                         lock_res_and_lock(matched);
2914                         if (ldlm_extent_equal(&policy->l_extent,
2915                             &matched->l_policy_data.l_extent))
2916                                 rc = -EEXIST;
2917                         else
2918                                 rc = -ECANCELED;
2919                         unlock_res_and_lock(matched);
2920
2921                         ldlm_lock_decref(&lockh, mode);
2922                         LDLM_LOCK_PUT(matched);
2923                         RETURN(rc);
2924                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2925                         *flags |= LDLM_FL_LVB_READY;
2926
2927                         /* We already have a lock, and it's referenced. */
2928                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2929
2930                         ldlm_lock_decref(&lockh, mode);
2931                         LDLM_LOCK_PUT(matched);
2932                         RETURN(ELDLM_OK);
2933                 } else {
2934                         ldlm_lock_decref(&lockh, mode);
2935                         LDLM_LOCK_PUT(matched);
2936                 }
2937         }
2938
2939         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2940                 RETURN(-ENOLCK);
2941
2942         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2943         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2944
2945         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2946                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2947         if (async) {
2948                 if (!rc) {
2949                         struct osc_enqueue_args *aa;
2950                         aa = ptlrpc_req_async_args(aa, req);
2951                         aa->oa_exp         = exp;
2952                         aa->oa_mode        = einfo->ei_mode;
2953                         aa->oa_type        = einfo->ei_type;
2954                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2955                         aa->oa_upcall      = upcall;
2956                         aa->oa_cookie      = cookie;
2957                         aa->oa_speculative = speculative;
2958                         if (!speculative) {
2959                                 aa->oa_flags  = flags;
2960                                 aa->oa_lvb    = lvb;
2961                         } else {
2962                                 /* speculative locks essentially enqueue
2963                                  * a DLM lock in advance, so we don't care
2964                                  * about the result of the enqueue. */
2965                                 aa->oa_lvb    = NULL;
2966                                 aa->oa_flags  = NULL;
2967                         }
2968
2969                         req->rq_interpret_reply = osc_enqueue_interpret;
2970                         ptlrpc_set_add_req(rqset, req);
2971                 }
2972                 RETURN(rc);
2973         }
2974
2975         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2976                               flags, speculative, rc);
2977
2978         RETURN(rc);
2979 }
2980
2981 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2982                    struct ldlm_res_id *res_id, enum ldlm_type type,
2983                    union ldlm_policy_data *policy, enum ldlm_mode mode,
2984                    __u64 *flags, struct osc_object *obj,
2985                    struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2986 {
2987         struct obd_device *obd = exp->exp_obd;
2988         __u64 lflags = *flags;
2989         enum ldlm_mode rc;
2990         ENTRY;
2991
2992         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2993                 RETURN(-EIO);
2994
2995         /* Filesystem lock extents are extended to page boundaries so that
2996          * dealing with the page cache is a little smoother */
2997         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2998         policy->l_extent.end |= ~PAGE_MASK;
2999
3000         /* Next, search for already existing extent locks that will cover us */
3001         rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
3002                                         res_id, type, policy, mode, lockh,
3003                                         match_flags);
3004         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
3005                 RETURN(rc);
3006
3007         if (obj != NULL) {
3008                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
3009
3010                 LASSERT(lock != NULL);
3011                 if (osc_set_lock_data(lock, obj)) {
3012                         lock_res_and_lock(lock);
3013                         if (!ldlm_is_lvb_cached(lock)) {
3014                                 LASSERT(lock->l_ast_data == obj);
3015                                 osc_lock_lvb_update(env, obj, lock, NULL);
3016                                 ldlm_set_lvb_cached(lock);
3017                         }
3018                         unlock_res_and_lock(lock);
3019                 } else {
3020                         ldlm_lock_decref(lockh, rc);
3021                         rc = 0;
3022                 }
3023                 LDLM_LOCK_PUT(lock);
3024         }
3025         RETURN(rc);
3026 }
3027
3028 static int osc_statfs_interpret(const struct lu_env *env,
3029                                 struct ptlrpc_request *req, void *args, int rc)
3030 {
3031         struct osc_async_args *aa = args;
3032         struct obd_statfs *msfs;
3033
3034         ENTRY;
3035         if (rc == -EBADR)
3036                 /*
3037                  * The request has in fact never been sent due to issues at
3038                  * a higher level (LOV).  Exit immediately since the caller
3039                  * is aware of the problem and takes care of the clean up.
3040                  */
3041                 RETURN(rc);
3042
3043         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3044             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3045                 GOTO(out, rc = 0);
3046
3047         if (rc != 0)
3048                 GOTO(out, rc);
3049
3050         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3051         if (msfs == NULL)
3052                 GOTO(out, rc = -EPROTO);
3053
3054         *aa->aa_oi->oi_osfs = *msfs;
3055 out:
3056         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3057
3058         RETURN(rc);
3059 }
3060
3061 static int osc_statfs_async(struct obd_export *exp,
3062                             struct obd_info *oinfo, time64_t max_age,
3063                             struct ptlrpc_request_set *rqset)
3064 {
3065         struct obd_device     *obd = class_exp2obd(exp);
3066         struct ptlrpc_request *req;
3067         struct osc_async_args *aa;
3068         int rc;
3069         ENTRY;
3070
3071         if (obd->obd_osfs_age >= max_age) {
3072                 CDEBUG(D_SUPER,
3073                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3074                        obd->obd_name, &obd->obd_osfs,
3075                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3076                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3077                 spin_lock(&obd->obd_osfs_lock);
3078                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3079                 spin_unlock(&obd->obd_osfs_lock);
3080                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3081                 if (oinfo->oi_cb_up)
3082                         oinfo->oi_cb_up(oinfo, 0);
3083
3084                 RETURN(0);
3085         }
3086
3087         /* We could possibly pass max_age in the request (as an absolute
3088          * timestamp or a "seconds.usec ago") so the target can avoid doing
3089          * extra calls into the filesystem if that isn't necessary (e.g.
3090          * during mount that would help a bit).  Having relative timestamps
3091          * is not so great if request processing is slow, while absolute
3092          * timestamps are not ideal because they need time synchronization. */
3093         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3094         if (req == NULL)
3095                 RETURN(-ENOMEM);
3096
3097         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3098         if (rc) {
3099                 ptlrpc_request_free(req);
3100                 RETURN(rc);
3101         }
3102         ptlrpc_request_set_replen(req);
3103         req->rq_request_portal = OST_CREATE_PORTAL;
3104         ptlrpc_at_set_req_timeout(req);
3105
3106         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3107                 /* procfs requests should not wait for recovery to avoid deadlock */
3108                 req->rq_no_resend = 1;
3109                 req->rq_no_delay = 1;
3110         }
3111
3112         req->rq_interpret_reply = osc_statfs_interpret;
3113         aa = ptlrpc_req_async_args(aa, req);
3114         aa->aa_oi = oinfo;
3115
3116         ptlrpc_set_add_req(rqset, req);
3117         RETURN(0);
3118 }
3119
3120 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3121                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3122 {
3123         struct obd_device     *obd = class_exp2obd(exp);
3124         struct obd_statfs     *msfs;
3125         struct ptlrpc_request *req;
3126         struct obd_import     *imp, *imp0;
3127         int rc;
3128         ENTRY;
3129
3130         /* Since the request might also come from lprocfs, we need to
3131          * sync this with client_disconnect_export(); see Bug 15684.
3132          */
3133         with_imp_locked(obd, imp0, rc)
3134                 imp = class_import_get(imp0);
3135         if (rc)
3136                 RETURN(rc);
3137
3138         /* We could possibly pass max_age in the request (as an absolute
3139          * timestamp or a "seconds.usec ago") so the target can avoid doing
3140          * extra calls into the filesystem if that isn't necessary (e.g.
3141          * during mount that would help a bit).  Having relative timestamps
3142          * is not so great if request processing is slow, while absolute
3143          * timestamps are not ideal because they need time synchronization. */
3144         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3145
3146         class_import_put(imp);
3147
3148         if (req == NULL)
3149                 RETURN(-ENOMEM);
3150
3151         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3152         if (rc) {
3153                 ptlrpc_request_free(req);
3154                 RETURN(rc);
3155         }
3156         ptlrpc_request_set_replen(req);
3157         req->rq_request_portal = OST_CREATE_PORTAL;
3158         ptlrpc_at_set_req_timeout(req);
3159
3160         if (flags & OBD_STATFS_NODELAY) {
3161                 /* procfs requests should not wait for recovery to avoid deadlock */
3162                 req->rq_no_resend = 1;
3163                 req->rq_no_delay = 1;
3164         }
3165
3166         rc = ptlrpc_queue_wait(req);
3167         if (rc)
3168                 GOTO(out, rc);
3169
3170         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3171         if (msfs == NULL)
3172                 GOTO(out, rc = -EPROTO);
3173
3174         *osfs = *msfs;
3175
3176         EXIT;
3177 out:
3178         ptlrpc_req_finished(req);
3179         return rc;
3180 }
3181
3182 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3183                          void *karg, void __user *uarg)
3184 {
3185         struct obd_device *obd = exp->exp_obd;
3186         struct obd_ioctl_data *data = karg;
3187         int rc = 0;
3188
3189         ENTRY;
3190         if (!try_module_get(THIS_MODULE)) {
3191                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3192                        module_name(THIS_MODULE));
3193                 return -EINVAL;
3194         }
3195         switch (cmd) {
3196         case OBD_IOC_CLIENT_RECOVER:
3197                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3198                                            data->ioc_inlbuf1, 0);
3199                 if (rc > 0)
3200                         rc = 0;
3201                 break;
3202         case OBD_IOC_GETATTR:
3203                 rc = obd_getattr(NULL, exp, &data->ioc_obdo1);
3204                 break;
3205         case IOC_OSC_SET_ACTIVE:
3206                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3207                                               data->ioc_offset);
3208                 break;
3209         default:
3210                 rc = -ENOTTY;
3211                 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3212                        obd->obd_name, cmd, current->comm, rc);
3213                 break;
3214         }
3215
3216         module_put(THIS_MODULE);
3217         return rc;
3218 }
3219
3220 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3221                        u32 keylen, void *key, u32 vallen, void *val,
3222                        struct ptlrpc_request_set *set)
3223 {
3224         struct ptlrpc_request *req;
3225         struct obd_device     *obd = exp->exp_obd;
3226         struct obd_import     *imp = class_exp2cliimp(exp);
3227         char                  *tmp;
3228         int                    rc;
3229         ENTRY;
3230
3231         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3232
3233         if (KEY_IS(KEY_CHECKSUM)) {
3234                 if (vallen != sizeof(int))
3235                         RETURN(-EINVAL);
3236                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3237                 RETURN(0);
3238         }
3239
3240         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3241                 sptlrpc_conf_client_adapt(obd);
3242                 RETURN(0);
3243         }
3244
3245         if (KEY_IS(KEY_FLUSH_CTX)) {
3246                 sptlrpc_import_flush_my_ctx(imp);
3247                 RETURN(0);
3248         }
3249
3250         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3251                 struct client_obd *cli = &obd->u.cli;
3252                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3253                 long target = *(long *)val;
3254
3255                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3256                 *(long *)val -= nr;
3257                 RETURN(0);
3258         }
3259
3260         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3261                 RETURN(-EINVAL);
3262
3263         /* We pass all other commands directly to the OST. Since nobody calls
3264            OSC methods directly and everybody is supposed to go through LOV, we
3265            assume LOV checked invalid values for us.
3266            The only recognised values so far are evict_by_nid and mds_conn.
3267            Even if something bad goes through, we'd get a -EINVAL from the OST
3268            anyway. */
3269
3270         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3271                                                 &RQF_OST_SET_GRANT_INFO :
3272                                                 &RQF_OBD_SET_INFO);
3273         if (req == NULL)
3274                 RETURN(-ENOMEM);
3275
3276         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3277                              RCL_CLIENT, keylen);
3278         if (!KEY_IS(KEY_GRANT_SHRINK))
3279                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3280                                      RCL_CLIENT, vallen);
3281         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3282         if (rc) {
3283                 ptlrpc_request_free(req);
3284                 RETURN(rc);
3285         }
3286
3287         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3288         memcpy(tmp, key, keylen);
3289         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3290                                                         &RMF_OST_BODY :
3291                                                         &RMF_SETINFO_VAL);
3292         memcpy(tmp, val, vallen);
3293
3294         if (KEY_IS(KEY_GRANT_SHRINK)) {
3295                 struct osc_grant_args *aa;
3296                 struct obdo *oa;
3297
3298                 aa = ptlrpc_req_async_args(aa, req);
3299                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3300                 if (!oa) {
3301                         ptlrpc_req_finished(req);
3302                         RETURN(-ENOMEM);
3303                 }
3304                 *oa = ((struct ost_body *)val)->oa;
3305                 aa->aa_oa = oa;
3306                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3307         }
3308
3309         ptlrpc_request_set_replen(req);
3310         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3311                 LASSERT(set != NULL);
3312                 ptlrpc_set_add_req(set, req);
3313                 ptlrpc_check_set(NULL, set);
3314         } else {
3315                 ptlrpcd_add_req(req);
3316         }
3317
3318         RETURN(0);
3319 }
3320 EXPORT_SYMBOL(osc_set_info_async);
3321
3322 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3323                   struct obd_device *obd, struct obd_uuid *cluuid,
3324                   struct obd_connect_data *data, void *localdata)
3325 {
3326         struct client_obd *cli = &obd->u.cli;
3327
3328         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3329                 long lost_grant;
3330                 long grant;
3331
3332                 spin_lock(&cli->cl_loi_list_lock);
3333                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3334                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3335                         /* restore ocd_grant_blkbits as client page bits */
3336                         data->ocd_grant_blkbits = PAGE_SHIFT;
3337                         grant += cli->cl_dirty_grant;
3338                 } else {
3339                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3340                 }
3341                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
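                /* The GNU "?:" above evaluates to 'grant' when it is
                 * non-zero, and to 2 * cli_brw_size(obd) otherwise. */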
3342                 lost_grant = cli->cl_lost_grant;
3343                 cli->cl_lost_grant = 0;
3344                 spin_unlock(&cli->cl_loi_list_lock);
3345
3346                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3347                        " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3348                        data->ocd_version, data->ocd_grant, lost_grant);
3349         }
3350
3351         RETURN(0);
3352 }
3353 EXPORT_SYMBOL(osc_reconnect);
3354
3355 int osc_disconnect(struct obd_export *exp)
3356 {
3357         struct obd_device *obd = class_exp2obd(exp);
3358         int rc;
3359
3360         rc = client_disconnect_export(exp);
3361         /**
3362          * Initially we put del_shrink_grant before disconnect_export, but it
3363          * causes the following problem if setup (connect) and cleanup
3364          * (disconnect) are tangled together.
3365          *      connect p1                     disconnect p2
3366          *   ptlrpc_connect_import
3367          *     ...............               class_manual_cleanup
3368          *                                     osc_disconnect
3369          *                                     del_shrink_grant
3370          *   ptlrpc_connect_interrupt
3371          *     osc_init_grant
3372          *   add this client to shrink list
3373          *                                      cleanup_osc
3374          * Bang! The grant shrink thread triggers the shrink. BUG 18662
3375          */
3376         osc_del_grant_list(&obd->u.cli);
3377         return rc;
3378 }
3379 EXPORT_SYMBOL(osc_disconnect);
3380
3381 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3382                                  struct hlist_node *hnode, void *arg)
3383 {
3384         struct lu_env *env = arg;
3385         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3386         struct ldlm_lock *lock;
3387         struct osc_object *osc = NULL;
3388         ENTRY;
3389
3390         lock_res(res);
3391         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3392                 if (lock->l_ast_data != NULL && osc == NULL) {
3393                         osc = lock->l_ast_data;
3394                         cl_object_get(osc2cl(osc));
3395                 }
3396
3397                 /* clear the LDLM_FL_CLEANED flag to make sure the lock will be
3398                  * canceled by the 2nd round of the ldlm_namespace_cleanup() call
3399                  * in osc_import_event(). */
3400                 ldlm_clear_cleaned(lock);
3401         }
3402         unlock_res(res);
3403
3404         if (osc != NULL) {
3405                 osc_object_invalidate(env, osc);
3406                 cl_object_put(env, osc2cl(osc));
3407         }
3408
3409         RETURN(0);
3410 }
3411 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3412
3413 static int osc_import_event(struct obd_device *obd,
3414                             struct obd_import *imp,
3415                             enum obd_import_event event)
3416 {
3417         struct client_obd *cli;
3418         int rc = 0;
3419
3420         ENTRY;
3421         LASSERT(imp->imp_obd == obd);
3422
3423         switch (event) {
3424         case IMP_EVENT_DISCON: {
3425                 cli = &obd->u.cli;
3426                 spin_lock(&cli->cl_loi_list_lock);
3427                 cli->cl_avail_grant = 0;
3428                 cli->cl_lost_grant = 0;
3429                 spin_unlock(&cli->cl_loi_list_lock);
3430                 break;
3431         }
3432         case IMP_EVENT_INACTIVE: {
3433                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3434                 break;
3435         }
3436         case IMP_EVENT_INVALIDATE: {
3437                 struct ldlm_namespace *ns = obd->obd_namespace;
3438                 struct lu_env         *env;
3439                 __u16                  refcheck;
3440
3441                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3442
3443                 env = cl_env_get(&refcheck);
3444                 if (!IS_ERR(env)) {
3445                         osc_io_unplug(env, &obd->u.cli, NULL);
3446
3447                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3448                                                  osc_ldlm_resource_invalidate,
3449                                                  env, 0);
3450                         cl_env_put(env, &refcheck);
3451
3452                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3453                 } else
3454                         rc = PTR_ERR(env);
3455                 break;
3456         }
3457         case IMP_EVENT_ACTIVE: {
3458                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3459                 break;
3460         }
3461         case IMP_EVENT_OCD: {
3462                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3463
3464                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3465                         osc_init_grant(&obd->u.cli, ocd);
3466
3467                 /* See bug 7198 */
3468                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3469                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3470
3471                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3472                 break;
3473         }
3474         case IMP_EVENT_DEACTIVATE: {
3475                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3476                 break;
3477         }
3478         case IMP_EVENT_ACTIVATE: {
3479                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3480                 break;
3481         }
3482         default:
3483                 CERROR("Unknown import event %d\n", event);
3484                 LBUG();
3485         }
3486         RETURN(rc);
3487 }
3488
3489 /**
3490  * Determine whether the lock can be canceled before replaying the lock
3491  * during recovery, see bug16774 for detailed information.
3492  *
3493  * \retval zero the lock can't be canceled
3494  * \retval other ok to cancel
3495  */
3496 static int osc_cancel_weight(struct ldlm_lock *lock)
3497 {
3498         /*
3499          * Cancel all unused and granted extent lock.
3500          */
3501         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3502             ldlm_is_granted(lock) &&
3503             osc_ldlm_weigh_ast(lock) == 0)
3504                 RETURN(1);
3505
3506         RETURN(0);
3507 }
3508
3509 static int brw_queue_work(const struct lu_env *env, void *data)
3510 {
3511         struct client_obd *cli = data;
3512
3513         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3514
3515         osc_io_unplug(env, cli, NULL);
3516         RETURN(0);
3517 }
3518
3519 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3520 {
3521         struct client_obd *cli = &obd->u.cli;
3522         void *handler;
3523         int rc;
3524
3525         ENTRY;
3526
3527         rc = ptlrpcd_addref();
3528         if (rc)
3529                 RETURN(rc);
3530
3531         rc = client_obd_setup(obd, lcfg);
3532         if (rc)
3533                 GOTO(out_ptlrpcd, rc);
3534
3535
3536         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3537         if (IS_ERR(handler))
3538                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3539         cli->cl_writeback_work = handler;
3540
3541         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3542         if (IS_ERR(handler))
3543                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3544         cli->cl_lru_work = handler;
3545
3546         rc = osc_quota_setup(obd);
3547         if (rc)
3548                 GOTO(out_ptlrpcd_work, rc);
3549
3550         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3551         cli->cl_root_squash = 0;
3552         osc_update_next_shrink(cli);
3553
3554         RETURN(rc);
3555
3556 out_ptlrpcd_work:
3557         if (cli->cl_writeback_work != NULL) {
3558                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3559                 cli->cl_writeback_work = NULL;
3560         }
3561         if (cli->cl_lru_work != NULL) {
3562                 ptlrpcd_destroy_work(cli->cl_lru_work);
3563                 cli->cl_lru_work = NULL;
3564         }
3565         client_obd_cleanup(obd);
3566 out_ptlrpcd:
3567         ptlrpcd_decref();
3568         RETURN(rc);
3569 }
3570 EXPORT_SYMBOL(osc_setup_common);
3571
3572 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3573 {
3574         struct client_obd *cli = &obd->u.cli;
3575         int                adding;
3576         int                added;
3577         int                req_count;
3578         int                rc;
3579
3580         ENTRY;
3581
3582         rc = osc_setup_common(obd, lcfg);
3583         if (rc < 0)
3584                 RETURN(rc);
3585
3586         rc = osc_tunables_init(obd);
3587         if (rc)
3588                 RETURN(rc);
3589
3590         /*
3591          * We try to control the total number of requests with an upper limit,
3592          * osc_reqpool_maxreqcount. There might be some race which will cause
3593          * over-limit allocation, but it is fine.
3594          */
3595         req_count = atomic_read(&osc_pool_req_count);
3596         if (req_count < osc_reqpool_maxreqcount) {
3597                 adding = cli->cl_max_rpcs_in_flight + 2;
3598                 if (req_count + adding > osc_reqpool_maxreqcount)
3599                         adding = osc_reqpool_maxreqcount - req_count;
3600
3601                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3602                 atomic_add(added, &osc_pool_req_count);
3603         }
3604
3605         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3606
3607         spin_lock(&osc_shrink_lock);
3608         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3609         spin_unlock(&osc_shrink_lock);
3610         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3611         cli->cl_import->imp_idle_debug = D_HA;
3612
3613         RETURN(0);
3614 }
3615
3616 int osc_precleanup_common(struct obd_device *obd)
3617 {
3618         struct client_obd *cli = &obd->u.cli;
3619         ENTRY;
3620
3621         /* LU-464
3622          * for echo client, export may be on zombie list, wait for
3623          * zombie thread to cull it, because cli.cl_import will be
3624          * cleared in client_disconnect_export():
3625          *   class_export_destroy() -> obd_cleanup() ->
3626          *   echo_device_free() -> echo_client_cleanup() ->
3627          *   obd_disconnect() -> osc_disconnect() ->
3628          *   client_disconnect_export()
3629          */
3630         obd_zombie_barrier();
3631         if (cli->cl_writeback_work) {
3632                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3633                 cli->cl_writeback_work = NULL;
3634         }
3635
3636         if (cli->cl_lru_work) {
3637                 ptlrpcd_destroy_work(cli->cl_lru_work);
3638                 cli->cl_lru_work = NULL;
3639         }
3640
3641         obd_cleanup_client_import(obd);
3642         RETURN(0);
3643 }
3644 EXPORT_SYMBOL(osc_precleanup_common);
3645
3646 static int osc_precleanup(struct obd_device *obd)
3647 {
3648         ENTRY;
3649
3650         osc_precleanup_common(obd);
3651
3652         ptlrpc_lprocfs_unregister_obd(obd);
3653         RETURN(0);
3654 }
3655
3656 int osc_cleanup_common(struct obd_device *obd)
3657 {
3658         struct client_obd *cli = &obd->u.cli;
3659         int rc;
3660
3661         ENTRY;
3662
3663         spin_lock(&osc_shrink_lock);
3664         list_del(&cli->cl_shrink_list);
3665         spin_unlock(&osc_shrink_lock);
3666
3667         /* lru cleanup */
3668         if (cli->cl_cache != NULL) {
3669                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3670                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3671                 list_del_init(&cli->cl_lru_osc);
3672                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3673                 cli->cl_lru_left = NULL;
3674                 cl_cache_decref(cli->cl_cache);
3675                 cli->cl_cache = NULL;
3676         }
3677
3678         /* free memory of osc quota cache */
3679         osc_quota_cleanup(obd);
3680
3681         rc = client_obd_cleanup(obd);
3682
3683         ptlrpcd_decref();
3684         RETURN(rc);
3685 }
3686 EXPORT_SYMBOL(osc_cleanup_common);
3687
3688 static const struct obd_ops osc_obd_ops = {
3689         .o_owner                = THIS_MODULE,
3690         .o_setup                = osc_setup,
3691         .o_precleanup           = osc_precleanup,
3692         .o_cleanup              = osc_cleanup_common,
3693         .o_add_conn             = client_import_add_conn,
3694         .o_del_conn             = client_import_del_conn,
3695         .o_connect              = client_connect_import,
3696         .o_reconnect            = osc_reconnect,
3697         .o_disconnect           = osc_disconnect,
3698         .o_statfs               = osc_statfs,
3699         .o_statfs_async         = osc_statfs_async,
3700         .o_create               = osc_create,
3701         .o_destroy              = osc_destroy,
3702         .o_getattr              = osc_getattr,
3703         .o_setattr              = osc_setattr,
3704         .o_iocontrol            = osc_iocontrol,
3705         .o_set_info_async       = osc_set_info_async,
3706         .o_import_event         = osc_import_event,
3707         .o_quotactl             = osc_quotactl,
3708 };
3709
3710 LIST_HEAD(osc_shrink_list);
3711 DEFINE_SPINLOCK(osc_shrink_lock);
3712
3713 #ifdef HAVE_SHRINKER_COUNT
3714 static struct shrinker osc_cache_shrinker = {
3715         .count_objects  = osc_cache_shrink_count,
3716         .scan_objects   = osc_cache_shrink_scan,
3717         .seeks          = DEFAULT_SEEKS,
3718 };
3719 #else
3720 static int osc_cache_shrink(struct shrinker *shrinker,
3721                             struct shrink_control *sc)
3722 {
3723         (void)osc_cache_shrink_scan(shrinker, sc);
3724
3725         return osc_cache_shrink_count(shrinker, sc);
3726 }
3727
3728 static struct shrinker osc_cache_shrinker = {
3729         .shrink   = osc_cache_shrink,
3730         .seeks    = DEFAULT_SEEKS,
3731 };
3732 #endif
3733
3734 static int __init osc_init(void)
3735 {
3736         unsigned int reqpool_size;
3737         unsigned int reqsize;
3738         int rc;
3739         ENTRY;
3740
3741         /* print an address of _any_ initialized kernel symbol from this
3742          * module, to allow debugging with gdb that doesn't support data
3743          * symbols from modules. */
3744         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3745
3746         rc = lu_kmem_init(osc_caches);
3747         if (rc)
3748                 RETURN(rc);
3749
3750         rc = class_register_type(&osc_obd_ops, NULL, true,
3751                                  LUSTRE_OSC_NAME, &osc_device_type);
3752         if (rc)
3753                 GOTO(out_kmem, rc);
3754
3755         rc = register_shrinker(&osc_cache_shrinker);
3756         if (rc)
3757                 GOTO(out_type, rc);
3758
3759         /* This is obviously too much memory; we only prevent overflow here */
3760         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3761                 GOTO(out_shrinker, rc = -EINVAL);
3762
3763         reqpool_size = osc_reqpool_mem_max << 20;
3764
3765         reqsize = 1;
3766         while (reqsize < OST_IO_MAXREQSIZE)
3767                 reqsize = reqsize << 1;
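        /* reqsize is now OST_IO_MAXREQSIZE rounded up to a power of two */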
3768
3769         /*
3770          * We don't enlarge the request count in the OSC pool according to
3771          * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3772          * after normal allocation has failed, so a small OSC pool won't
3773          * cause much performance degradation in most cases.
3774          */
3775         osc_reqpool_maxreqcount = reqpool_size / reqsize;
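        /*
         * Illustrative sizing (1 MiB is an assumed value here, not the
         * real OST_IO_MAXREQSIZE): with the default osc_reqpool_mem_max
         * of 5 MB and reqsize rounded up to 1 MiB, reqpool_size = 5 MiB
         * and osc_reqpool_maxreqcount = 5 requests.
         */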
3776
3777         atomic_set(&osc_pool_req_count, 0);
3778         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3779                                           ptlrpc_add_rqs_to_pool);
3780
3781         if (osc_rq_pool == NULL)
3782                 GOTO(out_shrinker, rc = -ENOMEM);
3783
3784         rc = osc_start_grant_work();
3785         if (rc != 0)
3786                 GOTO(out_req_pool, rc);
3787
3788         RETURN(rc);
3789
3790 out_req_pool:
3791         ptlrpc_free_rq_pool(osc_rq_pool);
3792 out_shrinker:
3793         unregister_shrinker(&osc_cache_shrinker);
3794 out_type:
3795         class_unregister_type(LUSTRE_OSC_NAME);
3796 out_kmem:
3797         lu_kmem_fini(osc_caches);
3798
3799         RETURN(rc);
3800 }
3801
3802 static void __exit osc_exit(void)
3803 {
3804         osc_stop_grant_work();
3805         unregister_shrinker(&osc_cache_shrinker);
3806         class_unregister_type(LUSTRE_OSC_NAME);
3807         lu_kmem_fini(osc_caches);
3808         ptlrpc_free_rq_pool(osc_rq_pool);
3809 }
3810
3811 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3812 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3813 MODULE_VERSION(LUSTRE_VERSION_STRING);
3814 MODULE_LICENSE("GPL");
3815
3816 module_init(osc_init);
3817 module_exit(osc_exit);