lustre/osc/osc_request.c (fs/lustre-release.git, commit 4ca9be098e23c9aa6dc302f68a960520096f2357)
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 */

#define DEBUG_SUBSYSTEM S_OSC

#include <linux/workqueue.h>
#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_net.h>
#include <lustre_obdo.h>
#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "osc_internal.h"
#include <lnet/lnet_rdma.h>

atomic_t osc_pool_req_count;
unsigned int osc_reqpool_maxreqcount;
struct ptlrpc_request_pool *osc_rq_pool;

/* max memory used for request pool, unit is MB */
static unsigned int osc_reqpool_mem_max = 5;
module_param(osc_reqpool_mem_max, uint, 0444);

static int osc_idle_timeout = 20;
module_param(osc_idle_timeout, uint, 0644);

#define osc_grant_args osc_brw_async_args

struct osc_setattr_args {
        struct obdo             *sa_oa;
        obd_enqueue_update_f     sa_upcall;
        void                    *sa_cookie;
};

struct osc_fsync_args {
        struct osc_object       *fa_obj;
        struct obdo             *fa_oa;
        obd_enqueue_update_f    fa_upcall;
        void                    *fa_cookie;
};

struct osc_ladvise_args {
        struct obdo             *la_oa;
        obd_enqueue_update_f     la_upcall;
        void                    *la_cookie;
};

static void osc_release_ppga(struct brw_page **ppga, size_t count);
static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
                         void *data, int rc);
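
/*
 * Pack the client obdo into the body of an already-packed OST request,
 * converting it to the on-wire format negotiated at connect time; this is
 * why the import's connect data is handed to lustre_set_wire_obdo().
 */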
void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
{
        struct ost_body *body;

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
}

static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        EXIT;
out:
        ptlrpc_req_finished(req);

        return rc;
}

static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        int                      rc;

        ENTRY;
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        EXIT;
out:
        ptlrpc_req_finished(req);

        RETURN(rc);
}

static int osc_setattr_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_setattr_args *sa = args;
        struct ost_body *body;

        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
                             &body->oa);
out:
        rc = sa->sa_upcall(sa->sa_cookie, rc);
        RETURN(rc);
}

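/*
 * Issue an OST_SETATTR asynchronously. With a NULL @rqset the request is
 * handed straight to ptlrpcd and no reply processing happens; otherwise
 * the reply is unpacked in osc_setattr_interpret() and @upcall is invoked
 * with @cookie and the result code.
 *
 * A minimal usage sketch (my_upcall/my_cookie are hypothetical names,
 * assuming a filled-in obdo and an obd_enqueue_update_f callback):
 *
 *      rc = osc_setattr_async(exp, oa, my_upcall, my_cookie, rqset);
 */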
int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
                      obd_enqueue_update_f upcall, void *cookie,
                      struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct osc_setattr_args *sa;
        int                      rc;

        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_pack_req_body(req, oa);

        ptlrpc_request_set_replen(req);

        /* do mds to ost setattr asynchronously */
        if (!rqset) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
        } else {
                req->rq_interpret_reply = osc_setattr_interpret;

                sa = ptlrpc_req_async_args(sa, req);
                sa->sa_oa = oa;
                sa->sa_upcall = upcall;
                sa->sa_cookie = cookie;

                ptlrpc_set_add_req(rqset, req);
        }

        RETURN(0);
}

static int osc_ladvise_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        struct osc_ladvise_args *la = arg;
        struct ost_body *body;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out, rc = -EPROTO);

        *la->la_oa = body->oa;
out:
        rc = la->la_upcall(la->la_cookie, rc);
        RETURN(rc);
}

/**
 * If rqset is NULL, do not wait for response. Upcall and cookie could also
 * be NULL in this case.
 */
int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
                     struct ladvise_hdr *ladvise_hdr,
                     obd_enqueue_update_f upcall, void *cookie,
                     struct ptlrpc_request_set *rqset)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct osc_ladvise_args *la;
        int                      rc;
        struct lu_ladvise       *req_ladvise;
        struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
        int                      num_advise = ladvise_hdr->lah_count;
        struct ladvise_hdr      *req_ladvise_hdr;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
        if (req == NULL)
                RETURN(-ENOMEM);

        req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
                             num_advise * sizeof(*ladvise));
        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }
        req->rq_request_portal = OST_IO_PORTAL;
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
                             oa);

        req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
                                                 &RMF_OST_LADVISE_HDR);
        memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));

        req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
        memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
        ptlrpc_request_set_replen(req);

        if (rqset == NULL) {
                /* Do not wait for response. */
                ptlrpcd_add_req(req);
                RETURN(0);
        }

        req->rq_interpret_reply = osc_ladvise_interpret;
        la = ptlrpc_req_async_args(la, req);
        la->la_oa = oa;
        la->la_upcall = upcall;
        la->la_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

static int osc_create(const struct lu_env *env, struct obd_export *exp,
                      struct obdo *oa)
{
        struct ptlrpc_request *req;
        struct ost_body       *body;
        int                    rc;
        ENTRY;

        LASSERT(oa != NULL);
        LASSERT(oa->o_valid & OBD_MD_FLGROUP);
        LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
        if (req == NULL)
                GOTO(out, rc = -ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
        if (rc) {
                ptlrpc_request_free(req);
                GOTO(out, rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        rc = ptlrpc_queue_wait(req);
        if (rc)
                GOTO(out_req, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL)
                GOTO(out_req, rc = -EPROTO);

        CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
        lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);

        oa->o_blksize = cli_brw_size(exp->exp_obd);
        oa->o_valid |= OBD_MD_FLBLKSZ;

        CDEBUG(D_HA, "transno: %lld\n",
               lustre_msg_get_transno(req->rq_repmsg));
out_req:
        ptlrpc_req_finished(req);
out:
        RETURN(rc);
}

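/*
 * Send an OST_PUNCH request to truncate an object or punch a hole in it.
 * The affected range travels inside the packed obdo; completion is
 * reported through osc_setattr_interpret(), which invokes @upcall with
 * @cookie and the result code.
 */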
int osc_punch_send(struct obd_export *exp, struct obdo *oa,
                   obd_enqueue_update_f upcall, void *cookie)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct obd_import *imp = class_exp2cliimp(exp);
        struct ost_body *body;
        int rc;

        ENTRY;

        req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
        if (rc < 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        osc_set_io_portal(req);

        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_punch_send);

/**
 * osc_fallocate_base() - Handles fallocate requests.
 *
 * @exp:        Export structure
 * @oa:         Attributes passed to OSS from client (obdo structure)
 * @upcall:     Completion callback invoked when the request finishes
 * @cookie:     Opaque argument passed to @upcall
 * @mode:       Operation done on given range.
 *
 * Only block allocation (standard preallocation) is currently supported;
 * other mode flags are not supported yet. ftruncate(2) and truncate(2)
 * are handled via a SETATTR request instead.
 *
 * Return: Non-zero on failure and 0 on success.
 */
int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
                       obd_enqueue_update_f upcall, void *cookie, int mode)
{
        struct ptlrpc_request *req;
        struct osc_setattr_args *sa;
        struct ost_body *body;
        struct obd_import *imp = class_exp2cliimp(exp);
        int rc;
        ENTRY;

        oa->o_falloc_mode = mode;
        req = ptlrpc_request_alloc(imp, &RQF_OST_FALLOCATE);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
        if (rc != 0) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);

        lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_setattr_interpret;
        BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
        sa = ptlrpc_req_async_args(sa, req);
        sa->sa_oa = oa;
        sa->sa_upcall = upcall;
        sa->sa_cookie = cookie;

        ptlrpcd_add_req(req);

        RETURN(0);
}
EXPORT_SYMBOL(osc_fallocate_base);

static int osc_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *args, int rc)
{
        struct osc_fsync_args *fa = args;
        struct ost_body *body;
        struct cl_attr *attr = &osc_env_info(env)->oti_attr;
        unsigned long valid = 0;
        struct cl_object *obj;
        ENTRY;

        if (rc != 0)
                GOTO(out, rc);

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                CERROR("can't unpack ost_body\n");
                GOTO(out, rc = -EPROTO);
        }

        *fa->fa_oa = body->oa;
        obj = osc2cl(fa->fa_obj);

        /* Update osc object's blocks attribute */
        cl_object_attr_lock(obj);
        if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
                attr->cat_blocks = body->oa.o_blocks;
                valid |= CAT_BLOCKS;
        }

        if (valid != 0)
                cl_object_attr_update(env, obj, attr, valid);
        cl_object_attr_unlock(obj);

out:
        rc = fa->fa_upcall(fa->fa_cookie, rc);
        RETURN(rc);
}

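/*
 * Send an OST_SYNC request for @obj. As the comment below notes, the size
 * and blocks fields of the obdo are overloaded to carry the start and end
 * of the range being synced; on reply, osc_sync_interpret() refreshes the
 * object's blocks attribute before calling @upcall.
 */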
int osc_sync_base(struct osc_object *obj, struct obdo *oa,
                  obd_enqueue_update_f upcall, void *cookie,
                  struct ptlrpc_request_set *rqset)
{
        struct obd_export     *exp = osc_export(obj);
        struct ptlrpc_request *req;
        struct ost_body       *body;
        struct osc_fsync_args *fa;
        int                    rc;
        ENTRY;

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
        if (req == NULL)
                RETURN(-ENOMEM);

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        /* overload the size and blocks fields in the oa with start/end */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);
        req->rq_interpret_reply = osc_sync_interpret;

        fa = ptlrpc_req_async_args(fa, req);
        fa->fa_obj = obj;
        fa->fa_oa = oa;
        fa->fa_upcall = upcall;
        fa->fa_cookie = cookie;

        ptlrpc_set_add_req(rqset, req);

        RETURN(0);
}

/* Find and cancel locally the locks matched by @mode in the resource
 * identified by @oa. Found locks are added to the @cancels list. Returns
 * the number of locks added to the @cancels list. */
static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
                                   struct list_head *cancels,
                                   enum ldlm_mode mode, __u64 lock_flags)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        struct ldlm_res_id res_id;
        struct ldlm_resource *res;
        int count;
        ENTRY;

        /* Return, i.e. cancel nothing, only if ELC is supported (flag in
         * export) but disabled through procfs (flag in NS).
         *
         * This distinguishes from a case when ELC is not supported originally,
         * when we still want to cancel locks in advance and just cancel them
         * locally, without sending any RPC. */
        if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
                RETURN(0);

        ostid_build_res_name(&oa->o_oi, &res_id);
        res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
        if (IS_ERR(res))
                RETURN(0);

        LDLM_RESOURCE_ADDREF(res);
        count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
                                           lock_flags, 0, NULL);
        LDLM_RESOURCE_DELREF(res);
        ldlm_resource_putref(res);
        RETURN(count);
}

static int osc_destroy_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req, void *args, int rc)
{
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

        atomic_dec(&cli->cl_destroy_in_flight);
        wake_up(&cli->cl_destroy_waitq);

        return 0;
}

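/*
 * Throttle destroy RPCs: atomically take a slot if fewer than
 * cl_max_rpcs_in_flight destroys are outstanding, and release it again on
 * failure, waking a waiter if the counter moved between the two atomic
 * operations.
 */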
static int osc_can_send_destroy(struct client_obd *cli)
{
        if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
            cli->cl_max_rpcs_in_flight) {
                /* The destroy request can be sent */
                return 1;
        }
        if (atomic_dec_return(&cli->cl_destroy_in_flight) <
            cli->cl_max_rpcs_in_flight) {
                /*
                 * The counter has been modified between the two atomic
                 * operations.
                 */
                wake_up(&cli->cl_destroy_waitq);
        }
        return 0;
}

static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
                       struct obdo *oa)
{
        struct client_obd     *cli = &exp->exp_obd->u.cli;
        struct ptlrpc_request *req;
        struct ost_body       *body;
        LIST_HEAD(cancels);
        int rc, count;
        ENTRY;

        if (!oa) {
                CDEBUG(D_INFO, "oa NULL\n");
                RETURN(-EINVAL);
        }

        count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
                                        LDLM_FL_DISCARD_DATA);

        req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
        if (req == NULL) {
                ldlm_lock_list_put(&cancels, l_bl_ast, count);
                RETURN(-ENOMEM);
        }

        rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
                               0, &cancels, count);
        if (rc) {
                ptlrpc_request_free(req);
                RETURN(rc);
        }

        req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
        ptlrpc_at_set_req_timeout(req);

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);

        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = osc_destroy_interpret;
        if (!osc_can_send_destroy(cli)) {
                /*
                 * Wait until the number of on-going destroy RPCs drops
                 * below cl_max_rpcs_in_flight.
                 */
                rc = l_wait_event_abortable_exclusive(
                        cli->cl_destroy_waitq,
                        osc_can_send_destroy(cli));
                if (rc) {
                        ptlrpc_req_finished(req);
                        RETURN(-EINTR);
                }
        }

        /* Do not wait for response */
        ptlrpcd_add_req(req);
        RETURN(0);
}

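/*
 * Fill in the dirty cache accounting (o_dirty/o_undirty) and grant fields
 * (o_grant/o_dropped) of @oa under cl_loi_list_lock, so the server can see
 * how much cache the client holds and how much extra grant it wants.
 */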
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
                                long writing_bytes)
{
        u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;

        LASSERT(!(oa->o_valid & bits));

        oa->o_valid |= bits;
        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_ocd_grant_param)
                oa->o_dirty = cli->cl_dirty_grant;
        else
                oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
        if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
                CERROR("dirty %lu > dirty_max %lu\n",
                       cli->cl_dirty_pages,
                       cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
                            (long)(obd_max_dirty_pages + 1))) {
                /* The atomic_read() and atomic_inc() on obd_dirty_pages are
                 * not covered by a lock, so they may safely race and trip
                 * this CERROR() unless we add in a small fudge factor (+1). */
                CERROR("%s: dirty %ld > system dirty_max %ld\n",
                       cli_name(cli), atomic_long_read(&obd_dirty_pages),
                       obd_max_dirty_pages);
                oa->o_undirty = 0;
        } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
                            0x7fffffff)) {
                CERROR("dirty %lu - dirty_max %lu too big???\n",
                       cli->cl_dirty_pages, cli->cl_dirty_max_pages);
                oa->o_undirty = 0;
        } else {
                unsigned long nrpages;
                unsigned long undirty;

                nrpages = cli->cl_max_pages_per_rpc;
                nrpages *= cli->cl_max_rpcs_in_flight + 1;
                nrpages = max(nrpages, cli->cl_dirty_max_pages);
                undirty = nrpages << PAGE_SHIFT;
                if (cli->cl_ocd_grant_param) {
                        int nrextents;

                        /* take extent tax into account when asking for more
                         * grant space */
                        nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
                                     cli->cl_max_extent_pages;
                        undirty += nrextents * cli->cl_grant_extent_tax;
                }
                /* Do not ask for more than OBD_MAX_GRANT - a margin for server
                 * to add extent tax, etc.
                 */
                oa->o_undirty = min(undirty, OBD_MAX_GRANT &
                                    ~(PTLRPC_MAX_BRW_SIZE * 4UL));
        }
        oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
        /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
        if (cli->cl_lost_grant > INT_MAX) {
                CDEBUG(D_CACHE,
                      "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
                      cli_name(cli), cli->cl_lost_grant);
                oa->o_dropped = INT_MAX;
        } else {
                oa->o_dropped = cli->cl_lost_grant;
        }
        cli->cl_lost_grant -= oa->o_dropped;
        spin_unlock(&cli->cl_loi_list_lock);
        CDEBUG(D_CACHE, "%s: dirty: %llu undirty: %u dropped %u grant: %llu"
               " cl_lost_grant %lu\n", cli_name(cli), oa->o_dirty,
               oa->o_undirty, oa->o_dropped, oa->o_grant, cli->cl_lost_grant);
}

void osc_update_next_shrink(struct client_obd *cli)
{
        cli->cl_next_shrink_grant = ktime_get_seconds() +
                                    cli->cl_grant_shrink_interval;

        CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
               cli->cl_next_shrink_grant);
}

static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant += grant;
        spin_unlock(&cli->cl_loi_list_lock);
}

static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
{
        if (body->oa.o_valid & OBD_MD_FLGRANT) {
                CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
                __osc_update_grant(cli, body->oa.o_grant);
        }
}

/**
 * grant thread data for shrinking space.
 */
struct grant_thread_data {
        struct list_head        gtd_clients;
        struct mutex            gtd_mutex;
        unsigned long           gtd_stopped:1;
};
static struct grant_thread_data client_gtd;

static int osc_shrink_grant_interpret(const struct lu_env *env,
                                      struct ptlrpc_request *req,
                                      void *args, int rc)
{
        struct osc_grant_args *aa = args;
        struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
        struct ost_body *body;

        if (rc != 0) {
                __osc_update_grant(cli, aa->aa_oa->o_grant);
                GOTO(out, rc);
        }

        body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        osc_update_grant(cli, body);
out:
        OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
        aa->aa_oa = NULL;

        return rc;
}

static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
        spin_lock(&cli->cl_loi_list_lock);
        oa->o_grant = cli->cl_avail_grant / 4;
        cli->cl_avail_grant -= oa->o_grant;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
                oa->o_valid |= OBD_MD_FLFLAGS;
                oa->o_flags = 0;
        }
        oa->o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);
}

/* Shrink the current grant, either from some large amount to enough for a
 * full set of in-flight RPCs, or if we have already shrunk to that limit
 * then to enough for a single RPC.  This avoids keeping more grant than
 * needed, and avoids shrinking the grant piecemeal. */
static int osc_shrink_grant(struct client_obd *cli)
{
        __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
                             (cli->cl_max_pages_per_rpc << PAGE_SHIFT);

        spin_lock(&cli->cl_loi_list_lock);
        if (cli->cl_avail_grant <= target_bytes)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
        spin_unlock(&cli->cl_loi_list_lock);

        return osc_shrink_grant_to_target(cli, target_bytes);
}

int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
        int                     rc = 0;
        struct ost_body        *body;
        ENTRY;

        spin_lock(&cli->cl_loi_list_lock);
        /* Don't shrink if that would not actually reduce the available grant.
         * We don't want to shrink below a single RPC, as that will negatively
         * impact block allocation and long-term performance. */
        if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
                target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;

        if (target_bytes >= cli->cl_avail_grant) {
                spin_unlock(&cli->cl_loi_list_lock);
                RETURN(0);
        }
        spin_unlock(&cli->cl_loi_list_lock);

        OBD_ALLOC_PTR(body);
        if (!body)
                RETURN(-ENOMEM);

        osc_announce_cached(cli, &body->oa, 0);

        spin_lock(&cli->cl_loi_list_lock);
        if (target_bytes >= cli->cl_avail_grant) {
                /* available grant has changed since target calculation */
                spin_unlock(&cli->cl_loi_list_lock);
                GOTO(out_free, rc = 0);
        }
        body->oa.o_grant = cli->cl_avail_grant - target_bytes;
        cli->cl_avail_grant = target_bytes;
        spin_unlock(&cli->cl_loi_list_lock);
        if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
                body->oa.o_valid |= OBD_MD_FLFLAGS;
                body->oa.o_flags = 0;
        }
        body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
        osc_update_next_shrink(cli);

        rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
                                sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
                                sizeof(*body), body, NULL);
        if (rc != 0)
                __osc_update_grant(cli, body->oa.o_grant);
out_free:
        OBD_FREE_PTR(body);
        RETURN(rc);
}

static int osc_should_shrink_grant(struct client_obd *client)
{
        time64_t next_shrink = client->cl_next_shrink_grant;

        if (client->cl_import == NULL)
                return 0;

        if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
            client->cl_import->imp_grant_shrink_disabled) {
                osc_update_next_shrink(client);
                return 0;
        }

        if (ktime_get_seconds() >= next_shrink - 5) {
                /* Get the current RPC size directly, instead of going via:
                 * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
                 * Keep comment here so that it can be found by searching. */
                int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;

                if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
                    client->cl_avail_grant > brw_size)
                        return 1;
                else
                        osc_update_next_shrink(client);
        }
        return 0;
}

#define GRANT_SHRINK_RPC_BATCH  100

static struct delayed_work work;

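/*
 * Periodic work that walks the registered clients, sends at most
 * GRANT_SHRINK_RPC_BATCH grant shrink RPCs per pass, and then re-arms
 * itself for the earliest pending cl_next_shrink_grant deadline.
 */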
static void osc_grant_work_handler(struct work_struct *data)
{
        struct client_obd *cli;
        int rpc_sent;
        bool init_next_shrink = true;
        time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;

        rpc_sent = 0;
        mutex_lock(&client_gtd.gtd_mutex);
        list_for_each_entry(cli, &client_gtd.gtd_clients,
                            cl_grant_chain) {
                if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
                    osc_should_shrink_grant(cli)) {
                        osc_shrink_grant(cli);
                        rpc_sent++;
                }

                if (!init_next_shrink) {
                        if (cli->cl_next_shrink_grant < next_shrink &&
                            cli->cl_next_shrink_grant > ktime_get_seconds())
                                next_shrink = cli->cl_next_shrink_grant;
                } else {
                        init_next_shrink = false;
                        next_shrink = cli->cl_next_shrink_grant;
                }
        }
        mutex_unlock(&client_gtd.gtd_mutex);

        if (client_gtd.gtd_stopped == 1)
                return;

        if (next_shrink > ktime_get_seconds()) {
                time64_t delay = next_shrink - ktime_get_seconds();

                schedule_delayed_work(&work, cfs_time_seconds(delay));
        } else {
                schedule_work(&work.work);
        }
}

void osc_schedule_grant_work(void)
{
        cancel_delayed_work_sync(&work);
        schedule_work(&work.work);
}

/**
 * Start the grant work for returning grant to the server for idle clients.
 */
static int osc_start_grant_work(void)
{
        client_gtd.gtd_stopped = 0;
        mutex_init(&client_gtd.gtd_mutex);
        INIT_LIST_HEAD(&client_gtd.gtd_clients);

        INIT_DELAYED_WORK(&work, osc_grant_work_handler);
        schedule_work(&work.work);

        return 0;
}

static void osc_stop_grant_work(void)
{
        client_gtd.gtd_stopped = 1;
        cancel_delayed_work_sync(&work);
}

static void osc_add_grant_list(struct client_obd *client)
{
        mutex_lock(&client_gtd.gtd_mutex);
        list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
        mutex_unlock(&client_gtd.gtd_mutex);
}

static void osc_del_grant_list(struct client_obd *client)
{
        if (list_empty(&client->cl_grant_chain))
                return;

        mutex_lock(&client_gtd.gtd_mutex);
        list_del_init(&client->cl_grant_chain);
        mutex_unlock(&client_gtd.gtd_mutex);
}

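/*
 * Initialize client-side grant state from the server's connect data:
 * available grant (minus whatever is already consumed by dirty pages),
 * extent tax, chunk size, and the maximum extent size in pages.
 */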
void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
{
        /*
         * ocd_grant is the total grant amount we expect to hold: if we have
         * been evicted, it's the new avail_grant amount, and cl_dirty_pages
         * will drop to 0 as in-flight RPCs fail out; otherwise, it's
         * avail_grant + dirty.
         *
         * race is tolerable here: if we're evicted, but imp_state already
         * left EVICTED state, then cl_dirty_pages must be 0 already.
         */
        spin_lock(&cli->cl_loi_list_lock);
        cli->cl_avail_grant = ocd->ocd_grant;
        if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
                unsigned long consumed = cli->cl_reserved_grant;

                if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
                        consumed += cli->cl_dirty_grant;
                else
                        consumed += cli->cl_dirty_pages << PAGE_SHIFT;
                if (cli->cl_avail_grant < consumed) {
                        CERROR("%s: granted %ld but already consumed %ld\n",
                               cli_name(cli), cli->cl_avail_grant, consumed);
                        cli->cl_avail_grant = 0;
                } else {
                        cli->cl_avail_grant -= consumed;
                }
        }

        if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
                u64 size;
                int chunk_mask;

                /* overhead for each extent insertion */
                cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
                /* determine the appropriate chunk size used by osc_extent. */
                cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
                                          ocd->ocd_grant_blkbits);
                /* max_pages_per_rpc must be chunk aligned */
                chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
                cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
                                             ~chunk_mask) & chunk_mask;
                /* determine maximum extent size, in #pages */
                size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
                cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
                cli->cl_ocd_grant_param = 1;
        } else {
                cli->cl_ocd_grant_param = 0;
                cli->cl_grant_extent_tax = 0;
                cli->cl_chunkbits = PAGE_SHIFT;
                cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
        }
        spin_unlock(&cli->cl_loi_list_lock);

        CDEBUG(D_CACHE,
               "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
               cli_name(cli),
               cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
               cli->cl_max_extent_pages);

        if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
                osc_add_grant_list(cli);
}
EXPORT_SYMBOL(osc_init_grant);

/* We assume that the reason this OSC got a short read is because it read
 * beyond the end of a stripe file; i.e. Lustre is reading a sparse file
 * via the LOV, and it _knows_ it's reading inside the file, it's just that
 * this stripe never got written at or beyond this stripe offset yet. */
static void handle_short_read(int nob_read, size_t page_count,
                              struct brw_page **pga)
{
        char *ptr;
        int i = 0;

        /* skip bytes read OK */
        while (nob_read > 0) {
                LASSERT(page_count > 0);

                if (pga[i]->count > nob_read) {
                        /* EOF inside this page */
                        ptr = kmap(pga[i]->pg) +
                                (pga[i]->off & ~PAGE_MASK);
                        memset(ptr + nob_read, 0, pga[i]->count - nob_read);
                        kunmap(pga[i]->pg);
                        page_count--;
                        i++;
                        break;
                }

                nob_read -= pga[i]->count;
                page_count--;
                i++;
        }

        /* zero remaining pages */
        while (page_count-- > 0) {
                ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
                memset(ptr, 0, pga[i]->count);
                kunmap(pga[i]->pg);
                i++;
        }
}

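/*
 * Sanity-check the per-niobuf return codes in a BRW_WRITE reply: the first
 * negative rc is propagated to the caller, while any other nonzero rc or a
 * mismatch in the number of bytes transferred is treated as -EPROTO.
 */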
static int check_write_rcs(struct ptlrpc_request *req,
                           int requested_nob, int niocount,
                           size_t page_count, struct brw_page **pga)
{
        int     i;
        __u32   *remote_rcs;

        remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
                                                  sizeof(*remote_rcs) *
                                                  niocount);
        if (remote_rcs == NULL) {
                CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
                return -EPROTO;
        }

        /* return error if any niobuf was in error */
        for (i = 0; i < niocount; i++) {
                if ((int)remote_rcs[i] < 0) {
                        CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
                               i, remote_rcs[i], req);
                        return remote_rcs[i];
                }

                if (remote_rcs[i] != 0) {
                        CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
                                i, remote_rcs[i], req);
                        return -EPROTO;
                }
        }
        if (req->rq_bulk != NULL &&
            req->rq_bulk->bd_nob_transferred != requested_nob) {
                CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
                       req->rq_bulk->bd_nob_transferred, requested_nob);
                return -EPROTO;
        }

        return 0;
}

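/*
 * Two brw_pages can be merged into a single niobuf only if their flags are
 * identical and the second page starts exactly where the first one ends; a
 * warning is logged when flags differ in bits outside the set known to be
 * safe to mix.
 */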
static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
        if (p1->flag != p2->flag) {
                unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
                                  OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
                                  OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC);

                /* warn if we try to combine flags that we don't know to be
                 * safe to combine */
                if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
                        CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
                              "report this at https://jira.whamcloud.com/\n",
                              p1->flag, p2->flag);
                }
                return 0;
        }

        return (p1->off + p1->count == p2->off);
}

#if IS_ENABLED(CONFIG_CRC_T10DIF)
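/*
 * T10-PI style bulk checksum: per-sector DIF guard tags are generated for
 * each page into a scratch page, and a regular checksum (OBD_CKSUM_T10_TOP)
 * is then computed over the accumulated guard tags.
 */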
static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
                                   size_t pg_count, struct brw_page **pga,
                                   int opc, obd_dif_csum_fn *fn,
                                   int sector_size,
                                   u32 *check_sum, bool resend)
{
        struct ahash_request *req;
        /* Use Adler as the default checksum type on top of DIF tags */
        unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
        struct page *__page;
        unsigned char *buffer;
        __u16 *guard_start;
        unsigned int bufsize;
        int guard_number;
        int used_number = 0;
        int used;
        u32 cksum;
        int rc = 0;
        int i = 0;

        LASSERT(pg_count > 0);

        __page = alloc_page(GFP_KERNEL);
        if (__page == NULL)
                return -ENOMEM;

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
                       obd_name, cfs_crypto_hash_name(cfs_alg), rc);
                GOTO(out, rc);
        }

        buffer = kmap(__page);
        guard_start = (__u16 *)buffer;
        guard_number = PAGE_SIZE / sizeof(*guard_start);
        CDEBUG(D_PAGE | (resend ? D_HA : 0),
               "GRD tags per page=%u, resend=%u, bytes=%u, pages=%zu\n",
               guard_number, resend, nob, pg_count);

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (unlikely(i == 0 && opc == OST_READ &&
                             OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }

                /*
                 * The remaining guard slots should be able to hold the
                 * checksums of a whole page
                 */
                rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
                                                  pga[i]->off & ~PAGE_MASK,
                                                  count,
                                                  guard_start + used_number,
                                                  guard_number - used_number,
                                                  &used, sector_size,
                                                  fn);
                if (unlikely(resend))
                        CDEBUG(D_PAGE | D_HA,
                               "pga[%u]: used %u off %llu+%u gen checksum: %*phN\n",
                               i, used, pga[i]->off & ~PAGE_MASK, count,
                               (int)(used * sizeof(*guard_start)),
                               guard_start + used_number);
                if (rc)
                        break;

                used_number += used;
                if (used_number == guard_number) {
                        cfs_crypto_hash_update_page(req, __page, 0,
                                used_number * sizeof(*guard_start));
                        used_number = 0;
                }

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }
        kunmap(__page);
        if (rc)
                GOTO(out, rc);

        if (used_number != 0)
                cfs_crypto_hash_update_page(req, __page, 0,
                        used_number * sizeof(*guard_start));

        bufsize = sizeof(cksum);
        cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                cksum++;

        *check_sum = cksum;
out:
        __free_page(__page);
        return rc;
}
#else /* !CONFIG_CRC_T10DIF */
#define obd_dif_ip_fn NULL
#define obd_dif_crc_fn NULL
#define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum, re) \
        -EOPNOTSUPP
#endif /* CONFIG_CRC_T10DIF */

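/*
 * Plain bulk checksum over the data pages, using whatever hash algorithm
 * @cksum_type maps to via cksum_obd2cfs().
 */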
static int osc_checksum_bulk(int nob, size_t pg_count,
                             struct brw_page **pga, int opc,
                             enum cksum_types cksum_type,
                             u32 *cksum)
{
        int                             i = 0;
        struct ahash_request           *req;
        unsigned int                    bufsize;
        unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);

        LASSERT(pg_count > 0);

        req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
        if (IS_ERR(req)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_alg));
                return PTR_ERR(req);
        }

        while (nob > 0 && pg_count > 0) {
                unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;

                /* corrupt the data before we compute the checksum, to
                 * simulate an OST->client data error */
                if (i == 0 && opc == OST_READ &&
                    OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
                        unsigned char *ptr = kmap(pga[i]->pg);
                        int off = pga[i]->off & ~PAGE_MASK;

                        memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
                        kunmap(pga[i]->pg);
                }
                cfs_crypto_hash_update_page(req, pga[i]->pg,
                                            pga[i]->off & ~PAGE_MASK,
                                            count);
                LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
                               (int)(pga[i]->off & ~PAGE_MASK));

                nob -= pga[i]->count;
                pg_count--;
                i++;
        }

        bufsize = sizeof(*cksum);
        cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);

        /* For sending we only compute the wrong checksum instead
         * of corrupting the data so it is still correct on a redo */
        if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
                (*cksum)++;

        return 0;
}

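/*
 * Checksum dispatch for bulk reads and writes: use the T10-PI variant when
 * @cksum_type maps to a DIF checksum function, and the plain bulk checksum
 * otherwise.
 */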
static int osc_checksum_bulk_rw(const char *obd_name,
                                enum cksum_types cksum_type,
                                int nob, size_t pg_count,
                                struct brw_page **pga, int opc,
                                u32 *check_sum, bool resend)
{
        obd_dif_csum_fn *fn = NULL;
        int sector_size = 0;
        int rc;

        ENTRY;
        obd_t10_cksum2dif(cksum_type, &fn, &sector_size);

        if (fn)
                rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
                                             opc, fn, sector_size, check_sum,
                                             resend);
        else
                rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
                                       check_sum);

        RETURN(rc);
}

static inline void osc_release_bounce_pages(struct brw_page **pga,
                                            u32 page_count)
{
#ifdef HAVE_LUSTRE_CRYPTO
        int i;

        for (i = 0; i < page_count; i++) {
                /* Bounce pages allocated by a call to
                 * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
                 * are identified thanks to the PageChecked flag.
                 */
                if (PageChecked(pga[i]->pg))
                        llcrypt_finalize_bounce_page(&pga[i]->pg);
                pga[i]->count -= pga[i]->bp_count_diff;
                pga[i]->off += pga[i]->bp_off_diff;
        }
#endif
}

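/*
 * Build an OST_READ or OST_WRITE request for the given page array: the
 * request comes from the shared pool for writes, and pages belonging to an
 * encrypted inode are replaced by llcrypt bounce pages before the bulk
 * transfer is set up.
 */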
1388 static int
1389 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
1390                      u32 page_count, struct brw_page **pga,
1391                      struct ptlrpc_request **reqp, int resend)
1392 {
1393         struct ptlrpc_request *req;
1394         struct ptlrpc_bulk_desc *desc;
1395         struct ost_body *body;
1396         struct obd_ioobj *ioobj;
1397         struct niobuf_remote *niobuf;
1398         int niocount, i, requested_nob, opc, rc, short_io_size = 0;
1399         struct osc_brw_async_args *aa;
1400         struct req_capsule *pill;
1401         struct brw_page *pg_prev;
1402         void *short_io_buf;
1403         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1404         struct inode *inode = NULL;
1405         bool directio = false;
1406         bool enable_checksum = true;
1407
1408         ENTRY;
1409         if (pga[0]->pg) {
1410                 inode = page2inode(pga[0]->pg);
1411                 if (inode == NULL) {
1412                         /* Try to get reference to inode from cl_page if we are
1413                          * dealing with direct IO, as handled pages are not
1414                          * actual page cache pages.
1415                          */
1416                         struct osc_async_page *oap = brw_page2oap(pga[0]);
1417                         struct cl_page *clpage = oap2cl_page(oap);
1418
1419                         inode = clpage->cp_inode;
1420                         if (inode)
1421                                 directio = true;
1422                 }
1423         }
1424         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1425                 RETURN(-ENOMEM); /* Recoverable */
1426         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1427                 RETURN(-EINVAL); /* Fatal */
1428
1429         if ((cmd & OBD_BRW_WRITE) != 0) {
1430                 opc = OST_WRITE;
1431                 req = ptlrpc_request_alloc_pool(cli->cl_import,
1432                                                 osc_rq_pool,
1433                                                 &RQF_OST_BRW_WRITE);
1434         } else {
1435                 opc = OST_READ;
1436                 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1437         }
1438         if (req == NULL)
1439                 RETURN(-ENOMEM);
1440
1441         if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
1442                 for (i = 0; i < page_count; i++) {
1443                         struct brw_page *pg = pga[i];
1444                         struct page *data_page = NULL;
1445                         bool retried = false;
1446                         bool lockedbymyself;
1447                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1448                         struct address_space *map_orig = NULL;
1449                         pgoff_t index_orig;
1450
1451 retry_encrypt:
1452                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1453                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1454                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1455                         /* The page can already be locked when we arrive here.
1456                          * This is possible when cl_page_assume/vvp_page_assume
1457                          * is stuck on wait_on_page_writeback with the page
1458                          * lock held. In this case there is no risk of the
1459                          * lock being released while we do our encryption
1460                          * processing, because writeback against that page
1461                          * will only end in vvp_page_completion_write/
1462                          * cl_page_completion once the page is fully processed.
1463                          */
1464                         lockedbymyself = trylock_page(pg->pg);
1465                         if (directio) {
1466                                 map_orig = pg->pg->mapping;
1467                                 pg->pg->mapping = inode->i_mapping;
1468                                 index_orig = pg->pg->index;
1469                                 pg->pg->index = pg->off >> PAGE_SHIFT;
1470                         }
1471                         data_page =
1472                                 llcrypt_encrypt_pagecache_blocks(pg->pg,
1473                                                                  nunits, 0,
1474                                                                  GFP_NOFS);
1475                         if (directio) {
1476                                 pg->pg->mapping = map_orig;
1477                                 pg->pg->index = index_orig;
1478                         }
1479                         if (lockedbymyself)
1480                                 unlock_page(pg->pg);
1481                         if (IS_ERR(data_page)) {
1482                                 rc = PTR_ERR(data_page);
1483                                 if (rc == -ENOMEM && !retried) {
1484                                         retried = true;
1485                                         rc = 0;
1486                                         goto retry_encrypt;
1487                                 }
1488                                 ptlrpc_request_free(req);
1489                                 RETURN(rc);
1490                         }
1491                         /* Set PageChecked flag on bounce page for
1492                          * disambiguation in osc_release_bounce_pages().
1493                          */
1494                         SetPageChecked(data_page);
1495                         pg->pg = data_page;
1496                         /* there should be no gap in the middle of the page array */
1497                         if (i == page_count - 1) {
1498                                 struct osc_async_page *oap = brw_page2oap(pg);
1499
1500                                 oa->o_size = oap->oap_count +
1501                                         oap->oap_obj_off + oap->oap_page_off;
1502                         }
1503                         /* count is forced to nunits and the relative offset
1504                          * to 0, so store the old clear-text values
1505                          */
1506                         pg->bp_count_diff = nunits - pg->count;
1507                         pg->count = nunits;
1508                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1509                         pg->off = pg->off & PAGE_MASK;
1510                 }
1511         } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
1512                 for (i = 0; i < page_count; i++) {
1513                         struct brw_page *pg = pga[i];
1514                         u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
1515
1516                         if (nunits & ~LUSTRE_ENCRYPTION_MASK)
1517                                 nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
1518                                         LUSTRE_ENCRYPTION_UNIT_SIZE;
1519                         /* count/off are forced to cover the whole encryption
1520                          * unit size so that all encrypted data is stored on
1521                          * the OST; adjust bp_{count,off}_diff to remember the
1522                          * size of the clear text.
1523                          */
1524                         pg->bp_count_diff = nunits - pg->count;
1525                         pg->count = nunits;
1526                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1527                         pg->off = pg->off & PAGE_MASK;
1528                 }
1529         }
1530
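        /* Count the number of niobufs needed: pages that can_merge_pages()
         * accepts (typically byte-contiguous pages with compatible brw flags)
         * will share a single niobuf_remote entry in the merge step of the
         * copy loop further below.
         */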
1531         for (niocount = i = 1; i < page_count; i++) {
1532                 if (!can_merge_pages(pga[i - 1], pga[i]))
1533                         niocount++;
1534         }
1535
1536         pill = &req->rq_pill;
1537         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1538                              sizeof(*ioobj));
1539         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1540                              niocount * sizeof(*niobuf));
1541
1542         for (i = 0; i < page_count; i++) {
1543                 short_io_size += pga[i]->count;
1544                 if (!inode || !IS_ENCRYPTED(inode)) {
1545                         pga[i]->bp_count_diff = 0;
1546                         pga[i]->bp_off_diff = 0;
1547                 }
1548         }
1549
1550         if (lnet_is_rdma_only_page(pga[0]->pg)) {
1551                 enable_checksum = false;
1552                 short_io_size = 0;
1553         }
1554
1555         /* Check if read/write is small enough to be a short io. */
1556         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1557             !imp_connect_shortio(cli->cl_import))
1558                 short_io_size = 0;
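        /* With short io the data travels inline in the request or reply
         * buffer instead of through a separate bulk descriptor, avoiding the
         * bulk transfer setup for tiny I/Os; see the OBD_FL_SHORT_IO handling
         * below.
         */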
1559
1560         /* If this is an empty RPC to an old server, just ignore it */
1561         if (!short_io_size && !pga[0]->pg) {
1562                 ptlrpc_request_free(req);
1563                 RETURN(-ENODATA);
1564         }
1565
1566         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1567                              opc == OST_READ ? 0 : short_io_size);
1568         if (opc == OST_READ)
1569                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1570                                      short_io_size);
1571
1572         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1573         if (rc) {
1574                 ptlrpc_request_free(req);
1575                 RETURN(rc);
1576         }
1577         osc_set_io_portal(req);
1578
1579         ptlrpc_at_set_req_timeout(req);
1580         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1581          * retry logic */
1582         req->rq_no_retry_einprogress = 1;
1583
1584         if (short_io_size != 0) {
1585                 desc = NULL;
1586                 short_io_buf = NULL;
1587                 goto no_bulk;
1588         }
1589
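        /* Full bulk path: attach a bulk descriptor to the request.  For
         * OST_WRITE the server GETs the data from our pages (we are the
         * source); for OST_READ it PUTs the data into them (we are the sink).
         */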
1590         desc = ptlrpc_prep_bulk_imp(req, page_count,
1591                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1592                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1593                         PTLRPC_BULK_PUT_SINK),
1594                 OST_BULK_PORTAL,
1595                 &ptlrpc_bulk_kiov_pin_ops);
1596
1597         if (desc == NULL)
1598                 GOTO(out, rc = -ENOMEM);
1599         /* NB: the request now owns desc and will free it when the request is freed */
1600 no_bulk:
1601         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1602         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1603         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1604         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1605
1606         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1607
1608         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1609          * and from_kgid(), because these operations are asynchronous.
1610          * Fortunately, the oa variable contains valid o_uid and o_gid for
1611          * these two operations, and filling them is enough for nrs-tbf, see
1612          * LU-9658. OBD_MD_FLUID and OBD_MD_FLGID are not set in order to
1613          * avoid breaking other processing logic */
1614         body->oa.o_uid = oa->o_uid;
1615         body->oa.o_gid = oa->o_gid;
1616
1617         obdo_to_ioobj(oa, ioobj);
1618         ioobj->ioo_bufcnt = niocount;
1619         /* The high bits of ioo_max_brw tell the server the _maximum_ number
1620          * of bulks that might be sent for this request.  The actual number is
1621          * decided when the RPC is finally sent in ptlrpc_register_bulk().  It
1622          * sends "max - 1" for compatibility with old clients sending "0", and
1623          * also so that the actual maximum is a power of two, not one less. LU-1431 */
1624         if (desc != NULL)
1625                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1626         else /* short io */
1627                 ioobj_max_brw_set(ioobj, 0);
1628
1629         if (short_io_size != 0) {
1630                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1631                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1632                         body->oa.o_flags = 0;
1633                 }
1634                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1635                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1636                        short_io_size);
1637                 if (opc == OST_WRITE) {
1638                         short_io_buf = req_capsule_client_get(pill,
1639                                                               &RMF_SHORT_IO);
1640                         LASSERT(short_io_buf != NULL);
1641                 }
1642         }
1643
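        /* Walk the page array: for a short-io write copy each page's data
         * inline into the request buffer, otherwise attach the page as a bulk
         * fragment; either way, merge byte-contiguous pages into a single
         * niobuf_remote entry.
         */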
1644         LASSERT(page_count > 0);
1645         pg_prev = pga[0];
1646         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1647                 struct brw_page *pg = pga[i];
1648                 int poff = pg->off & ~PAGE_MASK;
1649
1650                 LASSERT(pg->count > 0);
1651                 /* make sure there is no gap in the middle of the page array */
1652                 LASSERTF(page_count == 1 ||
1653                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1654                           ergo(i > 0 && i < page_count - 1,
1655                                poff == 0 && pg->count == PAGE_SIZE)   &&
1656                           ergo(i == page_count - 1, poff == 0)),
1657                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1658                          i, page_count, pg, pg->off, pg->count);
1659                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1660                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1661                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1662                          i, page_count,
1663                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1664                          pg_prev->pg, page_private(pg_prev->pg),
1665                          pg_prev->pg->index, pg_prev->off);
1666                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1667                         (pg->flag & OBD_BRW_SRVLOCK));
1668                 if (short_io_size != 0 && opc == OST_WRITE) {
1669                         unsigned char *ptr = kmap_atomic(pg->pg);
1670
1671                         LASSERT(short_io_size >= requested_nob + pg->count);
1672                         memcpy(short_io_buf + requested_nob,
1673                                ptr + poff,
1674                                pg->count);
1675                         kunmap_atomic(ptr);
1676                 } else if (short_io_size == 0) {
1677                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1678                                                          pg->count);
1679                 }
1680                 requested_nob += pg->count;
1681
1682                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1683                         niobuf--;
1684                         niobuf->rnb_len += pg->count;
1685                 } else {
1686                         niobuf->rnb_offset = pg->off;
1687                         niobuf->rnb_len    = pg->count;
1688                         niobuf->rnb_flags  = pg->flag;
1689                 }
1690                 pg_prev = pg;
1691         }
1692
1693         LASSERTF((void *)(niobuf - niocount) ==
1694                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1695                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1696                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1697
1698         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1699         if (resend) {
1700                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1701                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1702                         body->oa.o_flags = 0;
1703                 }
1704                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1705         }
1706
1707         if (osc_should_shrink_grant(cli))
1708                 osc_shrink_grant_local(cli, &body->oa);
1709
1710         if (!cli->cl_checksum || sptlrpc_flavor_has_bulk(&req->rq_flvr))
1711                 enable_checksum = false;
1712
1713         /* size[REQ_REC_OFF] still sizeof (*body) */
1714         if (opc == OST_WRITE) {
1715                 if (enable_checksum) {
1716                         /* store cl_cksum_type in a local variable since
1717                          * it can be changed via lprocfs */
1718                         enum cksum_types cksum_type = cli->cl_cksum_type;
1719
1720                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1721                                 body->oa.o_flags = 0;
1722
1723                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1724                                                                 cksum_type);
1725                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1726
1727                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1728                                                   requested_nob, page_count,
1729                                                   pga, OST_WRITE,
1730                                                   &body->oa.o_cksum, resend);
1731                         if (rc < 0) {
1732                                 CDEBUG(D_PAGE, "failed to checksum: rc = %d\n",
1733                                        rc);
1734                                 GOTO(out, rc);
1735                         }
1736                         CDEBUG(D_PAGE | (resend ? D_HA : 0),
1737                                "checksum at write origin: %x (%x)\n",
1738                                body->oa.o_cksum, cksum_type);
1739
1740                         /* save this in 'oa', too, for later checking */
1741                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1742                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1743                                                            cksum_type);
1744                 } else {
1745                         /* clear out the checksum flag, in case this is a
1746                          * resend but cl_checksum is no longer set. b=11238 */
1747                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1748                 }
1749                 oa->o_cksum = body->oa.o_cksum;
1750                 /* 1 RC per niobuf */
1751                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1752                                      sizeof(__u32) * niocount);
1753         } else {
1754                 if (enable_checksum) {
1755                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1756                                 body->oa.o_flags = 0;
1757                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1758                                 cli->cl_cksum_type);
1759                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1760                 }
1761
1762                 /* The client cksum has already been copied to the wire obdo
1763                  * in the earlier lustre_set_wire_obdo(); in case a bulk-read
1764                  * is being resent due to a cksum error, this allows the
1765                  * server to check+dump pages on its side */
1766         }
1767         ptlrpc_request_set_replen(req);
1768
1769         aa = ptlrpc_req_async_args(aa, req);
1770         aa->aa_oa = oa;
1771         aa->aa_requested_nob = requested_nob;
1772         aa->aa_nio_count = niocount;
1773         aa->aa_page_count = page_count;
1774         aa->aa_resends = 0;
1775         aa->aa_ppga = pga;
1776         aa->aa_cli = cli;
1777         INIT_LIST_HEAD(&aa->aa_oaps);
1778
1779         *reqp = req;
1780         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1781         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1782                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1783                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1784         RETURN(0);
1785
1786  out:
1787         ptlrpc_req_finished(req);
1788         RETURN(rc);
1789 }
1790
1791 char dbgcksum_file_name[PATH_MAX];
1792
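/*
 * Dump the raw pages of a checksum-mismatched bulk to a file next to the
 * debug log so they can be inspected offline.  The file name encodes the
 * parent FID, the byte range and both checksums; an illustrative (not
 * literal) example, assuming the default debug path:
 *   /tmp/lustre-log-checksum_dump-osc-[0x200000401:0x1:0x0]:[0-1048575]-a1b2c3d4-e5f6a7b8
 */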
1793 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1794                                 struct brw_page **pga, __u32 server_cksum,
1795                                 __u32 client_cksum)
1796 {
1797         struct file *filp;
1798         int rc, i;
1799         unsigned int len;
1800         char *buf;
1801
1802         /* only keep a dump of the pages for the first error on a given
1803          * file/fid range, not for subsequent resends/retries. */
1804         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1805                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1806                  (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
1807                   libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1808                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1809                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1810                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1811                  pga[0]->off,
1812                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1813                  client_cksum, server_cksum);
1814         CWARN("dumping checksum data to %s\n", dbgcksum_file_name);
1815         filp = filp_open(dbgcksum_file_name,
1816                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1817         if (IS_ERR(filp)) {
1818                 rc = PTR_ERR(filp);
1819                 if (rc == -EEXIST)
1820                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1821                                "checksum error: rc = %d\n", dbgcksum_file_name,
1822                                rc);
1823                 else
1824                         CERROR("%s: can't open to dump pages with checksum "
1825                                "error: rc = %d\n", dbgcksum_file_name, rc);
1826                 return;
1827         }
1828
1829         for (i = 0; i < page_count; i++) {
1830                 len = pga[i]->count;
1831                 buf = kmap(pga[i]->pg);
1832                 while (len != 0) {
1833                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1834                         if (rc < 0) {
1835                                 CERROR("%s: wanted to write %u but got %d "
1836                                        "error\n", dbgcksum_file_name, len, rc);
1837                                 break;
1838                         }
1839                         len -= rc;
1840                         buf += rc;
1841                 }
1842                 kunmap(pga[i]->pg);
1843         }
1844
1845         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1846         if (rc)
1847                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1848         filp_close(filp, NULL);
1849
1850         libcfs_debug_dumplog();
1851 }
1852
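/*
 * Re-checksum the write buffer on the client and compare the three values
 * (original client checksum, server checksum, fresh client checksum) to
 * localize where the data changed: before send, in transit, or whether the
 * checksum type itself was mismatched.  Returns 0 if client and server
 * agree, 1 on a genuine mismatch, which the caller turns into a resend
 * (-EAGAIN).
 */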
1853 static int
1854 check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
1855                      __u32 client_cksum, __u32 server_cksum,
1856                      struct osc_brw_async_args *aa)
1857 {
1858         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1859         enum cksum_types cksum_type;
1860         obd_dif_csum_fn *fn = NULL;
1861         int sector_size = 0;
1862         __u32 new_cksum;
1863         char *msg;
1864         int rc;
1865
1866         if (server_cksum == client_cksum) {
1867                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1868                 return 0;
1869         }
1870
1871         if (aa->aa_cli->cl_checksum_dump)
1872                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1873                                     server_cksum, client_cksum);
1874
1875         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1876                                            oa->o_flags : 0);
1877
1878         switch (cksum_type) {
1879         case OBD_CKSUM_T10IP512:
1880                 fn = obd_dif_ip_fn;
1881                 sector_size = 512;
1882                 break;
1883         case OBD_CKSUM_T10IP4K:
1884                 fn = obd_dif_ip_fn;
1885                 sector_size = 4096;
1886                 break;
1887         case OBD_CKSUM_T10CRC512:
1888                 fn = obd_dif_crc_fn;
1889                 sector_size = 512;
1890                 break;
1891         case OBD_CKSUM_T10CRC4K:
1892                 fn = obd_dif_crc_fn;
1893                 sector_size = 4096;
1894                 break;
1895         default:
1896                 break;
1897         }
1898
1899         if (fn)
1900                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1901                                              aa->aa_page_count, aa->aa_ppga,
1902                                              OST_WRITE, fn, sector_size,
1903                                              &new_cksum, true);
1904         else
1905                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1906                                        aa->aa_ppga, OST_WRITE, cksum_type,
1907                                        &new_cksum);
1908
1909         if (rc < 0)
1910                 msg = "failed to calculate the client write checksum";
1911         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1912                 msg = "the server did not use the checksum type specified in "
1913                       "the original request - likely a protocol problem";
1914         else if (new_cksum == server_cksum)
1915                 msg = "changed on the client after we checksummed it - "
1916                       "likely false positive due to mmap IO (bug 11742)";
1917         else if (new_cksum == client_cksum)
1918                 msg = "changed in transit before arrival at OST";
1919         else
1920                 msg = "changed in transit AND doesn't match the original - "
1921                       "likely false positive due to mmap IO (bug 11742)";
1922
1923         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1924                            DFID " object "DOSTID" extent [%llu-%llu], original "
1925                            "client csum %x (type %x), server csum %x (type %x),"
1926                            " client csum now %x\n",
1927                            obd_name, msg, libcfs_nid2str(peer->nid),
1928                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1929                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1930                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1931                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1932                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1933                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1934                            client_cksum,
1935                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1936                            server_cksum, cksum_type, new_cksum);
1937         return 1;
1938 }
1939
1940 /* Note rc enters this function as number of bytes transferred */
1941 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1942 {
1943         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1944         struct client_obd *cli = aa->aa_cli;
1945         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1946         const struct lnet_process_id *peer =
1947                 &req->rq_import->imp_connection->c_peer;
1948         struct ost_body *body;
1949         u32 client_cksum = 0;
1950         struct inode *inode;
1951         unsigned int blockbits = 0, blocksize = 0;
1952
1953         ENTRY;
1954
1955         if (rc < 0 && rc != -EDQUOT) {
1956                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
1957                 RETURN(rc);
1958         }
1959
1960         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1961         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1962         if (body == NULL) {
1963                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
1964                 RETURN(-EPROTO);
1965         }
1966
1967         /* set/clear over quota flag for a uid/gid/projid */
1968         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1969             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1970                 unsigned qid[LL_MAXQUOTAS] = {
1971                                          body->oa.o_uid, body->oa.o_gid,
1972                                          body->oa.o_projid };
1973                 CDEBUG(D_QUOTA,
1974                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
1975                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
1976                        body->oa.o_valid, body->oa.o_flags);
1977                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
1978                                 body->oa.o_flags);
1979         }
1980
1981         osc_update_grant(cli, body);
1982
1983         if (rc < 0)
1984                 RETURN(rc);
1985
1986         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1987                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1988
1989         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1990                 if (rc > 0) {
1991                         CERROR("%s: unexpected positive size %d\n",
1992                                obd_name, rc);
1993                         RETURN(-EPROTO);
1994                 }
1995
1996                 if (req->rq_bulk != NULL &&
1997                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1998                         RETURN(-EAGAIN);
1999
2000                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
2001                     check_write_checksum(&body->oa, peer, client_cksum,
2002                                          body->oa.o_cksum, aa))
2003                         RETURN(-EAGAIN);
2004
2005                 rc = check_write_rcs(req, aa->aa_requested_nob,
2006                                      aa->aa_nio_count, aa->aa_page_count,
2007                                      aa->aa_ppga);
2008                 GOTO(out, rc);
2009         }
2010
2011         /* The rest of this function executes only for OST_READs */
2012
2013         if (req->rq_bulk == NULL) {
2014                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
2015                                           RCL_SERVER);
2016                 LASSERT(rc == req->rq_status);
2017         } else {
2018                 /* if unwrap_bulk failed, return -EAGAIN to retry */
2019                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
2020         }
2021         if (rc < 0)
2022                 GOTO(out, rc = -EAGAIN);
2023
2024         if (rc > aa->aa_requested_nob) {
2025                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
2026                        rc, aa->aa_requested_nob);
2027                 RETURN(-EPROTO);
2028         }
2029
2030         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
2031                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
2032                        rc, req->rq_bulk->bd_nob_transferred);
2033                 RETURN(-EPROTO);
2034         }
2035
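        /* For a short io read the data arrived inline in the reply buffer
         * rather than via bulk; copy it out into the destination pages here.
         */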
2036         if (req->rq_bulk == NULL) {
2037                 /* short io */
2038                 int nob, pg_count, i = 0;
2039                 unsigned char *buf;
2040
2041                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
2042                 pg_count = aa->aa_page_count;
2043                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
2044                                                    rc);
2045                 nob = rc;
2046                 while (nob > 0 && pg_count > 0) {
2047                         unsigned char *ptr;
2048                         int count = aa->aa_ppga[i]->count > nob ?
2049                                     nob : aa->aa_ppga[i]->count;
2050
2051                         CDEBUG(D_CACHE, "page %p count %d\n",
2052                                aa->aa_ppga[i]->pg, count);
2053                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
2054                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
2055                                count);
2056                         kunmap_atomic((void *) ptr);
2057
2058                         buf += count;
2059                         nob -= count;
2060                         i++;
2061                         pg_count--;
2062                 }
2063         }
2064
2065         if (rc < aa->aa_requested_nob)
2066                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
2067
2068         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
2069                 static int cksum_counter;
2070                 u32 server_cksum = body->oa.o_cksum;
2071                 int nob = rc;
2072                 char *via = "";
2073                 char *router = "";
2074                 enum cksum_types cksum_type;
2075                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
2076                         body->oa.o_flags : 0;
2077
2078                 cksum_type = obd_cksum_type_unpack(o_flags);
2079                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2080                                           aa->aa_page_count, aa->aa_ppga,
2081                                           OST_READ, &client_cksum, false);
2082                 if (rc < 0)
2083                         GOTO(out, rc);
2084
2085                 if (req->rq_bulk != NULL &&
2086                     peer->nid != req->rq_bulk->bd_sender) {
2087                         via = " via ";
2088                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
2089                 }
2090
2091                 if (server_cksum != client_cksum) {
2092                         struct ost_body *clbody;
2093                         __u32 client_cksum2;
2094                         u32 page_count = aa->aa_page_count;
2095
2096                         osc_checksum_bulk_rw(obd_name, cksum_type, nob,
2097                                              page_count, aa->aa_ppga,
2098                                              OST_READ, &client_cksum2, true);
2099                         clbody = req_capsule_client_get(&req->rq_pill,
2100                                                         &RMF_OST_BODY);
2101                         if (cli->cl_checksum_dump)
2102                                 dump_all_bulk_pages(&clbody->oa, page_count,
2103                                                     aa->aa_ppga, server_cksum,
2104                                                     client_cksum);
2105
2106                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2107                                            "%s%s%s inode "DFID" object "DOSTID
2108                                            " extent [%llu-%llu], client %x/%x, "
2109                                            "server %x, cksum_type %x\n",
2110                                            obd_name,
2111                                            libcfs_nid2str(peer->nid),
2112                                            via, router,
2113                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2114                                                 clbody->oa.o_parent_seq : 0ULL,
2115                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2116                                                 clbody->oa.o_parent_oid : 0,
2117                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2118                                                 clbody->oa.o_parent_ver : 0,
2119                                            POSTID(&body->oa.o_oi),
2120                                            aa->aa_ppga[0]->off,
2121                                            aa->aa_ppga[page_count-1]->off +
2122                                            aa->aa_ppga[page_count-1]->count - 1,
2123                                            client_cksum, client_cksum2,
2124                                            server_cksum, cksum_type);
2125                         cksum_counter = 0;
2126                         aa->aa_oa->o_cksum = client_cksum;
2127                         rc = -EAGAIN;
2128                 } else {
2129                         cksum_counter++;
2130                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2131                         rc = 0;
2132                 }
2133         } else if (unlikely(client_cksum)) {
2134                 static int cksum_missed;
2135
2136                 cksum_missed++;
2137                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2138                         CERROR("%s: checksum %u requested from %s but not sent\n",
2139                                obd_name, cksum_missed,
2140                                libcfs_nid2str(peer->nid));
2141         } else {
2142                 rc = 0;
2143         }
2144
2145         inode = page2inode(aa->aa_ppga[0]->pg);
2146         if (inode == NULL) {
2147                 /* Try to get a reference to the inode from the cl_page
2148                  * when doing direct IO, as the pages handled here are
2149                  * not actual page cache pages.
2150                  */
2151                 struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
2152
2153                 inode = oap2cl_page(oap)->cp_inode;
2154                 if (inode) {
2155                         blockbits = inode->i_blkbits;
2156                         blocksize = 1 << blockbits;
2157                 }
2158         }
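        /* Read side of encryption: decrypt the returned data in place, one
         * LUSTRE_ENCRYPTION_UNIT_SIZE chunk at a time.  All-zero units are
         * treated as holes and left untouched; direct IO uses the in-place
         * block decrypt variant since its pages are not in the page cache.
         */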
2159         if (inode && IS_ENCRYPTED(inode)) {
2160                 int idx;
2161
2162                 if (!llcrypt_has_encryption_key(inode)) {
2163                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2164                         GOTO(out, rc);
2165                 }
2166                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2167                         struct brw_page *pg = aa->aa_ppga[idx];
2168                         unsigned int offs = 0;
2169
2170                         while (offs < PAGE_SIZE) {
2171                                 /* do not decrypt if page is all 0s */
2172                                 if (memchr_inv(page_address(pg->pg) + offs, 0,
2173                                          LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
2174                                         /* if the page is empty, forward this
2175                                          * info to upper layers
2176                                          * (ll_io_zero_page) by clearing PagePrivate2
2177                                          */
2178                                         if (!offs)
2179                                                 ClearPagePrivate2(pg->pg);
2180                                         break;
2181                                 }
2182
2183                                         /* This is the direct IO case. Directly
2184                                          * call the decrypt function that takes
2185                                          * the inode as an input parameter. The
2186                                          * page does not need to be locked.
2187                                          */
2188                                          */
2189                                         u64 lblk_num =
2190                                                 ((u64)(pg->off >> PAGE_SHIFT) <<
2191                                                      (PAGE_SHIFT - blockbits)) +
2192                                                        (offs >> blockbits);
2193                                         unsigned int i;
2194
2195                                         for (i = offs;
2196                                              i < offs +
2197                                                     LUSTRE_ENCRYPTION_UNIT_SIZE;
2198                                              i += blocksize, lblk_num++) {
2199                                                 rc =
2200                                                   llcrypt_decrypt_block_inplace(
2201                                                           inode, pg->pg,
2202                                                           blocksize, i,
2203                                                           lblk_num);
2204                                                 if (rc)
2205                                                         break;
2206                                         }
2207                                 } else {
2208                                         rc = llcrypt_decrypt_pagecache_blocks(
2209                                                 pg->pg,
2210                                                 LUSTRE_ENCRYPTION_UNIT_SIZE,
2211                                                 offs);
2212                                 }
2213                                 if (rc)
2214                                         GOTO(out, rc);
2215
2216                                 offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
2217                         }
2218                 }
2219         }
2220
2221 out:
2222         if (rc >= 0)
2223                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2224                                      aa->aa_oa, &body->oa);
2225
2226         RETURN(rc);
2227 }
2228
2229 static int osc_brw_redo_request(struct ptlrpc_request *request,
2230                                 struct osc_brw_async_args *aa, int rc)
2231 {
2232         struct ptlrpc_request *new_req;
2233         struct osc_brw_async_args *new_aa;
2234         struct osc_async_page *oap;
2235         ENTRY;
2236
2237         /* The message below is checked in replay-ost-single.sh test_8ae */
2238         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2239                   "redo for recoverable error %d", rc);
2240
2241         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2242                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2243                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2244                                   aa->aa_ppga, &new_req, 1);
2245         if (rc)
2246                 RETURN(rc);
2247
2248         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2249                 if (oap->oap_request != NULL) {
2250                         LASSERTF(request == oap->oap_request,
2251                                  "request %p != oap_request %p\n",
2252                                  request, oap->oap_request);
2253                 }
2254         }
2255         /*
2256          * The new request takes over pga and oaps from the old request.
2257          * Note that copying a list_head doesn't work; we need to move it...
2258          */
2259         aa->aa_resends++;
2260         new_req->rq_interpret_reply = request->rq_interpret_reply;
2261         new_req->rq_async_args = request->rq_async_args;
2262         new_req->rq_commit_cb = request->rq_commit_cb;
2263         /* cap the resend delay to the current request timeout; this is
2264          * similar to what ptlrpc does (see after_reply()) */
2265         if (aa->aa_resends > new_req->rq_timeout)
2266                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2267         else
2268                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
2269         new_req->rq_generation_set = 1;
2270         new_req->rq_import_generation = request->rq_import_generation;
2271
2272         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2273
2274         INIT_LIST_HEAD(&new_aa->aa_oaps);
2275         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2276         INIT_LIST_HEAD(&new_aa->aa_exts);
2277         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2278         new_aa->aa_resends = aa->aa_resends;
2279
2280         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2281                 if (oap->oap_request) {
2282                         ptlrpc_req_finished(oap->oap_request);
2283                         oap->oap_request = ptlrpc_request_addref(new_req);
2284                 }
2285         }
2286
2287         /* XXX: This code will run into problems if we ever support adding
2288          * a series of BRW RPCs into a self-defined ptlrpc_request_set and
2289          * waiting for all of them to finish. We should inherit the request
2290          * set from the old request. */
2291         ptlrpcd_add_req(new_req);
2292
2293         DEBUG_REQ(D_INFO, new_req, "new request");
2294         RETURN(0);
2295 }
2296
2297 /*
2298  * Ugh, we want disk allocation on the target to happen in offset order.  We'll
2299  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2300  * fine for our small page arrays and doesn't require allocation.  It's an
2301  * insertion sort that swaps elements that are strides apart, shrinking the
2302  * stride down until it's '1' and the array is sorted.
2303  */
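/*
 * A minimal sketch of the pass structure for num = 10: the setup loop below
 * grows the stride 1 -> 4 -> 13 and stops once 13 >= num; the do-while then
 * divides by 3 each round, so the array gets one insertion-sort pass at
 * stride 4 followed by a final pass at stride 1, which leaves it sorted.
 */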
2304 static void sort_brw_pages(struct brw_page **array, int num)
2305 {
2306         int stride, i, j;
2307         struct brw_page *tmp;
2308
2309         if (num == 1)
2310                 return;
2311         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2312                 ;
2313
2314         do {
2315                 stride /= 3;
2316                 for (i = stride ; i < num ; i++) {
2317                         tmp = array[i];
2318                         j = i;
2319                         while (j >= stride && array[j - stride]->off > tmp->off) {
2320                                 array[j] = array[j - stride];
2321                                 j -= stride;
2322                         }
2323                         array[j] = tmp;
2324                 }
2325         } while (stride > 1);
2326 }
2327
2328 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2329 {
2330         LASSERT(ppga != NULL);
2331         OBD_FREE_PTR_ARRAY_LARGE(ppga, count);
2332 }
2333
2334 static int brw_interpret(const struct lu_env *env,
2335                          struct ptlrpc_request *req, void *args, int rc)
2336 {
2337         struct osc_brw_async_args *aa = args;
2338         struct osc_extent *ext;
2339         struct osc_extent *tmp;
2340         struct client_obd *cli = aa->aa_cli;
2341         unsigned long transferred = 0;
2342
2343         ENTRY;
2344
2345         rc = osc_brw_fini_request(req, rc);
2346         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2347
2348         /* restore clear text pages */
2349         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2350
2351         /*
2352          * When server returns -EINPROGRESS, client should always retry
2353          * regardless of the number of times the bulk was resent already.
2354          */
2355         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2356                 if (req->rq_import_generation !=
2357                     req->rq_import->imp_generation) {
2358                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2359                                ""DOSTID", rc = %d.\n",
2360                                req->rq_import->imp_obd->obd_name,
2361                                POSTID(&aa->aa_oa->o_oi), rc);
2362                 } else if (rc == -EINPROGRESS ||
2363                            client_should_resend(aa->aa_resends, aa->aa_cli)) {
2364                         rc = osc_brw_redo_request(req, aa, rc);
2365                 } else {
2366                         CERROR("%s: too many resent retries for object: "
2367                                "%llu:%llu, rc = %d.\n",
2368                                req->rq_import->imp_obd->obd_name,
2369                                POSTID(&aa->aa_oa->o_oi), rc);
2370                 }
2371
2372                 if (rc == 0)
2373                         RETURN(0);
2374                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2375                         rc = -EIO;
2376         }
2377
2378         if (rc == 0) {
2379                 struct obdo *oa = aa->aa_oa;
2380                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2381                 unsigned long valid = 0;
2382                 struct cl_object *obj;
2383                 struct osc_async_page *last;
2384
2385                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2386                 obj = osc2cl(last->oap_obj);
2387
2388                 cl_object_attr_lock(obj);
2389                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2390                         attr->cat_blocks = oa->o_blocks;
2391                         valid |= CAT_BLOCKS;
2392                 }
2393                 if (oa->o_valid & OBD_MD_FLMTIME) {
2394                         attr->cat_mtime = oa->o_mtime;
2395                         valid |= CAT_MTIME;
2396                 }
2397                 if (oa->o_valid & OBD_MD_FLATIME) {
2398                         attr->cat_atime = oa->o_atime;
2399                         valid |= CAT_ATIME;
2400                 }
2401                 if (oa->o_valid & OBD_MD_FLCTIME) {
2402                         attr->cat_ctime = oa->o_ctime;
2403                         valid |= CAT_CTIME;
2404                 }
2405
2406                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2407                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2408                         loff_t last_off = last->oap_count + last->oap_obj_off +
2409                                 last->oap_page_off;
2410
2411                         /* Change the file size if this is an out-of-quota or
2412                          * direct IO write and it extends the file size */
2413                         if (loi->loi_lvb.lvb_size < last_off) {
2414                                 attr->cat_size = last_off;
2415                                 valid |= CAT_SIZE;
2416                         }
2417                         /* Extend KMS if it's not a lockless write */
2418                         if (loi->loi_kms < last_off &&
2419                             oap2osc_page(last)->ops_srvlock == 0) {
2420                                 attr->cat_kms = last_off;
2421                                 valid |= CAT_KMS;
2422                         }
2423                 }
2424
2425                 if (valid != 0)
2426                         cl_object_attr_update(env, obj, attr, valid);
2427                 cl_object_attr_unlock(obj);
2428         }
2429         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2430         aa->aa_oa = NULL;
2431
2432         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2433                 osc_inc_unstable_pages(req);
2434
2435         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2436                 list_del_init(&ext->oe_link);
2437                 osc_extent_finish(env, ext, 1,
2438                                   rc && req->rq_no_delay ? -EAGAIN : rc);
2439         }
2440         LASSERT(list_empty(&aa->aa_exts));
2441         LASSERT(list_empty(&aa->aa_oaps));
2442
2443         transferred = (req->rq_bulk == NULL ? /* short io */
2444                        aa->aa_requested_nob :
2445                        req->rq_bulk->bd_nob_transferred);
2446
2447         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2448         ptlrpc_lprocfs_brw(req, transferred);
2449
2450         spin_lock(&cli->cl_loi_list_lock);
2451         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2452          * is called so we know whether to go to sync BRWs or wait for more
2453          * RPCs to complete */
2454         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2455                 cli->cl_w_in_flight--;
2456         else
2457                 cli->cl_r_in_flight--;
2458         osc_wake_cache_waiters(cli);
2459         spin_unlock(&cli->cl_loi_list_lock);
2460
2461         osc_io_unplug(env, cli, NULL);
2462         RETURN(rc);
2463 }
2464
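/*
 * Commit callback: invoked once the transaction covering this BRW write has
 * committed on the OST, at which point the written pages no longer need to
 * be tracked as unstable.
 */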
2465 static void brw_commit(struct ptlrpc_request *req)
2466 {
2467         /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2468          * this callback (invoked via rq_commit_cb), we need to ensure
2469          * osc_dec_unstable_pages is still called. Otherwise unstable
2470          * pages may be leaked. */
2471         spin_lock(&req->rq_lock);
2472         if (likely(req->rq_unstable)) {
2473                 req->rq_unstable = 0;
2474                 spin_unlock(&req->rq_lock);
2475
2476                 osc_dec_unstable_pages(req);
2477         } else {
2478                 req->rq_committed = 1;
2479                 spin_unlock(&req->rq_lock);
2480         }
2481 }
2482
2483 /**
2484  * Build an RPC from the list of extents @ext_list. The caller must ensure
2485  * that the total number of pages in this list does not exceed the maximum
2486  * pages per RPC. Extents in the list must be in OES_RPC state.
2487  */
2488 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2489                   struct list_head *ext_list, int cmd)
2490 {
2491         struct ptlrpc_request           *req = NULL;
2492         struct osc_extent               *ext;
2493         struct brw_page                 **pga = NULL;
2494         struct osc_brw_async_args       *aa = NULL;
2495         struct obdo                     *oa = NULL;
2496         struct osc_async_page           *oap;
2497         struct osc_object               *obj = NULL;
2498         struct cl_req_attr              *crattr = NULL;
2499         loff_t                          starting_offset = OBD_OBJECT_EOF;
2500         loff_t                          ending_offset = 0;
2501         /* '1' for consistency with code that checks !mpflag to restore */
2502         int mpflag = 1;
2503         int                             mem_tight = 0;
2504         int                             page_count = 0;
2505         bool                            soft_sync = false;
2506         bool                            ndelay = false;
2507         int                             i;
2508         int                             grant = 0;
2509         int                             rc;
2510         __u32                           layout_version = 0;
2511         LIST_HEAD(rpc_list);
2512         struct ost_body                 *body;
2513         ENTRY;
2514         LASSERT(!list_empty(ext_list));
2515
2516         /* add pages into rpc_list to build BRW rpc */
2517         list_for_each_entry(ext, ext_list, oe_link) {
2518                 LASSERT(ext->oe_state == OES_RPC);
2519                 mem_tight |= ext->oe_memalloc;
2520                 grant += ext->oe_grants;
2521                 page_count += ext->oe_nr_pages;
2522                 layout_version = max(layout_version, ext->oe_layout_version);
2523                 if (obj == NULL)
2524                         obj = ext->oe_obj;
2525         }
2526
2527         soft_sync = osc_over_unstable_soft_limit(cli);
2528         if (mem_tight)
2529                 mpflag = memalloc_noreclaim_save();
2530
2531         OBD_ALLOC_PTR_ARRAY_LARGE(pga, page_count);
2532         if (pga == NULL)
2533                 GOTO(out, rc = -ENOMEM);
2534
2535         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2536         if (oa == NULL)
2537                 GOTO(out, rc = -ENOMEM);
2538
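        /* Flatten every page of every extent into the pga array and track the
         * overall byte range covered; the LASSERTs below enforce that only
         * the first and last pages of the RPC may be partial.
         */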
2539         i = 0;
2540         list_for_each_entry(ext, ext_list, oe_link) {
2541                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2542                         if (mem_tight)
2543                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2544                         if (soft_sync)
2545                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2546                         pga[i] = &oap->oap_brw_page;
2547                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2548                         i++;
2549
2550                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2551                         if (starting_offset == OBD_OBJECT_EOF ||
2552                             starting_offset > oap->oap_obj_off)
2553                                 starting_offset = oap->oap_obj_off;
2554                         else
2555                                 LASSERT(oap->oap_page_off == 0);
2556                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2557                                 ending_offset = oap->oap_obj_off +
2558                                                 oap->oap_count;
2559                         else
2560                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2561                                         PAGE_SIZE);
2562                 }
2563                 if (ext->oe_ndelay)
2564                         ndelay = true;
2565         }
2566
2567         /* first page in the list */
2568         oap = list_first_entry(&rpc_list, typeof(*oap), oap_rpc_item);
2569
2570         crattr = &osc_env_info(env)->oti_req_attr;
2571         memset(crattr, 0, sizeof(*crattr));
2572         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2573         crattr->cra_flags = ~0ULL;
2574         crattr->cra_page = oap2cl_page(oap);
2575         crattr->cra_oa = oa;
2576         cl_req_attr_set(env, osc2cl(obj), crattr);
2577
2578         if (cmd == OBD_BRW_WRITE) {
2579                 oa->o_grant_used = grant;
2580                 if (layout_version > 0) {
2581                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2582                                PFID(&oa->o_oi.oi_fid), layout_version);
2583
2584                         oa->o_layout_version = layout_version;
2585                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2586                 }
2587         }
2588
2589         sort_brw_pages(pga, page_count);
2590         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2591         if (rc != 0) {
2592                 CERROR("prep_req failed: %d\n", rc);
2593                 GOTO(out, rc);
2594         }
2595
2596         req->rq_commit_cb = brw_commit;
2597         req->rq_interpret_reply = brw_interpret;
2598         req->rq_memalloc = mem_tight != 0;
2599         oap->oap_request = ptlrpc_request_addref(req);
2600         if (ndelay) {
2601                 req->rq_no_resend = req->rq_no_delay = 1;
2602                 /* we should probably set a shorter timeout value here
2603                  * to handle ETIMEDOUT in brw_interpret() correctly. */
2604                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2605         }
2606
2607         /* Need to update the timestamps after the request is built in case
2608          * we race with setattr (locally or queued at the OST).  If the OST
2609          * gets a later setattr before an earlier BRW (as determined by the
2610          * request xid), the OST will not use the BRW timestamps.  Sadly, there
2611          * is no obvious way to do this in a single call.  bug 10150 */
2612         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2613         crattr->cra_oa = &body->oa;
2614         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2615         cl_req_attr_set(env, osc2cl(obj), crattr);
2616         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2617
2618         aa = ptlrpc_req_async_args(aa, req);
2619         INIT_LIST_HEAD(&aa->aa_oaps);
2620         list_splice_init(&rpc_list, &aa->aa_oaps);
2621         INIT_LIST_HEAD(&aa->aa_exts);
2622         list_splice_init(ext_list, &aa->aa_exts);
2623
2624         spin_lock(&cli->cl_loi_list_lock);
2625         starting_offset >>= PAGE_SHIFT;
2626         if (cmd == OBD_BRW_READ) {
2627                 cli->cl_r_in_flight++;
2628                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2629                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2630                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2631                                       starting_offset + 1);
2632         } else {
2633                 cli->cl_w_in_flight++;
2634                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2635                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2636                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2637                                       starting_offset + 1);
2638         }
2639         spin_unlock(&cli->cl_loi_list_lock);
2640
2641         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2642                   page_count, aa, cli->cl_r_in_flight,
2643                   cli->cl_w_in_flight);
2644         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2645
2646         ptlrpcd_add_req(req);
2647         rc = 0;
2648         EXIT;
2649
2650 out:
2651         if (mem_tight)
2652                 memalloc_noreclaim_restore(mpflag);
2653
2654         if (rc != 0) {
2655                 LASSERT(req == NULL);
2656
2657                 if (oa)
2658                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2659                 if (pga) {
2660                         osc_release_bounce_pages(pga, page_count);
2661                         osc_release_ppga(pga, page_count);
2662                 }
2663                 /* this should happen rarely and is pretty bad; it makes the
2664                  * pending list not follow the dirty order
2665                  */
2666                 while ((ext = list_first_entry_or_null(ext_list,
2667                                                        struct osc_extent,
2668                                                        oe_link)) != NULL) {
2669                         list_del_init(&ext->oe_link);
2670                         osc_extent_finish(env, ext, 0, rc);
2671                 }
2672         }
2673         RETURN(rc);
2674 }
2675
2676 /* This is to refresh our lock in the face of no RPCs. */
2677 void osc_send_empty_rpc(struct osc_object *osc, pgoff_t start)
2678 {
2679         struct ptlrpc_request *req;
2680         struct obdo oa;
2681         struct brw_page bpg = { .off = start, .count = 1};
2682         struct brw_page *pga = &bpg;
2683         int rc;
2684
2685         memset(&oa, 0, sizeof(oa));
2686         oa.o_oi = osc->oo_oinfo->loi_oi;
2687         oa.o_valid = OBD_MD_FLID | OBD_MD_FLGROUP | OBD_MD_FLFLAGS;
2688         /* For updated servers - don't do a read */
2689         oa.o_flags = OBD_FL_NORPC;
2690
2691         rc = osc_brw_prep_request(OBD_BRW_READ, osc_cli(osc), &oa, 1, &pga,
2692                                   &req, 0);
2693
2694         /* If we succeeded, we ship it off; if not, there's no point in
2695          * doing anything.  Also no resends, no interpret callback and no
2696          * commit callback.
2697          */
2698         if (!rc) {
2699                 req->rq_no_resend = 1;
2700                 ptlrpcd_add_req(req);
2701         }
2702 }
2703
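/* Attach @data as the lock's l_ast_data if none is set yet. Returns 1 if
 * l_ast_data now points at @data (whether we just set it or it was already
 * set), 0 if the lock already belongs to somebody else. */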
2704 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2705 {
2706         int set = 0;
2707
2708         LASSERT(lock != NULL);
2709
2710         lock_res_and_lock(lock);
2711
2712         if (lock->l_ast_data == NULL)
2713                 lock->l_ast_data = data;
2714         if (lock->l_ast_data == data)
2715                 set = 1;
2716
2717         unlock_res_and_lock(lock);
2718
2719         return set;
2720 }
2721
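/* Finish enqueue processing: for an aborted intent enqueue, pick the real
 * error code out of the intent reply; invoke the caller's upcall; and drop
 * the lock reference taken in ldlm_cli_enqueue() on success. */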
2722 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2723                      void *cookie, struct lustre_handle *lockh,
2724                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2725                      int errcode)
2726 {
2727         bool intent = *flags & LDLM_FL_HAS_INTENT;
2728         int rc;
2729         ENTRY;
2730
2731         /* The request was created before the ldlm_cli_enqueue() call. */
2732         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2733                 struct ldlm_reply *rep;
2734
2735                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2736                 LASSERT(rep != NULL);
2737
2738                 rep->lock_policy_res1 =
2739                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2740                 if (rep->lock_policy_res1)
2741                         errcode = rep->lock_policy_res1;
2742                 if (!speculative)
2743                         *flags |= LDLM_FL_LVB_READY;
2744         } else if (errcode == ELDLM_OK) {
2745                 *flags |= LDLM_FL_LVB_READY;
2746         }
2747
2748         /* Call the update callback. */
2749         rc = (*upcall)(cookie, lockh, errcode);
2750
2751         /* release the reference taken in ldlm_cli_enqueue() */
2752         if (errcode == ELDLM_LOCK_MATCHED)
2753                 errcode = ELDLM_OK;
2754         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2755                 ldlm_lock_decref(lockh, mode);
2756
2757         RETURN(rc);
2758 }
2759
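/* Interpret callback for an asynchronous enqueue RPC: complete the DLM part
 * with ldlm_cli_enqueue_fini(), then the OSC part with osc_enqueue_fini(). */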
2760 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2761                           void *args, int rc)
2762 {
2763         struct osc_enqueue_args *aa = args;
2764         struct ldlm_lock *lock;
2765         struct lustre_handle *lockh = &aa->oa_lockh;
2766         enum ldlm_mode mode = aa->oa_mode;
2767         struct ost_lvb *lvb = aa->oa_lvb;
2768         __u32 lvb_len = sizeof(*lvb);
2769         __u64 flags = 0;
2770         struct ldlm_enqueue_info einfo = {
2771                 .ei_type = aa->oa_type,
2772                 .ei_mode = mode,
2773         };
2774
2775         ENTRY;
2776
2777         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2778          * be valid. */
2779         lock = ldlm_handle2lock(lockh);
2780         LASSERTF(lock != NULL,
2781                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2782                  lockh->cookie, req, aa);
2783
2784         /* Take an additional reference so that a blocking AST that
2785          * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2786          * to arrive after an upcall has been executed by
2787          * osc_enqueue_fini(). */
2788         ldlm_lock_addref(lockh, mode);
2789
2790         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2791         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2792
2793         /* Let the CP AST grant the lock first. */
2794         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2795
2796         if (aa->oa_speculative) {
2797                 LASSERT(aa->oa_lvb == NULL);
2798                 LASSERT(aa->oa_flags == NULL);
2799                 aa->oa_flags = &flags;
2800         }
2801
2802         /* Complete the lock-obtaining procedure. */
2803         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
2804                                    lvb, lvb_len, lockh, rc);
2805         /* Complete osc stuff. */
2806         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2807                               aa->oa_flags, aa->oa_speculative, rc);
2808
2809         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2810
2811         ldlm_lock_decref(lockh, mode);
2812         LDLM_LOCK_PUT(lock);
2813         RETURN(rc);
2814 }
2815
2816 /* When enqueuing asynchronously, locks are not ordered; we can obtain a lock
2817  * from the 2nd OSC before a lock from the 1st one. This does not deadlock with
2818  * other synchronous requests; however, holding some locks while trying to
2819  * obtain others may take a considerable amount of time in case of OST failure,
2820  * and when other sync requests do not get a lock released by a client, the
2821  * client is evicted from the cluster -- such scenarios make life difficult, so
2822  * release locks just after they are obtained. */
2823 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2824                      __u64 *flags, union ldlm_policy_data *policy,
2825                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2826                      void *cookie, struct ldlm_enqueue_info *einfo,
2827                      struct ptlrpc_request_set *rqset, int async,
2828                      bool speculative)
2829 {
2830         struct obd_device *obd = exp->exp_obd;
2831         struct lustre_handle lockh = { 0 };
2832         struct ptlrpc_request *req = NULL;
2833         int intent = *flags & LDLM_FL_HAS_INTENT;
2834         __u64 match_flags = *flags;
2835         enum ldlm_mode mode;
2836         int rc;
2837         ENTRY;
2838
2839         /* Filesystem lock extents are extended to page boundaries so that
2840          * dealing with the page cache is a little smoother.  */
2841         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2842         policy->l_extent.end |= ~PAGE_MASK;
2843
2844         /* Next, search for already existing extent locks that will cover us */
2845         /* If we're trying to read, we also search for an existing PW lock.  The
2846          * VFS and page cache already protect us locally, so lots of readers/
2847          * writers can share a single PW lock.
2848          *
2849          * There are problems with conversion deadlocks, so instead of
2850          * converting a read lock to a write lock, we'll just enqueue a new
2851          * one.
2852          *
2853          * At some point we should cancel the read lock instead of making the
2854          * server send us a blocking callback, but there are problems with
2855          * canceling locks out from under other users right now, too. */
2856         mode = einfo->ei_mode;
2857         if (einfo->ei_mode == LCK_PR)
2858                 mode |= LCK_PW;
2859         /* Normal lock requests must wait for the LVB to be ready before
2860          * matching a lock; speculative lock requests do not need to,
2861          * because they will not actually use the lock. */
2862         if (!speculative)
2863                 match_flags |= LDLM_FL_LVB_READY;
2864         if (intent != 0)
2865                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2866         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2867                                einfo->ei_type, policy, mode, &lockh);
2868         if (mode) {
2869                 struct ldlm_lock *matched;
2870
2871                 if (*flags & LDLM_FL_TEST_LOCK)
2872                         RETURN(ELDLM_OK);
2873
2874                 matched = ldlm_handle2lock(&lockh);
2875                 if (speculative) {
2876                         /* This DLM lock request is speculative, and does not
2877                          * have an associated IO request. Therefore if there
2878                          * is already a DLM lock, it will just inform the
2879                          * caller to cancel the request for this stripe. */
2880                         lock_res_and_lock(matched);
2881                         if (ldlm_extent_equal(&policy->l_extent,
2882                             &matched->l_policy_data.l_extent))
2883                                 rc = -EEXIST;
2884                         else
2885                                 rc = -ECANCELED;
2886                         unlock_res_and_lock(matched);
2887
2888                         ldlm_lock_decref(&lockh, mode);
2889                         LDLM_LOCK_PUT(matched);
2890                         RETURN(rc);
2891                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2892                         *flags |= LDLM_FL_LVB_READY;
2893
2894                         /* We already have a lock, and it's referenced. */
2895                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2896
2897                         ldlm_lock_decref(&lockh, mode);
2898                         LDLM_LOCK_PUT(matched);
2899                         RETURN(ELDLM_OK);
2900                 } else {
2901                         ldlm_lock_decref(&lockh, mode);
2902                         LDLM_LOCK_PUT(matched);
2903                 }
2904         }
2905
2906         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2907                 RETURN(-ENOLCK);
2908
2909         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2910         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2911
2912         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2913                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2914         if (async) {
2915                 if (!rc) {
2916                         struct osc_enqueue_args *aa;
2917                         aa = ptlrpc_req_async_args(aa, req);
2918                         aa->oa_exp         = exp;
2919                         aa->oa_mode        = einfo->ei_mode;
2920                         aa->oa_type        = einfo->ei_type;
2921                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2922                         aa->oa_upcall      = upcall;
2923                         aa->oa_cookie      = cookie;
2924                         aa->oa_speculative = speculative;
2925                         if (!speculative) {
2926                                 aa->oa_flags  = flags;
2927                                 aa->oa_lvb    = lvb;
2928                         } else {
2929                                 /* speculative locks essentially enqueue
2930                                  * a DLM lock in advance, so we don't care
2931                                  * about the result of the enqueue. */
2932                                 aa->oa_lvb    = NULL;
2933                                 aa->oa_flags  = NULL;
2934                         }
2935
2936                         req->rq_interpret_reply = osc_enqueue_interpret;
2937                         ptlrpc_set_add_req(rqset, req);
2938                 }
2939                 RETURN(rc);
2940         }
2941
2942         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2943                               flags, speculative, rc);
2944
2945         RETURN(rc);
2946 }
2947
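/* Search for an existing extent lock covering @policy. On a match, attach
 * @obj as the lock's ast_data and refresh the cached LVB if necessary; if
 * the lock already belongs to another object, drop the matched reference
 * and return 0. */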
2948 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2949                    struct ldlm_res_id *res_id, enum ldlm_type type,
2950                    union ldlm_policy_data *policy, enum ldlm_mode mode,
2951                    __u64 *flags, struct osc_object *obj,
2952                    struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
2953 {
2954         struct obd_device *obd = exp->exp_obd;
2955         __u64 lflags = *flags;
2956         enum ldlm_mode rc;
2957         ENTRY;
2958
2959         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2960                 RETURN(-EIO);
2961
2962         /* Filesystem lock extents are extended to page boundaries so that
2963          * dealing with the page cache is a little smoother */
2964         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2965         policy->l_extent.end |= ~PAGE_MASK;
2966
2967         /* Next, search for already existing extent locks that will cover us */
2968         rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
2969                                         res_id, type, policy, mode, lockh,
2970                                         match_flags);
2971         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2972                 RETURN(rc);
2973
2974         if (obj != NULL) {
2975                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2976
2977                 LASSERT(lock != NULL);
2978                 if (osc_set_lock_data(lock, obj)) {
2979                         lock_res_and_lock(lock);
2980                         if (!ldlm_is_lvb_cached(lock)) {
2981                                 LASSERT(lock->l_ast_data == obj);
2982                                 osc_lock_lvb_update(env, obj, lock, NULL);
2983                                 ldlm_set_lvb_cached(lock);
2984                         }
2985                         unlock_res_and_lock(lock);
2986                 } else {
2987                         ldlm_lock_decref(lockh, rc);
2988                         rc = 0;
2989                 }
2990                 LDLM_LOCK_PUT(lock);
2991         }
2992         RETURN(rc);
2993 }
2994
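/* Interpret callback for an asynchronous OST_STATFS RPC: copy the server's
 * statistics into the caller's buffer and run the completion callback. */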
2995 static int osc_statfs_interpret(const struct lu_env *env,
2996                                 struct ptlrpc_request *req, void *args, int rc)
2997 {
2998         struct osc_async_args *aa = args;
2999         struct obd_statfs *msfs;
3000
3001         ENTRY;
3002         if (rc == -EBADR)
3003                 /*
3004                  * The request has in fact never been sent due to issues at
3005                  * a higher level (LOV).  Exit immediately since the caller
3006                  * is aware of the problem and takes care of the clean up.
3007                  */
3008                 RETURN(rc);
3009
3010         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
3011             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
3012                 GOTO(out, rc = 0);
3013
3014         if (rc != 0)
3015                 GOTO(out, rc);
3016
3017         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3018         if (msfs == NULL)
3019                 GOTO(out, rc = -EPROTO);
3020
3021         *aa->aa_oi->oi_osfs = *msfs;
3022 out:
3023         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
3024
3025         RETURN(rc);
3026 }
3027
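/* Asynchronous statfs: answer from the local cache when it is fresh enough
 * (obd_osfs_age >= max_age), otherwise send an OST_STATFS RPC and let
 * osc_statfs_interpret() deliver the result. */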
3028 static int osc_statfs_async(struct obd_export *exp,
3029                             struct obd_info *oinfo, time64_t max_age,
3030                             struct ptlrpc_request_set *rqset)
3031 {
3032         struct obd_device     *obd = class_exp2obd(exp);
3033         struct ptlrpc_request *req;
3034         struct osc_async_args *aa;
3035         int rc;
3036         ENTRY;
3037
3038         if (obd->obd_osfs_age >= max_age) {
3039                 CDEBUG(D_SUPER,
3040                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
3041                        obd->obd_name, &obd->obd_osfs,
3042                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
3043                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
3044                 spin_lock(&obd->obd_osfs_lock);
3045                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
3046                 spin_unlock(&obd->obd_osfs_lock);
3047                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
3048                 if (oinfo->oi_cb_up)
3049                         oinfo->oi_cb_up(oinfo, 0);
3050
3051                 RETURN(0);
3052         }
3053
3054         /* We could possibly pass max_age in the request (as an absolute
3055          * timestamp or a "seconds.usec ago") so the target can avoid doing
3056          * extra calls into the filesystem if that isn't necessary (e.g.
3057          * during mount that would help a bit).  Having relative timestamps
3058          * is not so great if request processing is slow, while absolute
3059          * timestamps are not ideal because they need time synchronization. */
3060         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
3061         if (req == NULL)
3062                 RETURN(-ENOMEM);
3063
3064         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3065         if (rc) {
3066                 ptlrpc_request_free(req);
3067                 RETURN(rc);
3068         }
3069         ptlrpc_request_set_replen(req);
3070         req->rq_request_portal = OST_CREATE_PORTAL;
3071         ptlrpc_at_set_req_timeout(req);
3072
3073         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
3074                 /* procfs requests should not wait on stat, to avoid deadlock */
3075                 req->rq_no_resend = 1;
3076                 req->rq_no_delay = 1;
3077         }
3078
3079         req->rq_interpret_reply = osc_statfs_interpret;
3080         aa = ptlrpc_req_async_args(aa, req);
3081         aa->aa_oi = oinfo;
3082
3083         ptlrpc_set_add_req(rqset, req);
3084         RETURN(0);
3085 }
3086
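/* Synchronous statfs: send an OST_STATFS RPC and wait for the reply. */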
3087 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
3088                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
3089 {
3090         struct obd_device     *obd = class_exp2obd(exp);
3091         struct obd_statfs     *msfs;
3092         struct ptlrpc_request *req;
3093         struct obd_import     *imp, *imp0;
3094         int rc;
3095         ENTRY;
3096
3097         /* Since the request might also come from lprocfs, we need to
3098          * sync this with client_disconnect_export(), see Bug15684.
3099          */
3100         with_imp_locked(obd, imp0, rc)
3101                 imp = class_import_get(imp0);
3102         if (rc)
3103                 RETURN(rc);
3104
3105         /* We could possibly pass max_age in the request (as an absolute
3106          * timestamp or a "seconds.usec ago") so the target can avoid doing
3107          * extra calls into the filesystem if that isn't necessary (e.g.
3108          * during mount that would help a bit).  Having relative timestamps
3109          * is not so great if request processing is slow, while absolute
3110          * timestamps are not ideal because they need time synchronization. */
3111         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
3112
3113         class_import_put(imp);
3114
3115         if (req == NULL)
3116                 RETURN(-ENOMEM);
3117
3118         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
3119         if (rc) {
3120                 ptlrpc_request_free(req);
3121                 RETURN(rc);
3122         }
3123         ptlrpc_request_set_replen(req);
3124         req->rq_request_portal = OST_CREATE_PORTAL;
3125         ptlrpc_at_set_req_timeout(req);
3126
3127         if (flags & OBD_STATFS_NODELAY) {
3128                 /* procfs requests should not wait on stat, to avoid deadlock */
3129                 req->rq_no_resend = 1;
3130                 req->rq_no_delay = 1;
3131         }
3132
3133         rc = ptlrpc_queue_wait(req);
3134         if (rc)
3135                 GOTO(out, rc);
3136
3137         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
3138         if (msfs == NULL)
3139                 GOTO(out, rc = -EPROTO);
3140
3141         *osfs = *msfs;
3142
3143         EXIT;
3144 out:
3145         ptlrpc_req_finished(req);
3146         return rc;
3147 }
3148
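/* Handle OSC-specific ioctls: trigger import recovery (OBD_IOC_CLIENT_RECOVER)
 * and toggle the import active state (IOC_OSC_SET_ACTIVE). */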
3149 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
3150                          void *karg, void __user *uarg)
3151 {
3152         struct obd_device *obd = exp->exp_obd;
3153         struct obd_ioctl_data *data = karg;
3154         int rc = 0;
3155
3156         ENTRY;
3157         if (!try_module_get(THIS_MODULE)) {
3158                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3159                        module_name(THIS_MODULE));
3160                 return -EINVAL;
3161         }
3162         switch (cmd) {
3163         case OBD_IOC_CLIENT_RECOVER:
3164                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3165                                            data->ioc_inlbuf1, 0);
3166                 if (rc > 0)
3167                         rc = 0;
3168                 break;
3169         case IOC_OSC_SET_ACTIVE:
3170                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3171                                               data->ioc_offset);
3172                 break;
3173         default:
3174                 rc = -ENOTTY;
3175                 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3176                        obd->obd_name, cmd, current->comm, rc);
3177                 break;
3178         }
3179
3180         module_put(THIS_MODULE);
3181         return rc;
3182 }
3183
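/* Handle set_info requests. A few keys (checksum, sptlrpc config, context
 * flush, LRU shrink) are processed locally; everything else is packed into
 * an OST_SET_INFO RPC, with grant shrink requests going through ptlrpcd. */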
3184 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3185                        u32 keylen, void *key, u32 vallen, void *val,
3186                        struct ptlrpc_request_set *set)
3187 {
3188         struct ptlrpc_request *req;
3189         struct obd_device     *obd = exp->exp_obd;
3190         struct obd_import     *imp = class_exp2cliimp(exp);
3191         char                  *tmp;
3192         int                    rc;
3193         ENTRY;
3194
3195         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3196
3197         if (KEY_IS(KEY_CHECKSUM)) {
3198                 if (vallen != sizeof(int))
3199                         RETURN(-EINVAL);
3200                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3201                 RETURN(0);
3202         }
3203
3204         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3205                 sptlrpc_conf_client_adapt(obd);
3206                 RETURN(0);
3207         }
3208
3209         if (KEY_IS(KEY_FLUSH_CTX)) {
3210                 sptlrpc_import_flush_my_ctx(imp);
3211                 RETURN(0);
3212         }
3213
3214         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3215                 struct client_obd *cli = &obd->u.cli;
3216                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3217                 long target = *(long *)val;
3218
3219                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3220                 *(long *)val -= nr;
3221                 RETURN(0);
3222         }
3223
3224         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3225                 RETURN(-EINVAL);
3226
3227         /* We pass all other commands directly to the OST. Since nobody calls
3228            osc methods directly and everybody is supposed to go through LOV, we
3229            assume LOV checked invalid values for us.
3230            The only recognised values so far are evict_by_nid and mds_conn.
3231            Even if something bad goes through, we'd get a -EINVAL from the OST
3232            anyway. */
3233
3234         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3235                                                 &RQF_OST_SET_GRANT_INFO :
3236                                                 &RQF_OBD_SET_INFO);
3237         if (req == NULL)
3238                 RETURN(-ENOMEM);
3239
3240         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3241                              RCL_CLIENT, keylen);
3242         if (!KEY_IS(KEY_GRANT_SHRINK))
3243                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3244                                      RCL_CLIENT, vallen);
3245         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3246         if (rc) {
3247                 ptlrpc_request_free(req);
3248                 RETURN(rc);
3249         }
3250
3251         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3252         memcpy(tmp, key, keylen);
3253         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3254                                                         &RMF_OST_BODY :
3255                                                         &RMF_SETINFO_VAL);
3256         memcpy(tmp, val, vallen);
3257
3258         if (KEY_IS(KEY_GRANT_SHRINK)) {
3259                 struct osc_grant_args *aa;
3260                 struct obdo *oa;
3261
3262                 aa = ptlrpc_req_async_args(aa, req);
3263                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3264                 if (!oa) {
3265                         ptlrpc_req_finished(req);
3266                         RETURN(-ENOMEM);
3267                 }
3268                 *oa = ((struct ost_body *)val)->oa;
3269                 aa->aa_oa = oa;
3270                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3271         }
3272
3273         ptlrpc_request_set_replen(req);
3274         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3275                 LASSERT(set != NULL);
3276                 ptlrpc_set_add_req(set, req);
3277                 ptlrpc_check_set(NULL, set);
3278         } else {
3279                 ptlrpcd_add_req(req);
3280         }
3281
3282         RETURN(0);
3283 }
3284 EXPORT_SYMBOL(osc_set_info_async);
3285
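/* Recompute the grant to request at reconnect time from the available,
 * reserved and dirty grant, and reset the lost-grant counter. */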
3286 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3287                   struct obd_device *obd, struct obd_uuid *cluuid,
3288                   struct obd_connect_data *data, void *localdata)
3289 {
3290         struct client_obd *cli = &obd->u.cli;
3291
3292         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3293                 long lost_grant;
3294                 long grant;
3295
3296                 spin_lock(&cli->cl_loi_list_lock);
3297                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3298                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3299                         /* restore ocd_grant_blkbits as client page bits */
3300                         data->ocd_grant_blkbits = PAGE_SHIFT;
3301                         grant += cli->cl_dirty_grant;
3302                 } else {
3303                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3304                 }
3305                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
3306                 lost_grant = cli->cl_lost_grant;
3307                 cli->cl_lost_grant = 0;
3308                 spin_unlock(&cli->cl_loi_list_lock);
3309
3310                 CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d"
3311                        " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
3312                        data->ocd_version, data->ocd_grant, lost_grant);
3313         }
3314
3315         RETURN(0);
3316 }
3317 EXPORT_SYMBOL(osc_reconnect);
3318
3319 int osc_disconnect(struct obd_export *exp)
3320 {
3321         struct obd_device *obd = class_exp2obd(exp);
3322         int rc;
3323
3324         rc = client_disconnect_export(exp);
3325         /**
3326          * Initially we put del_shrink_grant before disconnect_export, but it
3327          * causes the following problem if setup (connect) and cleanup
3328          * (disconnect) are tangled together.
3329          *      connect p1                     disconnect p2
3330          *   ptlrpc_connect_import
3331          *     ...............               class_manual_cleanup
3332          *                                     osc_disconnect
3333          *                                     del_shrink_grant
3334          *   ptlrpc_connect_interrupt
3335          *     osc_init_grant
3336          *   add this client to shrink list
3337          *                                      cleanup_osc
3338          * Bang! The grant shrink thread triggers the shrink. BUG18662
3339          */
3340         osc_del_grant_list(&obd->u.cli);
3341         return rc;
3342 }
3343 EXPORT_SYMBOL(osc_disconnect);
3344
3345 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3346                                  struct hlist_node *hnode, void *arg)
3347 {
3348         struct lu_env *env = arg;
3349         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3350         struct ldlm_lock *lock;
3351         struct osc_object *osc = NULL;
3352         ENTRY;
3353
3354         lock_res(res);
3355         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3356                 if (lock->l_ast_data != NULL && osc == NULL) {
3357                         osc = lock->l_ast_data;
3358                         cl_object_get(osc2cl(osc));
3359                 }
3360
3361                 /* clear LDLM_FL_CLEANED flag to make sure it will be canceled
3362                  * by the 2nd round of ldlm_namespace_clean() call in
3363                  * osc_import_event(). */
3364                 ldlm_clear_cleaned(lock);
3365         }
3366         unlock_res(res);
3367
3368         if (osc != NULL) {
3369                 osc_object_invalidate(env, osc);
3370                 cl_object_put(env, osc2cl(osc));
3371         }
3372
3373         RETURN(0);
3374 }
3375 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3376
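/* React to import state changes: drop grants on disconnect, flush cached I/O
 * and invalidate cached objects and locks on invalidation, re-initialize
 * grants when connect data arrives, and notify the observer (LOV) about
 * (in)activation events. */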
3377 static int osc_import_event(struct obd_device *obd,
3378                             struct obd_import *imp,
3379                             enum obd_import_event event)
3380 {
3381         struct client_obd *cli;
3382         int rc = 0;
3383
3384         ENTRY;
3385         LASSERT(imp->imp_obd == obd);
3386
3387         switch (event) {
3388         case IMP_EVENT_DISCON: {
3389                 cli = &obd->u.cli;
3390                 spin_lock(&cli->cl_loi_list_lock);
3391                 cli->cl_avail_grant = 0;
3392                 cli->cl_lost_grant = 0;
3393                 spin_unlock(&cli->cl_loi_list_lock);
3394                 break;
3395         }
3396         case IMP_EVENT_INACTIVE: {
3397                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3398                 break;
3399         }
3400         case IMP_EVENT_INVALIDATE: {
3401                 struct ldlm_namespace *ns = obd->obd_namespace;
3402                 struct lu_env         *env;
3403                 __u16                  refcheck;
3404
3405                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3406
3407                 env = cl_env_get(&refcheck);
3408                 if (!IS_ERR(env)) {
3409                         osc_io_unplug(env, &obd->u.cli, NULL);
3410
3411                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3412                                                  osc_ldlm_resource_invalidate,
3413                                                  env, 0);
3414                         cl_env_put(env, &refcheck);
3415
3416                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3417                 } else
3418                         rc = PTR_ERR(env);
3419                 break;
3420         }
3421         case IMP_EVENT_ACTIVE: {
3422                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3423                 break;
3424         }
3425         case IMP_EVENT_OCD: {
3426                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3427
3428                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3429                         osc_init_grant(&obd->u.cli, ocd);
3430
3431                 /* See bug 7198 */
3432                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3433                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3434
3435                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3436                 break;
3437         }
3438         case IMP_EVENT_DEACTIVATE: {
3439                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3440                 break;
3441         }
3442         case IMP_EVENT_ACTIVATE: {
3443                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3444                 break;
3445         }
3446         default:
3447                 CERROR("Unknown import event %d\n", event);
3448                 LBUG();
3449         }
3450         RETURN(rc);
3451 }
3452
3453 /**
3454  * Determine whether the lock can be canceled before replaying the lock
3455  * during recovery, see bug16774 for detailed information.
3456  *
3457  * \retval zero the lock can't be canceled
3458  * \retval other ok to cancel
3459  */
3460 static int osc_cancel_weight(struct ldlm_lock *lock)
3461 {
3462         /*
3463          * Cancel all unused and granted extent locks.
3464          */
3465         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3466             ldlm_is_granted(lock) &&
3467             osc_ldlm_weigh_ast(lock) == 0)
3468                 RETURN(1);
3469
3470         RETURN(0);
3471 }
3472
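/* ptlrpcd work callback: flush any cached writeback I/O for this client. */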
3473 static int brw_queue_work(const struct lu_env *env, void *data)
3474 {
3475         struct client_obd *cli = data;
3476
3477         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3478
3479         osc_io_unplug(env, cli, NULL);
3480         RETURN(0);
3481 }
3482
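/* Common part of client device setup (exported for reuse): take a ptlrpcd
 * reference, set up the client obd, allocate the writeback and LRU work
 * items, and initialize quota handling and grant shrinking. */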
3483 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3484 {
3485         struct client_obd *cli = &obd->u.cli;
3486         void *handler;
3487         int rc;
3488
3489         ENTRY;
3490
3491         rc = ptlrpcd_addref();
3492         if (rc)
3493                 RETURN(rc);
3494
3495         rc = client_obd_setup(obd, lcfg);
3496         if (rc)
3497                 GOTO(out_ptlrpcd, rc);
3498
3499
3500         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3501         if (IS_ERR(handler))
3502                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3503         cli->cl_writeback_work = handler;
3504
3505         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3506         if (IS_ERR(handler))
3507                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3508         cli->cl_lru_work = handler;
3509
3510         rc = osc_quota_setup(obd);
3511         if (rc)
3512                 GOTO(out_ptlrpcd_work, rc);
3513
3514         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3515         osc_update_next_shrink(cli);
3516
3517         RETURN(rc);
3518
3519 out_ptlrpcd_work:
3520         if (cli->cl_writeback_work != NULL) {
3521                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3522                 cli->cl_writeback_work = NULL;
3523         }
3524         if (cli->cl_lru_work != NULL) {
3525                 ptlrpcd_destroy_work(cli->cl_lru_work);
3526                 cli->cl_lru_work = NULL;
3527         }
3528         client_obd_cleanup(obd);
3529 out_ptlrpcd:
3530         ptlrpcd_decref();
3531         RETURN(rc);
3532 }
3533 EXPORT_SYMBOL(osc_setup_common);
3534
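/* Full OSC device setup on top of osc_setup_common(): register tunables,
 * grow the shared request pool up to osc_reqpool_maxreqcount, register the
 * cancel weight callback and add this client to the grant shrink list. */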
3535 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3536 {
3537         struct client_obd *cli = &obd->u.cli;
3538         int                adding;
3539         int                added;
3540         int                req_count;
3541         int                rc;
3542
3543         ENTRY;
3544
3545         rc = osc_setup_common(obd, lcfg);
3546         if (rc < 0)
3547                 RETURN(rc);
3548
3549         rc = osc_tunables_init(obd);
3550         if (rc)
3551                 RETURN(rc);
3552
3553         /*
3554          * We try to control the total number of requests with an upper limit,
3555          * osc_reqpool_maxreqcount. There might be some race which will cause
3556          * over-limit allocation, but it is fine.
3557          */
3558         req_count = atomic_read(&osc_pool_req_count);
3559         if (req_count < osc_reqpool_maxreqcount) {
3560                 adding = cli->cl_max_rpcs_in_flight + 2;
3561                 if (req_count + adding > osc_reqpool_maxreqcount)
3562                         adding = osc_reqpool_maxreqcount - req_count;
3563
3564                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3565                 atomic_add(added, &osc_pool_req_count);
3566         }
3567
3568         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3569
3570         spin_lock(&osc_shrink_lock);
3571         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3572         spin_unlock(&osc_shrink_lock);
3573         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3574         cli->cl_import->imp_idle_debug = D_HA;
3575
3576         RETURN(0);
3577 }
3578
3579 int osc_precleanup_common(struct obd_device *obd)
3580 {
3581         struct client_obd *cli = &obd->u.cli;
3582         ENTRY;
3583
3584         /* LU-464
3585          * for echo client, export may be on zombie list, wait for
3586          * zombie thread to cull it, because cli.cl_import will be
3587          * cleared in client_disconnect_export():
3588          *   class_export_destroy() -> obd_cleanup() ->
3589          *   echo_device_free() -> echo_client_cleanup() ->
3590          *   obd_disconnect() -> osc_disconnect() ->
3591          *   client_disconnect_export()
3592          */
3593         obd_zombie_barrier();
3594         if (cli->cl_writeback_work) {
3595                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3596                 cli->cl_writeback_work = NULL;
3597         }
3598
3599         if (cli->cl_lru_work) {
3600                 ptlrpcd_destroy_work(cli->cl_lru_work);
3601                 cli->cl_lru_work = NULL;
3602         }
3603
3604         obd_cleanup_client_import(obd);
3605         RETURN(0);
3606 }
3607 EXPORT_SYMBOL(osc_precleanup_common);
3608
3609 static int osc_precleanup(struct obd_device *obd)
3610 {
3611         ENTRY;
3612
3613         osc_precleanup_common(obd);
3614
3615         ptlrpc_lprocfs_unregister_obd(obd);
3616         RETURN(0);
3617 }
3618
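/* Undo osc_setup(): leave the grant shrink list, detach from the LRU cache,
 * free the quota state, clean up the client obd and drop the ptlrpcd
 * reference. */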
3619 int osc_cleanup_common(struct obd_device *obd)
3620 {
3621         struct client_obd *cli = &obd->u.cli;
3622         int rc;
3623
3624         ENTRY;
3625
3626         spin_lock(&osc_shrink_lock);
3627         list_del(&cli->cl_shrink_list);
3628         spin_unlock(&osc_shrink_lock);
3629
3630         /* lru cleanup */
3631         if (cli->cl_cache != NULL) {
3632                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3633                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3634                 list_del_init(&cli->cl_lru_osc);
3635                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3636                 cli->cl_lru_left = NULL;
3637                 cl_cache_decref(cli->cl_cache);
3638                 cli->cl_cache = NULL;
3639         }
3640
3641         /* free memory of osc quota cache */
3642         osc_quota_cleanup(obd);
3643
3644         rc = client_obd_cleanup(obd);
3645
3646         ptlrpcd_decref();
3647         RETURN(rc);
3648 }
3649 EXPORT_SYMBOL(osc_cleanup_common);
3650
3651 static const struct obd_ops osc_obd_ops = {
3652         .o_owner                = THIS_MODULE,
3653         .o_setup                = osc_setup,
3654         .o_precleanup           = osc_precleanup,
3655         .o_cleanup              = osc_cleanup_common,
3656         .o_add_conn             = client_import_add_conn,
3657         .o_del_conn             = client_import_del_conn,
3658         .o_connect              = client_connect_import,
3659         .o_reconnect            = osc_reconnect,
3660         .o_disconnect           = osc_disconnect,
3661         .o_statfs               = osc_statfs,
3662         .o_statfs_async         = osc_statfs_async,
3663         .o_create               = osc_create,
3664         .o_destroy              = osc_destroy,
3665         .o_getattr              = osc_getattr,
3666         .o_setattr              = osc_setattr,
3667         .o_iocontrol            = osc_iocontrol,
3668         .o_set_info_async       = osc_set_info_async,
3669         .o_import_event         = osc_import_event,
3670         .o_quotactl             = osc_quotactl,
3671 };
3672
3673 LIST_HEAD(osc_shrink_list);
3674 DEFINE_SPINLOCK(osc_shrink_lock);
3675
3676 #ifdef HAVE_SHRINKER_COUNT
3677 static struct shrinker osc_cache_shrinker = {
3678         .count_objects  = osc_cache_shrink_count,
3679         .scan_objects   = osc_cache_shrink_scan,
3680         .seeks          = DEFAULT_SEEKS,
3681 };
3682 #else
3683 static int osc_cache_shrink(struct shrinker *shrinker,
3684                             struct shrink_control *sc)
3685 {
3686         (void)osc_cache_shrink_scan(shrinker, sc);
3687
3688         return osc_cache_shrink_count(shrinker, sc);
3689 }
3690
3691 static struct shrinker osc_cache_shrinker = {
3692         .shrink   = osc_cache_shrink,
3693         .seeks    = DEFAULT_SEEKS,
3694 };
3695 #endif
3696
3697 static int __init osc_init(void)
3698 {
3699         unsigned int reqpool_size;
3700         unsigned int reqsize;
3701         int rc;
3702         ENTRY;
3703
3704         /* Print the address of _any_ initialized kernel symbol from this
3705          * module, to allow debugging with a gdb that doesn't support data
3706          * symbols from modules. */
3707         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3708
3709         rc = lu_kmem_init(osc_caches);
3710         if (rc)
3711                 RETURN(rc);
3712
3713         rc = class_register_type(&osc_obd_ops, NULL, true,
3714                                  LUSTRE_OSC_NAME, &osc_device_type);
3715         if (rc)
3716                 GOTO(out_kmem, rc);
3717
3718         rc = register_shrinker(&osc_cache_shrinker);
3719         if (rc)
3720                 GOTO(out_type, rc);
3721
3722         /* This is obviously too much memory; only prevent overflow here */
3723         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3724                 GOTO(out_shrinker, rc = -EINVAL);
3725
3726         reqpool_size = osc_reqpool_mem_max << 20;
3727
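        /* Round the request buffer size up to the smallest power of two
         * that can hold OST_IO_MAXREQSIZE. */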
3728         reqsize = 1;
3729         while (reqsize < OST_IO_MAXREQSIZE)
3730                 reqsize = reqsize << 1;
3731
3732         /*
3733          * We don't enlarge the request count in the OSC pool according to
3734          * cl_max_rpcs_in_flight. Allocation from the pool is only tried
3735          * after normal allocation has failed, so a small OSC pool won't
3736          * cause much performance degradation in most cases.
3737          */
3738         osc_reqpool_maxreqcount = reqpool_size / reqsize;
3739
3740         atomic_set(&osc_pool_req_count, 0);
3741         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3742                                           ptlrpc_add_rqs_to_pool);
3743
3744         if (osc_rq_pool == NULL)
3745                 GOTO(out_shrinker, rc = -ENOMEM);
3746
3747         rc = osc_start_grant_work();
3748         if (rc != 0)
3749                 GOTO(out_req_pool, rc);
3750
3751         RETURN(rc);
3752
3753 out_req_pool:
3754         ptlrpc_free_rq_pool(osc_rq_pool);
3755 out_shrinker:
3756         unregister_shrinker(&osc_cache_shrinker);
3757 out_type:
3758         class_unregister_type(LUSTRE_OSC_NAME);
3759 out_kmem:
3760         lu_kmem_fini(osc_caches);
3761
3762         RETURN(rc);
3763 }
3764
3765 static void __exit osc_exit(void)
3766 {
3767         osc_stop_grant_work();
3768         unregister_shrinker(&osc_cache_shrinker);
3769         class_unregister_type(LUSTRE_OSC_NAME);
3770         lu_kmem_fini(osc_caches);
3771         ptlrpc_free_rq_pool(osc_rq_pool);
3772 }
3773
3774 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3775 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3776 MODULE_VERSION(LUSTRE_VERSION_STRING);
3777 MODULE_LICENSE("GPL");
3778
3779 module_init(osc_init);
3780 module_exit(osc_exit);