lustre/osc/osc_request.c (fs/lustre-release.git, LU-12275 sec: decryption for read path)
1 /*
2  * GPL HEADER START
3  *
4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 only,
8  * as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but
11  * WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
13  * General Public License version 2 for more details (a copy is included
14  * in the LICENSE file that accompanied this code).
15  *
16  * You should have received a copy of the GNU General Public License
17  * version 2 along with this program; If not, see
18  * http://www.gnu.org/licenses/gpl-2.0.html
19  *
20  * GPL HEADER END
21  */
22 /*
23  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24  * Use is subject to license terms.
25  *
26  * Copyright (c) 2011, 2017, Intel Corporation.
27  */
28 /*
29  * This file is part of Lustre, http://www.lustre.org/
30  * Lustre is a trademark of Sun Microsystems, Inc.
31  */
32
33 #define DEBUG_SUBSYSTEM S_OSC
34
35 #include <linux/workqueue.h>
36 #include <libcfs/libcfs.h>
37 #include <linux/falloc.h>
38 #include <lprocfs_status.h>
39 #include <lustre_debug.h>
40 #include <lustre_dlm.h>
41 #include <lustre_fid.h>
42 #include <lustre_ha.h>
43 #include <uapi/linux/lustre/lustre_ioctl.h>
44 #include <lustre_net.h>
45 #include <lustre_obdo.h>
46 #include <obd.h>
47 #include <obd_cksum.h>
48 #include <obd_class.h>
49 #include <lustre_osc.h>
51
52 #include "osc_internal.h"
53
54 atomic_t osc_pool_req_count;
55 unsigned int osc_reqpool_maxreqcount;
56 struct ptlrpc_request_pool *osc_rq_pool;
57
58 /* max memory used for request pool, unit is MB */
59 static unsigned int osc_reqpool_mem_max = 5;
60 module_param(osc_reqpool_mem_max, uint, 0444);
61
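/* time in seconds before an idle connection is disconnected */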
62 static int osc_idle_timeout = 20;
63 module_param(osc_idle_timeout, uint, 0644);
64
65 #define osc_grant_args osc_brw_async_args
66
67 struct osc_setattr_args {
68         struct obdo             *sa_oa;
69         obd_enqueue_update_f     sa_upcall;
70         void                    *sa_cookie;
71 };
72
73 struct osc_fsync_args {
74         struct osc_object       *fa_obj;
75         struct obdo             *fa_oa;
76         obd_enqueue_update_f    fa_upcall;
77         void                    *fa_cookie;
78 };
79
80 struct osc_ladvise_args {
81         struct obdo             *la_oa;
82         obd_enqueue_update_f     la_upcall;
83         void                    *la_cookie;
84 };
85
86 static void osc_release_ppga(struct brw_page **ppga, size_t count);
87 static int brw_interpret(const struct lu_env *env, struct ptlrpc_request *req,
88                          void *data, int rc);
89
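/* Pack @oa into the OST_BODY field of @req, converting the obdo to its
 * wire format according to the import's connect data. */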
90 void osc_pack_req_body(struct ptlrpc_request *req, struct obdo *oa)
91 {
92         struct ost_body *body;
93
94         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
95         LASSERT(body);
96
97         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
98 }
99
100 static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
101                        struct obdo *oa)
102 {
103         struct ptlrpc_request   *req;
104         struct ost_body         *body;
105         int                      rc;
106
107         ENTRY;
108         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
109         if (req == NULL)
110                 RETURN(-ENOMEM);
111
112         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
113         if (rc) {
114                 ptlrpc_request_free(req);
115                 RETURN(rc);
116         }
117
118         osc_pack_req_body(req, oa);
119
120         ptlrpc_request_set_replen(req);
121
122         rc = ptlrpc_queue_wait(req);
123         if (rc)
124                 GOTO(out, rc);
125
126         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
127         if (body == NULL)
128                 GOTO(out, rc = -EPROTO);
129
130         CDEBUG(D_INODE, "mode: %o\n", body->oa.o_mode);
131         lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
132
133         oa->o_blksize = cli_brw_size(exp->exp_obd);
134         oa->o_valid |= OBD_MD_FLBLKSZ;
135
136         EXIT;
137 out:
138         ptlrpc_req_finished(req);
139
140         return rc;
141 }
142
143 static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
144                        struct obdo *oa)
145 {
146         struct ptlrpc_request   *req;
147         struct ost_body         *body;
148         int                      rc;
149
150         ENTRY;
151         LASSERT(oa->o_valid & OBD_MD_FLGROUP);
152
153         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
154         if (req == NULL)
155                 RETURN(-ENOMEM);
156
157         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
158         if (rc) {
159                 ptlrpc_request_free(req);
160                 RETURN(rc);
161         }
162
163         osc_pack_req_body(req, oa);
164
165         ptlrpc_request_set_replen(req);
166
167         rc = ptlrpc_queue_wait(req);
168         if (rc)
169                 GOTO(out, rc);
170
171         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
172         if (body == NULL)
173                 GOTO(out, rc = -EPROTO);
174
175         lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
176
177         EXIT;
178 out:
179         ptlrpc_req_finished(req);
180
181         RETURN(rc);
182 }
183
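/* Completion handler shared by the setattr-style RPCs: unpack the reply
 * obdo into sa_oa, then pass the final status to the caller's upcall. */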
184 static int osc_setattr_interpret(const struct lu_env *env,
185                                  struct ptlrpc_request *req, void *args, int rc)
186 {
187         struct osc_setattr_args *sa = args;
188         struct ost_body *body;
189
190         ENTRY;
191
192         if (rc != 0)
193                 GOTO(out, rc);
194
195         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
196         if (body == NULL)
197                 GOTO(out, rc = -EPROTO);
198
199         lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa,
200                              &body->oa);
201 out:
202         rc = sa->sa_upcall(sa->sa_cookie, rc);
203         RETURN(rc);
204 }
205
206 int osc_setattr_async(struct obd_export *exp, struct obdo *oa,
207                       obd_enqueue_update_f upcall, void *cookie,
208                       struct ptlrpc_request_set *rqset)
209 {
210         struct ptlrpc_request   *req;
211         struct osc_setattr_args *sa;
212         int                      rc;
213
214         ENTRY;
215
216         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
217         if (req == NULL)
218                 RETURN(-ENOMEM);
219
220         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
221         if (rc) {
222                 ptlrpc_request_free(req);
223                 RETURN(rc);
224         }
225
226         osc_pack_req_body(req, oa);
227
228         ptlrpc_request_set_replen(req);
229
230         /* do mds to ost setattr asynchronously */
231         if (!rqset) {
232                 /* Do not wait for response. */
233                 ptlrpcd_add_req(req);
234         } else {
235                 req->rq_interpret_reply = osc_setattr_interpret;
236
237                 sa = ptlrpc_req_async_args(sa, req);
238                 sa->sa_oa = oa;
239                 sa->sa_upcall = upcall;
240                 sa->sa_cookie = cookie;
241
242                 ptlrpc_set_add_req(rqset, req);
243         }
244
245         RETURN(0);
246 }
247
248 static int osc_ladvise_interpret(const struct lu_env *env,
249                                  struct ptlrpc_request *req,
250                                  void *arg, int rc)
251 {
252         struct osc_ladvise_args *la = arg;
253         struct ost_body *body;
254         ENTRY;
255
256         if (rc != 0)
257                 GOTO(out, rc);
258
259         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
260         if (body == NULL)
261                 GOTO(out, rc = -EPROTO);
262
263         *la->la_oa = body->oa;
264 out:
265         rc = la->la_upcall(la->la_cookie, rc);
266         RETURN(rc);
267 }
268
269 /**
270  * If rqset is NULL, do not wait for response. Upcall and cookie could also
271  * be NULL in this case
272  */
273 int osc_ladvise_base(struct obd_export *exp, struct obdo *oa,
274                      struct ladvise_hdr *ladvise_hdr,
275                      obd_enqueue_update_f upcall, void *cookie,
276                      struct ptlrpc_request_set *rqset)
277 {
278         struct ptlrpc_request   *req;
279         struct ost_body         *body;
280         struct osc_ladvise_args *la;
281         int                      rc;
282         struct lu_ladvise       *req_ladvise;
283         struct lu_ladvise       *ladvise = ladvise_hdr->lah_advise;
284         int                      num_advise = ladvise_hdr->lah_count;
285         struct ladvise_hdr      *req_ladvise_hdr;
286         ENTRY;
287
288         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_LADVISE);
289         if (req == NULL)
290                 RETURN(-ENOMEM);
291
292         req_capsule_set_size(&req->rq_pill, &RMF_OST_LADVISE, RCL_CLIENT,
293                              num_advise * sizeof(*ladvise));
294         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_LADVISE);
295         if (rc != 0) {
296                 ptlrpc_request_free(req);
297                 RETURN(rc);
298         }
299         req->rq_request_portal = OST_IO_PORTAL;
300         ptlrpc_at_set_req_timeout(req);
301
302         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
303         LASSERT(body);
304         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa,
305                              oa);
306
307         req_ladvise_hdr = req_capsule_client_get(&req->rq_pill,
308                                                  &RMF_OST_LADVISE_HDR);
309         memcpy(req_ladvise_hdr, ladvise_hdr, sizeof(*ladvise_hdr));
310
311         req_ladvise = req_capsule_client_get(&req->rq_pill, &RMF_OST_LADVISE);
312         memcpy(req_ladvise, ladvise, sizeof(*ladvise) * num_advise);
313         ptlrpc_request_set_replen(req);
314
315         if (rqset == NULL) {
316                 /* Do not wait for response. */
317                 ptlrpcd_add_req(req);
318                 RETURN(0);
319         }
320
321         req->rq_interpret_reply = osc_ladvise_interpret;
322         la = ptlrpc_req_async_args(la, req);
323         la->la_oa = oa;
324         la->la_upcall = upcall;
325         la->la_cookie = cookie;
326
327         ptlrpc_set_add_req(rqset, req);
328
329         RETURN(0);
330 }
331
332 static int osc_create(const struct lu_env *env, struct obd_export *exp,
333                       struct obdo *oa)
334 {
335         struct ptlrpc_request *req;
336         struct ost_body       *body;
337         int                    rc;
338         ENTRY;
339
340         LASSERT(oa != NULL);
341         LASSERT(oa->o_valid & OBD_MD_FLGROUP);
342         LASSERT(fid_seq_is_echo(ostid_seq(&oa->o_oi)));
343
344         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
345         if (req == NULL)
346                 GOTO(out, rc = -ENOMEM);
347
348         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE);
349         if (rc) {
350                 ptlrpc_request_free(req);
351                 GOTO(out, rc);
352         }
353
354         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
355         LASSERT(body);
356
357         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
358
359         ptlrpc_request_set_replen(req);
360
361         rc = ptlrpc_queue_wait(req);
362         if (rc)
363                 GOTO(out_req, rc);
364
365         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
366         if (body == NULL)
367                 GOTO(out_req, rc = -EPROTO);
368
369         CDEBUG(D_INFO, "oa flags %x\n", oa->o_flags);
370         lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa);
371
372         oa->o_blksize = cli_brw_size(exp->exp_obd);
373         oa->o_valid |= OBD_MD_FLBLKSZ;
374
375         CDEBUG(D_HA, "transno: %lld\n",
376                lustre_msg_get_transno(req->rq_repmsg));
377 out_req:
378         ptlrpc_req_finished(req);
379 out:
380         RETURN(rc);
381 }
382
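/* Send an OST_PUNCH RPC for the range described by @oa and return without
 * waiting; the result is delivered through @upcall(@cookie, rc). */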
383 int osc_punch_send(struct obd_export *exp, struct obdo *oa,
384                    obd_enqueue_update_f upcall, void *cookie)
385 {
386         struct ptlrpc_request *req;
387         struct osc_setattr_args *sa;
388         struct obd_import *imp = class_exp2cliimp(exp);
389         struct ost_body *body;
390         int rc;
391
392         ENTRY;
393
394         req = ptlrpc_request_alloc(imp, &RQF_OST_PUNCH);
395         if (req == NULL)
396                 RETURN(-ENOMEM);
397
398         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
399         if (rc < 0) {
400                 ptlrpc_request_free(req);
401                 RETURN(rc);
402         }
403
404         osc_set_io_portal(req);
405
406         ptlrpc_at_set_req_timeout(req);
407
408         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
409
410         lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);
411
412         ptlrpc_request_set_replen(req);
413
414         req->rq_interpret_reply = osc_setattr_interpret;
415         sa = ptlrpc_req_async_args(sa, req);
416         sa->sa_oa = oa;
417         sa->sa_upcall = upcall;
418         sa->sa_cookie = cookie;
419
420         ptlrpcd_add_req(req);
421
422         RETURN(0);
423 }
424 EXPORT_SYMBOL(osc_punch_send);
425
426 /**
427  * osc_fallocate_base() - Handles fallocate requests.
428  *
429  * @exp:        Export structure
430  * @oa:         Attributes passed to OSS from client (obdo structure)
431  * @upcall:     Completion callback called once the request finishes
432  * @cookie:     Caller's private data, passed back to @upcall
434  * @mode:       Operation done on given range.
435  *
436  * Handles fallocate requests only. Only block allocation or standard
437  * preallocation is supported currently. Other mode flags are not
438  * supported yet. ftruncate(2) and truncate(2) are supported via a
439  * SETATTR request.
440  *
441  * Return: Non-zero on failure and 0 on success.
442  */
443 int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
444                        obd_enqueue_update_f upcall, void *cookie, int mode)
445 {
446         struct ptlrpc_request *req;
447         struct osc_setattr_args *sa;
448         struct ost_body *body;
449         struct obd_import *imp = class_exp2cliimp(exp);
450         int rc;
451         ENTRY;
452
453         /*
454          * Only mode == 0 (standard prealloc) and FALLOC_FL_KEEP_SIZE
455          * are supported for now. Punch is not supported yet.
456          */
457         if (mode & ~FALLOC_FL_KEEP_SIZE)
458                 RETURN(-EOPNOTSUPP);
459         oa->o_falloc_mode = mode;
460
461         req = ptlrpc_request_alloc(class_exp2cliimp(exp),
462                                    &RQF_OST_FALLOCATE);
463         if (req == NULL)
464                 RETURN(-ENOMEM);
465
466         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
467         if (rc != 0) {
468                 ptlrpc_request_free(req);
469                 RETURN(rc);
470         }
471
472         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
473         LASSERT(body);
474
475         lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);
476
477         ptlrpc_request_set_replen(req);
478
479         req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
480         BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
481         sa = ptlrpc_req_async_args(sa, req);
482         sa->sa_oa = oa;
483         sa->sa_upcall = upcall;
484         sa->sa_cookie = cookie;
485
486         ptlrpcd_add_req(req);
487
488         RETURN(0);
489 }
490
491 static int osc_sync_interpret(const struct lu_env *env,
492                               struct ptlrpc_request *req, void *args, int rc)
493 {
494         struct osc_fsync_args *fa = args;
495         struct ost_body *body;
496         struct cl_attr *attr = &osc_env_info(env)->oti_attr;
497         unsigned long valid = 0;
498         struct cl_object *obj;
499         ENTRY;
500
501         if (rc != 0)
502                 GOTO(out, rc);
503
504         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
505         if (body == NULL) {
506                 CERROR("can't unpack ost_body\n");
507                 GOTO(out, rc = -EPROTO);
508         }
509
510         *fa->fa_oa = body->oa;
511         obj = osc2cl(fa->fa_obj);
512
513         /* Update osc object's blocks attribute */
514         cl_object_attr_lock(obj);
515         if (body->oa.o_valid & OBD_MD_FLBLOCKS) {
516                 attr->cat_blocks = body->oa.o_blocks;
517                 valid |= CAT_BLOCKS;
518         }
519
520         if (valid != 0)
521                 cl_object_attr_update(env, obj, attr, valid);
522         cl_object_attr_unlock(obj);
523
524 out:
525         rc = fa->fa_upcall(fa->fa_cookie, rc);
526         RETURN(rc);
527 }
528
529 int osc_sync_base(struct osc_object *obj, struct obdo *oa,
530                   obd_enqueue_update_f upcall, void *cookie,
531                   struct ptlrpc_request_set *rqset)
532 {
533         struct obd_export     *exp = osc_export(obj);
534         struct ptlrpc_request *req;
535         struct ost_body       *body;
536         struct osc_fsync_args *fa;
537         int                    rc;
538         ENTRY;
539
540         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
541         if (req == NULL)
542                 RETURN(-ENOMEM);
543
544         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
545         if (rc) {
546                 ptlrpc_request_free(req);
547                 RETURN(rc);
548         }
549
550         /* overload the size and blocks fields in the oa with start/end */
551         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
552         LASSERT(body);
553         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
554
555         ptlrpc_request_set_replen(req);
556         req->rq_interpret_reply = osc_sync_interpret;
557
558         fa = ptlrpc_req_async_args(fa, req);
559         fa->fa_obj = obj;
560         fa->fa_oa = oa;
561         fa->fa_upcall = upcall;
562         fa->fa_cookie = cookie;
563
564         ptlrpc_set_add_req(rqset, req);
565
566         RETURN(0);
567 }
568
569 /* Find and locally cancel locks matched by @mode in the resource derived
570  * from @oa. Found locks are added to the @cancels list. Returns the number
571  * of locks added to the @cancels list. */
572 static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
573                                    struct list_head *cancels,
574                                    enum ldlm_mode mode, __u64 lock_flags)
575 {
576         struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
577         struct ldlm_res_id res_id;
578         struct ldlm_resource *res;
579         int count;
580         ENTRY;
581
582         /* Return, i.e. cancel nothing, only if ELC is supported (flag in
583          * export) but disabled through procfs (flag in NS).
584          *
585          * This distinguishes from a case when ELC is not supported originally,
586          * when we still want to cancel locks in advance and just cancel them
587          * locally, without sending any RPC. */
588         if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
589                 RETURN(0);
590
591         ostid_build_res_name(&oa->o_oi, &res_id);
592         res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
593         if (IS_ERR(res))
594                 RETURN(0);
595
596         LDLM_RESOURCE_ADDREF(res);
597         count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
598                                            lock_flags, 0, NULL);
599         LDLM_RESOURCE_DELREF(res);
600         ldlm_resource_putref(res);
601         RETURN(count);
602 }
603
604 static int osc_destroy_interpret(const struct lu_env *env,
605                                  struct ptlrpc_request *req, void *args, int rc)
606 {
607         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
608
609         atomic_dec(&cli->cl_destroy_in_flight);
610         wake_up(&cli->cl_destroy_waitq);
611
612         return 0;
613 }
614
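/* Throttle destroy RPCs: allow a new one only while fewer than
 * cl_max_rpcs_in_flight destroys are in flight; on failure, undo the
 * increment and wake any waiter that raced with us. */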
615 static int osc_can_send_destroy(struct client_obd *cli)
616 {
617         if (atomic_inc_return(&cli->cl_destroy_in_flight) <=
618             cli->cl_max_rpcs_in_flight) {
619                 /* The destroy request can be sent */
620                 return 1;
621         }
622         if (atomic_dec_return(&cli->cl_destroy_in_flight) <
623             cli->cl_max_rpcs_in_flight) {
624                 /*
625                  * The counter has been modified between the two atomic
626                  * operations.
627                  */
628                 wake_up(&cli->cl_destroy_waitq);
629         }
630         return 0;
631 }
632
633 static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
634                        struct obdo *oa)
635 {
636         struct client_obd     *cli = &exp->exp_obd->u.cli;
637         struct ptlrpc_request *req;
638         struct ost_body       *body;
639         LIST_HEAD(cancels);
640         int rc, count;
641         ENTRY;
642
643         if (!oa) {
644                 CDEBUG(D_INFO, "oa NULL\n");
645                 RETURN(-EINVAL);
646         }
647
648         count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
649                                         LDLM_FL_DISCARD_DATA);
650
651         req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
652         if (req == NULL) {
653                 ldlm_lock_list_put(&cancels, l_bl_ast, count);
654                 RETURN(-ENOMEM);
655         }
656
657         rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY,
658                                0, &cancels, count);
659         if (rc) {
660                 ptlrpc_request_free(req);
661                 RETURN(rc);
662         }
663
664         req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
665         ptlrpc_at_set_req_timeout(req);
666
667         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
668         LASSERT(body);
669         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
670
671         ptlrpc_request_set_replen(req);
672
673         req->rq_interpret_reply = osc_destroy_interpret;
674         if (!osc_can_send_destroy(cli)) {
675                 /*
676                  * Wait until the number of on-going destroy RPCs drops
677                  * under max_rpc_in_flight
678                  */
679                 rc = l_wait_event_abortable_exclusive(
680                         cli->cl_destroy_waitq,
681                         osc_can_send_destroy(cli));
682                 if (rc) {
683                         ptlrpc_req_finished(req);
684                         RETURN(-EINTR);
685                 }
686         }
687
688         /* Do not wait for response */
689         ptlrpcd_add_req(req);
690         RETURN(0);
691 }
692
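/* Fill the cache accounting fields of @oa: how much data the client has
 * dirty, how much more grant it wants (o_undirty), the grant it already
 * holds, and any grant that was lost (o_dropped). */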
693 static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
694                                 long writing_bytes)
695 {
696         u64 bits = OBD_MD_FLBLOCKS | OBD_MD_FLGRANT;
697
698         LASSERT(!(oa->o_valid & bits));
699
700         oa->o_valid |= bits;
701         spin_lock(&cli->cl_loi_list_lock);
702         if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data, GRANT_PARAM))
703                 oa->o_dirty = cli->cl_dirty_grant;
704         else
705                 oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
706         if (unlikely(cli->cl_dirty_pages > cli->cl_dirty_max_pages)) {
707                 CERROR("dirty %lu > dirty_max %lu\n",
708                        cli->cl_dirty_pages,
709                        cli->cl_dirty_max_pages);
710                 oa->o_undirty = 0;
711         } else if (unlikely(atomic_long_read(&obd_dirty_pages) >
712                             (long)(obd_max_dirty_pages + 1))) {
713                 /* The atomic_read() and the atomic_inc() are
714                  * not covered by a lock, thus they may safely race and trip
715                  * this CERROR() unless we add in a small fudge factor (+1). */
716                 CERROR("%s: dirty %ld > system dirty_max %ld\n",
717                        cli_name(cli), atomic_long_read(&obd_dirty_pages),
718                        obd_max_dirty_pages);
719                 oa->o_undirty = 0;
720         } else if (unlikely(cli->cl_dirty_max_pages - cli->cl_dirty_pages >
721                             0x7fffffff)) {
722                 CERROR("dirty %lu - dirty_max %lu too big???\n",
723                        cli->cl_dirty_pages, cli->cl_dirty_max_pages);
724                 oa->o_undirty = 0;
725         } else {
726                 unsigned long nrpages;
727                 unsigned long undirty;
728
729                 nrpages = cli->cl_max_pages_per_rpc;
730                 nrpages *= cli->cl_max_rpcs_in_flight + 1;
731                 nrpages = max(nrpages, cli->cl_dirty_max_pages);
732                 undirty = nrpages << PAGE_SHIFT;
733                 if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data,
734                                  GRANT_PARAM)) {
735                         int nrextents;
736
737                         /* take extent tax into account when asking for more
738                          * grant space */
739                         nrextents = (nrpages + cli->cl_max_extent_pages - 1)  /
740                                      cli->cl_max_extent_pages;
741                         undirty += nrextents * cli->cl_grant_extent_tax;
742                 }
743                 /* Do not ask for more than OBD_MAX_GRANT - a margin for server
744                  * to add extent tax, etc.
745                  */
746                 oa->o_undirty = min(undirty, OBD_MAX_GRANT &
747                                     ~(PTLRPC_MAX_BRW_SIZE * 4UL));
748         }
749         oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
750         oa->o_dropped = cli->cl_lost_grant;
751         cli->cl_lost_grant = 0;
752         spin_unlock(&cli->cl_loi_list_lock);
753         CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
754                oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
755 }
756
757 void osc_update_next_shrink(struct client_obd *cli)
758 {
759         cli->cl_next_shrink_grant = ktime_get_seconds() +
760                                     cli->cl_grant_shrink_interval;
761
762         CDEBUG(D_CACHE, "next time %lld to shrink grant\n",
763                cli->cl_next_shrink_grant);
764 }
765
766 static void __osc_update_grant(struct client_obd *cli, u64 grant)
767 {
768         spin_lock(&cli->cl_loi_list_lock);
769         cli->cl_avail_grant += grant;
770         spin_unlock(&cli->cl_loi_list_lock);
771 }
772
773 static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
774 {
775         if (body->oa.o_valid & OBD_MD_FLGRANT) {
776                 CDEBUG(D_CACHE, "got %llu extra grant\n", body->oa.o_grant);
777                 __osc_update_grant(cli, body->oa.o_grant);
778         }
779 }
780
781 /**
782  * grant thread data for shrinking space.
783  */
784 struct grant_thread_data {
785         struct list_head        gtd_clients;
786         struct mutex            gtd_mutex;
787         unsigned long           gtd_stopped:1;
788 };
789 static struct grant_thread_data client_gtd;
790
791 static int osc_shrink_grant_interpret(const struct lu_env *env,
792                                       struct ptlrpc_request *req,
793                                       void *args, int rc)
794 {
795         struct osc_grant_args *aa = args;
796         struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
797         struct ost_body *body;
798
799         if (rc != 0) {
800                 __osc_update_grant(cli, aa->aa_oa->o_grant);
801                 GOTO(out, rc);
802         }
803
804         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
805         LASSERT(body);
806         osc_update_grant(cli, body);
807 out:
808         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
809         aa->aa_oa = NULL;
810
811         return rc;
812 }
813
814 static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
815 {
816         spin_lock(&cli->cl_loi_list_lock);
817         oa->o_grant = cli->cl_avail_grant / 4;
818         cli->cl_avail_grant -= oa->o_grant;
819         spin_unlock(&cli->cl_loi_list_lock);
820         if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
821                 oa->o_valid |= OBD_MD_FLFLAGS;
822                 oa->o_flags = 0;
823         }
824         oa->o_flags |= OBD_FL_SHRINK_GRANT;
825         osc_update_next_shrink(cli);
826 }
827
828 /* Shrink the current grant, either from some large amount to enough for a
829  * full set of in-flight RPCs, or if we have already shrunk to that limit
830  * then to enough for a single RPC.  This avoids keeping more grant than
831  * needed, and avoids shrinking the grant piecemeal. */
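/* For example, assuming 4 KiB pages, cl_max_pages_per_rpc = 1024 (4 MiB
 * RPCs) and cl_max_rpcs_in_flight = 8, the first shrink target is
 * (8 + 1) * 4 MiB = 36 MiB; once avail_grant is already at or below that,
 * the next shrink drops to a single 4 MiB RPC worth of grant. */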
832 static int osc_shrink_grant(struct client_obd *cli)
833 {
834         __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
835                              (cli->cl_max_pages_per_rpc << PAGE_SHIFT);
836
837         spin_lock(&cli->cl_loi_list_lock);
838         if (cli->cl_avail_grant <= target_bytes)
839                 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
840         spin_unlock(&cli->cl_loi_list_lock);
841
842         return osc_shrink_grant_to_target(cli, target_bytes);
843 }
844
845 int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
846 {
847         int                     rc = 0;
848         struct ost_body        *body;
849         ENTRY;
850
851         spin_lock(&cli->cl_loi_list_lock);
852         /* Don't shrink if we are already above or below the desired limit.
853          * We don't want to shrink below a single RPC, as that will negatively
854          * impact block allocation and long-term performance. */
855         if (target_bytes < cli->cl_max_pages_per_rpc << PAGE_SHIFT)
856                 target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
857
858         if (target_bytes >= cli->cl_avail_grant) {
859                 spin_unlock(&cli->cl_loi_list_lock);
860                 RETURN(0);
861         }
862         spin_unlock(&cli->cl_loi_list_lock);
863
864         OBD_ALLOC_PTR(body);
865         if (!body)
866                 RETURN(-ENOMEM);
867
868         osc_announce_cached(cli, &body->oa, 0);
869
870         spin_lock(&cli->cl_loi_list_lock);
871         if (target_bytes >= cli->cl_avail_grant) {
872                 /* available grant has changed since target calculation */
873                 spin_unlock(&cli->cl_loi_list_lock);
874                 GOTO(out_free, rc = 0);
875         }
876         body->oa.o_grant = cli->cl_avail_grant - target_bytes;
877         cli->cl_avail_grant = target_bytes;
878         spin_unlock(&cli->cl_loi_list_lock);
879         if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
880                 body->oa.o_valid |= OBD_MD_FLFLAGS;
881                 body->oa.o_flags = 0;
882         }
883         body->oa.o_flags |= OBD_FL_SHRINK_GRANT;
884         osc_update_next_shrink(cli);
885
886         rc = osc_set_info_async(NULL, cli->cl_import->imp_obd->obd_self_export,
887                                 sizeof(KEY_GRANT_SHRINK), KEY_GRANT_SHRINK,
888                                 sizeof(*body), body, NULL);
889         if (rc != 0)
890                 __osc_update_grant(cli, body->oa.o_grant);
891 out_free:
892         OBD_FREE_PTR(body);
893         RETURN(rc);
894 }
895
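/* Decide whether to send a grant-shrink RPC for @client: the import must
 * support GRANT_SHRINK and not have it disabled, the shrink deadline must
 * be (nearly) due, and the client must hold more grant than one full-sized
 * RPC needs. */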
896 static int osc_should_shrink_grant(struct client_obd *client)
897 {
898         time64_t next_shrink = client->cl_next_shrink_grant;
899
900         if (client->cl_import == NULL)
901                 return 0;
902
903         if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
904             client->cl_import->imp_grant_shrink_disabled) {
905                 osc_update_next_shrink(client);
906                 return 0;
907         }
908
909         if (ktime_get_seconds() >= next_shrink - 5) {
910                 /* Get the current RPC size directly, instead of going via:
911                  * cli_brw_size(obd->u.cli.cl_import->imp_obd->obd_self_export)
912                  * Keep comment here so that it can be found by searching. */
913                 int brw_size = client->cl_max_pages_per_rpc << PAGE_SHIFT;
914
915                 if (client->cl_import->imp_state == LUSTRE_IMP_FULL &&
916                     client->cl_avail_grant > brw_size)
917                         return 1;
918                 else
919                         osc_update_next_shrink(client);
920         }
921         return 0;
922 }
923
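/* maximum number of grant-shrink RPCs issued per pass of the grant work */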
924 #define GRANT_SHRINK_RPC_BATCH  100
925
926 static struct delayed_work work;
927
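/* Periodic worker: walk the registered clients, shrink grant for up to
 * GRANT_SHRINK_RPC_BATCH of them per pass, then re-arm itself for the
 * earliest upcoming shrink deadline. */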
928 static void osc_grant_work_handler(struct work_struct *data)
929 {
930         struct client_obd *cli;
931         int rpc_sent;
932         bool init_next_shrink = true;
933         time64_t next_shrink = ktime_get_seconds() + GRANT_SHRINK_INTERVAL;
934
935         rpc_sent = 0;
936         mutex_lock(&client_gtd.gtd_mutex);
937         list_for_each_entry(cli, &client_gtd.gtd_clients,
938                             cl_grant_chain) {
939                 if (rpc_sent < GRANT_SHRINK_RPC_BATCH &&
940                     osc_should_shrink_grant(cli)) {
941                         osc_shrink_grant(cli);
942                         rpc_sent++;
943                 }
944
945                 if (!init_next_shrink) {
946                         if (cli->cl_next_shrink_grant < next_shrink &&
947                             cli->cl_next_shrink_grant > ktime_get_seconds())
948                                 next_shrink = cli->cl_next_shrink_grant;
949                 } else {
950                         init_next_shrink = false;
951                         next_shrink = cli->cl_next_shrink_grant;
952                 }
953         }
954         mutex_unlock(&client_gtd.gtd_mutex);
955
956         if (client_gtd.gtd_stopped == 1)
957                 return;
958
959         if (next_shrink > ktime_get_seconds()) {
960                 time64_t delay = next_shrink - ktime_get_seconds();
961
962                 schedule_delayed_work(&work, cfs_time_seconds(delay));
963         } else {
964                 schedule_work(&work.work);
965         }
966 }
967
968 void osc_schedule_grant_work(void)
969 {
970         cancel_delayed_work_sync(&work);
971         schedule_work(&work.work);
972 }
973
974 /**
975  * Start grant work for returning grant to the server for idle clients.
976  */
977 static int osc_start_grant_work(void)
978 {
979         client_gtd.gtd_stopped = 0;
980         mutex_init(&client_gtd.gtd_mutex);
981         INIT_LIST_HEAD(&client_gtd.gtd_clients);
982
983         INIT_DELAYED_WORK(&work, osc_grant_work_handler);
984         schedule_work(&work.work);
985
986         return 0;
987 }
988
989 static void osc_stop_grant_work(void)
990 {
991         client_gtd.gtd_stopped = 1;
992         cancel_delayed_work_sync(&work);
993 }
994
995 static void osc_add_grant_list(struct client_obd *client)
996 {
997         mutex_lock(&client_gtd.gtd_mutex);
998         list_add(&client->cl_grant_chain, &client_gtd.gtd_clients);
999         mutex_unlock(&client_gtd.gtd_mutex);
1000 }
1001
1002 static void osc_del_grant_list(struct client_obd *client)
1003 {
1004         if (list_empty(&client->cl_grant_chain))
1005                 return;
1006
1007         mutex_lock(&client_gtd.gtd_mutex);
1008         list_del_init(&client->cl_grant_chain);
1009         mutex_unlock(&client_gtd.gtd_mutex);
1010 }
1011
1012 void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
1013 {
1014         /*
1015          * ocd_grant is the total grant amount we're expected to hold: if we've
1016          * been evicted, it's the new avail_grant amount, cl_dirty_pages will
1017          * drop to 0 as inflight RPCs fail out; otherwise, it's avail_grant +
1018          * dirty.
1019          *
1020          * race is tolerable here: if we're evicted, but imp_state already
1021          * left EVICTED state, then cl_dirty_pages must be 0 already.
1022          */
1023         spin_lock(&cli->cl_loi_list_lock);
1024         cli->cl_avail_grant = ocd->ocd_grant;
1025         if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
1026                 cli->cl_avail_grant -= cli->cl_reserved_grant;
1027                 if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
1028                         cli->cl_avail_grant -= cli->cl_dirty_grant;
1029                 else
1030                         cli->cl_avail_grant -=
1031                                         cli->cl_dirty_pages << PAGE_SHIFT;
1032         }
1033
1034         if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
1035                 u64 size;
1036                 int chunk_mask;
1037
1038                 /* overhead for each extent insertion */
1039                 cli->cl_grant_extent_tax = ocd->ocd_grant_tax_kb << 10;
1040                 /* determine the appropriate chunk size used by osc_extent. */
1041                 cli->cl_chunkbits = max_t(int, PAGE_SHIFT,
1042                                           ocd->ocd_grant_blkbits);
1043                 /* max_pages_per_rpc must be chunk aligned */
1044                 chunk_mask = ~((1 << (cli->cl_chunkbits - PAGE_SHIFT)) - 1);
1045                 cli->cl_max_pages_per_rpc = (cli->cl_max_pages_per_rpc +
1046                                              ~chunk_mask) & chunk_mask;
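                /* e.g. with 4 KiB pages and 64 KiB server chunks,
                 * cl_chunkbits - PAGE_SHIFT = 4, so chunk_mask = ~15 and the
                 * line above rounds cl_max_pages_per_rpc up to a multiple of
                 * 16 pages */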
1047                 /* determine maximum extent size, in #pages */
1048                 size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
1049                 cli->cl_max_extent_pages = size >> PAGE_SHIFT;
1050                 if (cli->cl_max_extent_pages == 0)
1051                         cli->cl_max_extent_pages = 1;
1052         } else {
1053                 cli->cl_grant_extent_tax = 0;
1054                 cli->cl_chunkbits = PAGE_SHIFT;
1055                 cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
1056         }
1057         spin_unlock(&cli->cl_loi_list_lock);
1058
1059         CDEBUG(D_CACHE,
1060                "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
1061                cli_name(cli),
1062                cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
1063                cli->cl_max_extent_pages);
1064
1065         if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
1066                 osc_add_grant_list(cli);
1067 }
1068 EXPORT_SYMBOL(osc_init_grant);
1069
1070 /* We assume that the reason this OSC got a short read is because it read
1071  * beyond the end of a stripe file; i.e. lustre is reading a sparse file
1072  * via the LOV, and it _knows_ it's reading inside the file, it's just that
1073  * this stripe never got written at or beyond this stripe offset yet. */
1074 static void handle_short_read(int nob_read, size_t page_count,
1075                               struct brw_page **pga)
1076 {
1077         char *ptr;
1078         int i = 0;
1079
1080         /* skip bytes read OK */
1081         while (nob_read > 0) {
1082                 LASSERT(page_count > 0);
1083
1084                 if (pga[i]->count > nob_read) {
1085                         /* EOF inside this page */
1086                         ptr = kmap(pga[i]->pg) +
1087                                 (pga[i]->off & ~PAGE_MASK);
1088                         memset(ptr + nob_read, 0, pga[i]->count - nob_read);
1089                         kunmap(pga[i]->pg);
1090                         page_count--;
1091                         i++;
1092                         break;
1093                 }
1094
1095                 nob_read -= pga[i]->count;
1096                 page_count--;
1097                 i++;
1098         }
1099
1100         /* zero remaining pages */
1101         while (page_count-- > 0) {
1102                 ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
1103                 memset(ptr, 0, pga[i]->count);
1104                 kunmap(pga[i]->pg);
1105                 i++;
1106         }
1107 }
1108
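/* Check the per-niobuf return codes in a BRW_WRITE reply: any negative rc
 * is returned as the error, a nonzero positive rc or a bulk byte count that
 * differs from what was requested is treated as a protocol error. */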
1109 static int check_write_rcs(struct ptlrpc_request *req,
1110                            int requested_nob, int niocount,
1111                            size_t page_count, struct brw_page **pga)
1112 {
1113         int     i;
1114         __u32   *remote_rcs;
1115
1116         remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS,
1117                                                   sizeof(*remote_rcs) *
1118                                                   niocount);
1119         if (remote_rcs == NULL) {
1120                 CDEBUG(D_INFO, "Missing/short RC vector on BRW_WRITE reply\n");
1121                 return(-EPROTO);
1122         }
1123
1124         /* return error if any niobuf was in error */
1125         for (i = 0; i < niocount; i++) {
1126                 if ((int)remote_rcs[i] < 0) {
1127                         CDEBUG(D_INFO, "rc[%d]: %d req %p\n",
1128                                i, remote_rcs[i], req);
1129                         return remote_rcs[i];
1130                 }
1131
1132                 if (remote_rcs[i] != 0) {
1133                         CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n",
1134                                 i, remote_rcs[i], req);
1135                         return(-EPROTO);
1136                 }
1137         }
1138         if (req->rq_bulk != NULL &&
1139             req->rq_bulk->bd_nob_transferred != requested_nob) {
1140                 CERROR("Unexpected # bytes transferred: %d (requested %d)\n",
1141                        req->rq_bulk->bd_nob_transferred, requested_nob);
1142                 return(-EPROTO);
1143         }
1144
1145         return (0);
1146 }
1147
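/* Two brw_pages can be merged into one niobuf only when their flags match
 * (ignoring flags known to be safe to mix) and the second page starts
 * exactly where the first one ends. */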
1148 static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
1149 {
1150         if (p1->flag != p2->flag) {
1151                 unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
1152                                   OBD_BRW_SYNC       | OBD_BRW_ASYNC   |
1153                                   OBD_BRW_NOQUOTA    | OBD_BRW_SOFT_SYNC);
1154
1155                 /* warn if we try to combine flags that we don't know to be
1156                  * safe to combine */
1157                 if (unlikely((p1->flag & mask) != (p2->flag & mask))) {
1158                         CWARN("Saw flags 0x%x and 0x%x in the same brw, please "
1159                               "report this at https://jira.whamcloud.com/\n",
1160                               p1->flag, p2->flag);
1161                 }
1162                 return 0;
1163         }
1164
1165         return (p1->off + p1->count == p2->off);
1166 }
1167
1168 #if IS_ENABLED(CONFIG_CRC_T10DIF)
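/* Compute a T10-PI bulk checksum: generate per-sector guard tags for each
 * page with @fn, collect them a page at a time, and hash the collected
 * tags into the final 32-bit checksum. */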
1169 static int osc_checksum_bulk_t10pi(const char *obd_name, int nob,
1170                                    size_t pg_count, struct brw_page **pga,
1171                                    int opc, obd_dif_csum_fn *fn,
1172                                    int sector_size,
1173                                    u32 *check_sum)
1174 {
1175         struct ahash_request *req;
1176         /* Use Adler as the default checksum type on top of DIF tags */
1177         unsigned char cfs_alg = cksum_obd2cfs(OBD_CKSUM_T10_TOP);
1178         struct page *__page;
1179         unsigned char *buffer;
1180         __u16 *guard_start;
1181         unsigned int bufsize;
1182         int guard_number;
1183         int used_number = 0;
1184         int used;
1185         u32 cksum;
1186         int rc = 0;
1187         int i = 0;
1188
1189         LASSERT(pg_count > 0);
1190
1191         __page = alloc_page(GFP_KERNEL);
1192         if (__page == NULL)
1193                 return -ENOMEM;
1194
1195         req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
1196         if (IS_ERR(req)) {
1197                 rc = PTR_ERR(req);
1198                 CERROR("%s: unable to initialize checksum hash %s: rc = %d\n",
1199                        obd_name, cfs_crypto_hash_name(cfs_alg), rc);
1200                 GOTO(out, rc);
1201         }
1202
1203         buffer = kmap(__page);
1204         guard_start = (__u16 *)buffer;
1205         guard_number = PAGE_SIZE / sizeof(*guard_start);
1206         while (nob > 0 && pg_count > 0) {
1207                 unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;
1208
1209                 /* corrupt the data before we compute the checksum, to
1210                  * simulate an OST->client data error */
1211                 if (unlikely(i == 0 && opc == OST_READ &&
1212                              OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
1213                         unsigned char *ptr = kmap(pga[i]->pg);
1214                         int off = pga[i]->off & ~PAGE_MASK;
1215
1216                         memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
1217                         kunmap(pga[i]->pg);
1218                 }
1219
1220                 /*
1221                  * The left guard number should be able to hold checksums of a
1222                  * whole page
1223                  */
1224                 rc = obd_page_dif_generate_buffer(obd_name, pga[i]->pg,
1225                                                   pga[i]->off & ~PAGE_MASK,
1226                                                   count,
1227                                                   guard_start + used_number,
1228                                                   guard_number - used_number,
1229                                                   &used, sector_size,
1230                                                   fn);
1231                 if (rc)
1232                         break;
1233
1234                 used_number += used;
1235                 if (used_number == guard_number) {
1236                         cfs_crypto_hash_update_page(req, __page, 0,
1237                                 used_number * sizeof(*guard_start));
1238                         used_number = 0;
1239                 }
1240
1241                 nob -= pga[i]->count;
1242                 pg_count--;
1243                 i++;
1244         }
1245         kunmap(__page);
1246         if (rc)
1247                 GOTO(out, rc);
1248
1249         if (used_number != 0)
1250                 cfs_crypto_hash_update_page(req, __page, 0,
1251                         used_number * sizeof(*guard_start));
1252
1253         bufsize = sizeof(cksum);
1254         cfs_crypto_hash_final(req, (unsigned char *)&cksum, &bufsize);
1255
1256         /* For sending we only compute the wrong checksum instead
1257          * of corrupting the data so it is still correct on a redo */
1258         if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
1259                 cksum++;
1260
1261         *check_sum = cksum;
1262 out:
1263         __free_page(__page);
1264         return rc;
1265 }
1266 #else /* !CONFIG_CRC_T10DIF */
1267 #define obd_dif_ip_fn NULL
1268 #define obd_dif_crc_fn NULL
1269 #define osc_checksum_bulk_t10pi(name, nob, pgc, pga, opc, fn, ssize, csum)  \
1270         -EOPNOTSUPP
1271 #endif /* CONFIG_CRC_T10DIF */
1272
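/* Plain bulk checksum: hash the used part of every page with the algorithm
 * selected by @cksum_type and return the 32-bit result in @cksum. */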
1273 static int osc_checksum_bulk(int nob, size_t pg_count,
1274                              struct brw_page **pga, int opc,
1275                              enum cksum_types cksum_type,
1276                              u32 *cksum)
1277 {
1278         int                             i = 0;
1279         struct ahash_request           *req;
1280         unsigned int                    bufsize;
1281         unsigned char                   cfs_alg = cksum_obd2cfs(cksum_type);
1282
1283         LASSERT(pg_count > 0);
1284
1285         req = cfs_crypto_hash_init(cfs_alg, NULL, 0);
1286         if (IS_ERR(req)) {
1287                 CERROR("Unable to initialize checksum hash %s\n",
1288                        cfs_crypto_hash_name(cfs_alg));
1289                 return PTR_ERR(req);
1290         }
1291
1292         while (nob > 0 && pg_count > 0) {
1293                 unsigned int count = pga[i]->count > nob ? nob : pga[i]->count;
1294
1295                 /* corrupt the data before we compute the checksum, to
1296                  * simulate an OST->client data error */
1297                 if (i == 0 && opc == OST_READ &&
1298                     OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
1299                         unsigned char *ptr = kmap(pga[i]->pg);
1300                         int off = pga[i]->off & ~PAGE_MASK;
1301
1302                         memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
1303                         kunmap(pga[i]->pg);
1304                 }
1305                 cfs_crypto_hash_update_page(req, pga[i]->pg,
1306                                             pga[i]->off & ~PAGE_MASK,
1307                                             count);
1308                 LL_CDEBUG_PAGE(D_PAGE, pga[i]->pg, "off %d\n",
1309                                (int)(pga[i]->off & ~PAGE_MASK));
1310
1311                 nob -= pga[i]->count;
1312                 pg_count--;
1313                 i++;
1314         }
1315
1316         bufsize = sizeof(*cksum);
1317         cfs_crypto_hash_final(req, (unsigned char *)cksum, &bufsize);
1318
1319         /* For sending we only compute the wrong checksum instead
1320          * of corrupting the data so it is still correct on a redo */
1321         if (opc == OST_WRITE && OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_SEND))
1322                 (*cksum)++;
1323
1324         return 0;
1325 }
1326
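/* Select the bulk checksum flavour: T10-PI when @cksum_type maps to a DIF
 * function, otherwise the plain page hash. */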
1327 static int osc_checksum_bulk_rw(const char *obd_name,
1328                                 enum cksum_types cksum_type,
1329                                 int nob, size_t pg_count,
1330                                 struct brw_page **pga, int opc,
1331                                 u32 *check_sum)
1332 {
1333         obd_dif_csum_fn *fn = NULL;
1334         int sector_size = 0;
1335         int rc;
1336
1337         ENTRY;
1338         obd_t10_cksum2dif(cksum_type, &fn, &sector_size);
1339
1340         if (fn)
1341                 rc = osc_checksum_bulk_t10pi(obd_name, nob, pg_count, pga,
1342                                              opc, fn, sector_size, check_sum);
1343         else
1344                 rc = osc_checksum_bulk(nob, pg_count, pga, opc, cksum_type,
1345                                        check_sum);
1346
1347         RETURN(rc);
1348 }
1349
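/* For encrypted files the write path substitutes llcrypt bounce pages for
 * the pagecache pages in @pga; free them here and undo the count/offset
 * adjustments recorded in bp_count_diff/bp_off_diff. */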
1350 static inline void osc_release_bounce_pages(struct brw_page **pga,
1351                                             u32 page_count)
1352 {
1353 #ifdef HAVE_LUSTRE_CRYPTO
1354         int i;
1355
1356         for (i = 0; i < page_count; i++) {
1357                 if (pga[i]->pg->mapping)
1358                         /* bounce pages are unmapped */
1359                         continue;
1360                 if (pga[i]->flag & OBD_BRW_SYNC)
1361                         /* sync transfer cannot have encrypted pages */
1362                         continue;
1363                 llcrypt_finalize_bounce_page(&pga[i]->pg);
1364                 pga[i]->count -= pga[i]->bp_count_diff;
1365                 pga[i]->off += pga[i]->bp_off_diff;
1366         }
1367 #endif
1368 }
1369
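/* Build an OST_READ or OST_WRITE request covering @page_count pages.
 * Writes to an encrypted inode are first copied into encrypted bounce
 * pages; transfers small enough for short I/O are sent inline in the
 * request, everything else gets a bulk descriptor. */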
1370 static int
1371 osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
1372                      u32 page_count, struct brw_page **pga,
1373                      struct ptlrpc_request **reqp, int resend)
1374 {
1375         struct ptlrpc_request *req;
1376         struct ptlrpc_bulk_desc *desc;
1377         struct ost_body *body;
1378         struct obd_ioobj *ioobj;
1379         struct niobuf_remote *niobuf;
1380         int niocount, i, requested_nob, opc, rc, short_io_size = 0;
1381         struct osc_brw_async_args *aa;
1382         struct req_capsule *pill;
1383         struct brw_page *pg_prev;
1384         void *short_io_buf;
1385         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1386         struct inode *inode;
1387
1388         ENTRY;
1389         inode = page2inode(pga[0]->pg);
1390         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
1391                 RETURN(-ENOMEM); /* Recoverable */
1392         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
1393                 RETURN(-EINVAL); /* Fatal */
1394
1395         if ((cmd & OBD_BRW_WRITE) != 0) {
1396                 opc = OST_WRITE;
1397                 req = ptlrpc_request_alloc_pool(cli->cl_import,
1398                                                 osc_rq_pool,
1399                                                 &RQF_OST_BRW_WRITE);
1400         } else {
1401                 opc = OST_READ;
1402                 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
1403         }
1404         if (req == NULL)
1405                 RETURN(-ENOMEM);
1406
1407         if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
1408                 for (i = 0; i < page_count; i++) {
1409                         struct brw_page *pg = pga[i];
1410                         struct page *data_page = NULL;
1411                         bool retried = false;
1412                         bool lockedbymyself;
1413
1414 retry_encrypt:
1415                         /* The page can already be locked when we arrive here.
1416                          * This is possible when cl_page_assume/vvp_page_assume
1417                          * is stuck on wait_on_page_writeback with page lock
1418                          * held. In this case there is no risk for the lock to
1419                          * be released while we are doing our encryption
1420                          * processing, because writeback against that page will
1421                          * end in vvp_page_completion_write/cl_page_completion,
1422                          * which happens only once the page is fully processed.
1423                          */
1424                         lockedbymyself = trylock_page(pg->pg);
1425                         data_page =
1426                                 llcrypt_encrypt_pagecache_blocks(pg->pg,
1427                                                                  PAGE_SIZE, 0,
1428                                                                  GFP_NOFS);
1429                         if (lockedbymyself)
1430                                 unlock_page(pg->pg);
1431                         if (IS_ERR(data_page)) {
1432                                 rc = PTR_ERR(data_page);
1433                                 if (rc == -ENOMEM && !retried) {
1434                                         retried = true;
1435                                         rc = 0;
1436                                         goto retry_encrypt;
1437                                 }
1438                                 ptlrpc_request_free(req);
1439                                 RETURN(rc);
1440                         }
1441                         /* len is forced to PAGE_SIZE, and poff to 0
1442                          * so store the old, clear text info
1443                          */
1444                         pg->pg = data_page;
1445                         pg->bp_count_diff = PAGE_SIZE - pg->count;
1446                         pg->count = PAGE_SIZE;
1447                         pg->bp_off_diff = pg->off & ~PAGE_MASK;
1448                         pg->off = pg->off & PAGE_MASK;
1449                 }
1450         }
1451
1452         for (niocount = i = 1; i < page_count; i++) {
1453                 if (!can_merge_pages(pga[i - 1], pga[i]))
1454                         niocount++;
1455         }
1456
1457         pill = &req->rq_pill;
1458         req_capsule_set_size(pill, &RMF_OBD_IOOBJ, RCL_CLIENT,
1459                              sizeof(*ioobj));
1460         req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
1461                              niocount * sizeof(*niobuf));
1462
1463         for (i = 0; i < page_count; i++)
1464                 short_io_size += pga[i]->count;
1465
1466         /* Check if read/write is small enough to be a short io. */
1467         if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
1468             !imp_connect_shortio(cli->cl_import))
1469                 short_io_size = 0;
1470
1471         req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_CLIENT,
1472                              opc == OST_READ ? 0 : short_io_size);
1473         if (opc == OST_READ)
1474                 req_capsule_set_size(pill, &RMF_SHORT_IO, RCL_SERVER,
1475                                      short_io_size);
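        /* Short I/O in brief: instead of setting up an LNet bulk transfer,
         * the data travels inline in the RPC itself -- in the RMF_SHORT_IO
         * request buffer for writes, or in the reply buffer for reads
         * (hence the RCL_SERVER sizing above). It only applies to a single
         * contiguous niobuf within the size the server advertised. */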
1476
1477         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
1478         if (rc) {
1479                 ptlrpc_request_free(req);
1480                 RETURN(rc);
1481         }
1482         osc_set_io_portal(req);
1483
1484         ptlrpc_at_set_req_timeout(req);
1485         /* ask ptlrpc not to resend on EINPROGRESS since BRWs have their own
1486          * retry logic */
1487         req->rq_no_retry_einprogress = 1;
1488
1489         if (short_io_size != 0) {
1490                 desc = NULL;
1491                 short_io_buf = NULL;
1492                 goto no_bulk;
1493         }
1494
1495         desc = ptlrpc_prep_bulk_imp(req, page_count,
1496                 cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
1497                 (opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
1498                         PTLRPC_BULK_PUT_SINK),
1499                 OST_BULK_PORTAL,
1500                 &ptlrpc_bulk_kiov_pin_ops);
1501
1502         if (desc == NULL)
1503                 GOTO(out, rc = -ENOMEM);
1504         /* NB request now owns desc and will free it when it gets freed */
1505 no_bulk:
1506         body = req_capsule_client_get(pill, &RMF_OST_BODY);
1507         ioobj = req_capsule_client_get(pill, &RMF_OBD_IOOBJ);
1508         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1509         LASSERT(body != NULL && ioobj != NULL && niobuf != NULL);
1510
1511         lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa);
1512
1513         /* For READ and WRITE, we can't fill o_uid and o_gid using from_kuid()
1514          * and from_kgid(), because they are asynchronous. Fortunately, the
1515          * oa variable contains valid o_uid and o_gid for these two operations,
1516          * and filling o_uid and o_gid is enough for nrs-tbf, see LU-9658.
1517          * OBD_MD_FLUID and OBD_MD_FLGID are not set, in order to avoid
1518          * breaking other processing logic */
1519         body->oa.o_uid = oa->o_uid;
1520         body->oa.o_gid = oa->o_gid;
1521
1522         obdo_to_ioobj(oa, ioobj);
1523         ioobj->ioo_bufcnt = niocount;
1524         /* The high bits of ioo_max_brw tell the server the _maximum_ number
1525          * of bulks that might be sent for this request. The actual number is
1526          * decided when the RPC is finally sent in ptlrpc_register_bulk(). It
1527          * sends "max - 1" for compatibility with old clients that send "0",
1528          * and so the actual maximum is a power-of-two number, not one less. LU-1431 */
1529         if (desc != NULL)
1530                 ioobj_max_brw_set(ioobj, desc->bd_md_max_brw);
1531         else /* short io */
1532                 ioobj_max_brw_set(ioobj, 0);
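        /* Illustration of the "max - 1" encoding described above: a wire
         * value of 0 decodes as 1 bulk (matching old clients that always
         * sent 0), and a power-of-two maximum such as 256 is stored as
         * 255, so it still fits the available bits. */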
1533
1534         if (short_io_size != 0) {
1535                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1536                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1537                         body->oa.o_flags = 0;
1538                 }
1539                 body->oa.o_flags |= OBD_FL_SHORT_IO;
1540                 CDEBUG(D_CACHE, "Using short io for data transfer, size = %d\n",
1541                        short_io_size);
1542                 if (opc == OST_WRITE) {
1543                         short_io_buf = req_capsule_client_get(pill,
1544                                                               &RMF_SHORT_IO);
1545                         LASSERT(short_io_buf != NULL);
1546                 }
1547         }
1548
1549         LASSERT(page_count > 0);
1550         pg_prev = pga[0];
1551         for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
1552                 struct brw_page *pg = pga[i];
1553                 int poff = pg->off & ~PAGE_MASK;
1554
1555                 LASSERT(pg->count > 0);
1556                 /* make sure there is no gap in the middle of the page array */
1557                 LASSERTF(page_count == 1 ||
1558                          (ergo(i == 0, poff + pg->count == PAGE_SIZE) &&
1559                           ergo(i > 0 && i < page_count - 1,
1560                                poff == 0 && pg->count == PAGE_SIZE)   &&
1561                           ergo(i == page_count - 1, poff == 0)),
1562                          "i: %d/%d pg: %p off: %llu, count: %u\n",
1563                          i, page_count, pg, pg->off, pg->count);
1564                 LASSERTF(i == 0 || pg->off > pg_prev->off,
1565                          "i %d p_c %u pg %p [pri %lu ind %lu] off %llu"
1566                          " prev_pg %p [pri %lu ind %lu] off %llu\n",
1567                          i, page_count,
1568                          pg->pg, page_private(pg->pg), pg->pg->index, pg->off,
1569                          pg_prev->pg, page_private(pg_prev->pg),
1570                          pg_prev->pg->index, pg_prev->off);
1571                 LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
1572                         (pg->flag & OBD_BRW_SRVLOCK));
1573                 if (short_io_size != 0 && opc == OST_WRITE) {
1574                         unsigned char *ptr = kmap_atomic(pg->pg);
1575
1576                         LASSERT(short_io_size >= requested_nob + pg->count);
1577                         memcpy(short_io_buf + requested_nob,
1578                                ptr + poff,
1579                                pg->count);
1580                         kunmap_atomic(ptr);
1581                 } else if (short_io_size == 0) {
1582                         desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
1583                                                          pg->count);
1584                 }
1585                 requested_nob += pg->count;
1586
1587                 if (i > 0 && can_merge_pages(pg_prev, pg)) {
1588                         niobuf--;
1589                         niobuf->rnb_len += pg->count;
1590                 } else {
1591                         niobuf->rnb_offset = pg->off;
1592                         niobuf->rnb_len    = pg->count;
1593                         niobuf->rnb_flags  = pg->flag;
1594                 }
1595                 pg_prev = pg;
1596         }
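        /* e.g. three contiguous 4KiB pages merged above yield a single
         * niobuf with rnb_len = 12288, while requested_nob still counts
         * every byte for the grant and checksum accounting below. */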
1597
1598         LASSERTF((void *)(niobuf - niocount) ==
1599                 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE),
1600                 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill,
1601                 &RMF_NIOBUF_REMOTE), (void *)(niobuf - niocount));
1602
1603         osc_announce_cached(cli, &body->oa, opc == OST_WRITE ? requested_nob:0);
1604         if (resend) {
1605                 if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0) {
1606                         body->oa.o_valid |= OBD_MD_FLFLAGS;
1607                         body->oa.o_flags = 0;
1608                 }
1609                 body->oa.o_flags |= OBD_FL_RECOV_RESEND;
1610         }
1611
1612         if (osc_should_shrink_grant(cli))
1613                 osc_shrink_grant_local(cli, &body->oa);
1614
1615         /* size[REQ_REC_OFF] is still sizeof(*body) */
1616         if (opc == OST_WRITE) {
1617                 if (cli->cl_checksum &&
1618                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1619                         /* store cl_cksum_type in a local variable since
1620                          * it can be changed via lprocfs */
1621                         enum cksum_types cksum_type = cli->cl_cksum_type;
1622
1623                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1624                                 body->oa.o_flags = 0;
1625
1626                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1627                                                                 cksum_type);
1628                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1629
1630                         rc = osc_checksum_bulk_rw(obd_name, cksum_type,
1631                                                   requested_nob, page_count,
1632                                                   pga, OST_WRITE,
1633                                                   &body->oa.o_cksum);
1634                         if (rc < 0) {
1635                                 CDEBUG(D_PAGE, "failed to checksum, rc = %d\n",
1636                                        rc);
1637                                 GOTO(out, rc);
1638                         }
1639                         CDEBUG(D_PAGE, "checksum at write origin: %x\n",
1640                                body->oa.o_cksum);
1641
1642                         /* save this in 'oa', too, for later checking */
1643                         oa->o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1644                         oa->o_flags |= obd_cksum_type_pack(obd_name,
1645                                                            cksum_type);
1646                 } else {
1647                         /* clear out the checksum flag, in case this is a
1648                          * resend but cl_checksum is no longer set. b=11238 */
1649                         oa->o_valid &= ~OBD_MD_FLCKSUM;
1650                 }
1651                 oa->o_cksum = body->oa.o_cksum;
1652                 /* 1 RC per niobuf */
1653                 req_capsule_set_size(pill, &RMF_RCS, RCL_SERVER,
1654                                      sizeof(__u32) * niocount);
1655         } else {
1656                 if (cli->cl_checksum &&
1657                     !sptlrpc_flavor_has_bulk(&req->rq_flvr)) {
1658                         if ((body->oa.o_valid & OBD_MD_FLFLAGS) == 0)
1659                                 body->oa.o_flags = 0;
1660                         body->oa.o_flags |= obd_cksum_type_pack(obd_name,
1661                                 cli->cl_cksum_type);
1662                         body->oa.o_valid |= OBD_MD_FLCKSUM | OBD_MD_FLFLAGS;
1663                 }
1664
1665                 /* The client cksum has already been copied to the wire obdo in
1666                  * the previous lustre_set_wire_obdo(), so if a bulk read is
1667                  * being resent due to a cksum error, this allows the server to
1668                  * check+dump the pages on its side */
1669         }
1670         ptlrpc_request_set_replen(req);
1671
1672         aa = ptlrpc_req_async_args(aa, req);
1673         aa->aa_oa = oa;
1674         aa->aa_requested_nob = requested_nob;
1675         aa->aa_nio_count = niocount;
1676         aa->aa_page_count = page_count;
1677         aa->aa_resends = 0;
1678         aa->aa_ppga = pga;
1679         aa->aa_cli = cli;
1680         INIT_LIST_HEAD(&aa->aa_oaps);
1681
1682         *reqp = req;
1683         niobuf = req_capsule_client_get(pill, &RMF_NIOBUF_REMOTE);
1684         CDEBUG(D_RPCTRACE, "brw rpc %p - object "DOSTID" offset %lld<>%lld\n",
1685                 req, POSTID(&oa->o_oi), niobuf[0].rnb_offset,
1686                 niobuf[niocount - 1].rnb_offset + niobuf[niocount - 1].rnb_len);
1687         RETURN(0);
1688
1689  out:
1690         ptlrpc_req_finished(req);
1691         RETURN(rc);
1692 }
1693
1694 char dbgcksum_file_name[PATH_MAX];
1695
1696 static void dump_all_bulk_pages(struct obdo *oa, __u32 page_count,
1697                                 struct brw_page **pga, __u32 server_cksum,
1698                                 __u32 client_cksum)
1699 {
1700         struct file *filp;
1701         int rc, i;
1702         unsigned int len;
1703         char *buf;
1704
1705         /* only keep a dump of the pages on the first error for a given range
1706          * in the file/fid, not during the resends/retries. */
1707         snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
1708                  "%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
1709                  (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0 ?
1710                   libcfs_debug_file_path_arr :
1711                   LIBCFS_DEBUG_FILE_PATH_DEFAULT),
1712                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
1713                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1714                  oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1715                  pga[0]->off,
1716                  pga[page_count-1]->off + pga[page_count-1]->count - 1,
1717                  client_cksum, server_cksum);
1718         filp = filp_open(dbgcksum_file_name,
1719                          O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE, 0600);
1720         if (IS_ERR(filp)) {
1721                 rc = PTR_ERR(filp);
1722                 if (rc == -EEXIST)
1723                         CDEBUG(D_INFO, "%s: can't open to dump pages with "
1724                                "checksum error: rc = %d\n", dbgcksum_file_name,
1725                                rc);
1726                 else
1727                         CERROR("%s: can't open to dump pages with checksum "
1728                                "error: rc = %d\n", dbgcksum_file_name, rc);
1729                 return;
1730         }
1731
1732         for (i = 0; i < page_count; i++) {
1733                 len = pga[i]->count;
1734                 buf = kmap(pga[i]->pg);
1735                 while (len != 0) {
1736                         rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
1737                         if (rc < 0) {
1738                                 CERROR("%s: wanted to write %u but got "
1739                                        "error %d\n", dbgcksum_file_name, len, rc);
1740                                 break;
1741                         }
1742                         len -= rc;
1743                         buf += rc;
1744                         CDEBUG(D_INFO, "%s: wrote %d bytes\n",
1745                                dbgcksum_file_name, rc);
1746                 }
1747                 kunmap(pga[i]->pg);
1748         }
1749
1750         rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
1751         if (rc)
1752                 CERROR("%s: sync returns %d\n", dbgcksum_file_name, rc);
1753         filp_close(filp, NULL);
1754 }
1755
1756 static int
1757 check_write_checksum(struct obdo *oa, const struct lnet_process_id *peer,
1758                      __u32 client_cksum, __u32 server_cksum,
1759                      struct osc_brw_async_args *aa)
1760 {
1761         const char *obd_name = aa->aa_cli->cl_import->imp_obd->obd_name;
1762         enum cksum_types cksum_type;
1763         obd_dif_csum_fn *fn = NULL;
1764         int sector_size = 0;
1765         __u32 new_cksum;
1766         char *msg;
1767         int rc;
1768
1769         if (server_cksum == client_cksum) {
1770                 CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
1771                 return 0;
1772         }
1773
1774         if (aa->aa_cli->cl_checksum_dump)
1775                 dump_all_bulk_pages(oa, aa->aa_page_count, aa->aa_ppga,
1776                                     server_cksum, client_cksum);
1777
1778         cksum_type = obd_cksum_type_unpack(oa->o_valid & OBD_MD_FLFLAGS ?
1779                                            oa->o_flags : 0);
1780
1781         switch (cksum_type) {
1782         case OBD_CKSUM_T10IP512:
1783                 fn = obd_dif_ip_fn;
1784                 sector_size = 512;
1785                 break;
1786         case OBD_CKSUM_T10IP4K:
1787                 fn = obd_dif_ip_fn;
1788                 sector_size = 4096;
1789                 break;
1790         case OBD_CKSUM_T10CRC512:
1791                 fn = obd_dif_crc_fn;
1792                 sector_size = 512;
1793                 break;
1794         case OBD_CKSUM_T10CRC4K:
1795                 fn = obd_dif_crc_fn;
1796                 sector_size = 4096;
1797                 break;
1798         default:
1799                 break;
1800         }
1801
1802         if (fn)
1803                 rc = osc_checksum_bulk_t10pi(obd_name, aa->aa_requested_nob,
1804                                              aa->aa_page_count, aa->aa_ppga,
1805                                              OST_WRITE, fn, sector_size,
1806                                              &new_cksum);
1807         else
1808                 rc = osc_checksum_bulk(aa->aa_requested_nob, aa->aa_page_count,
1809                                        aa->aa_ppga, OST_WRITE, cksum_type,
1810                                        &new_cksum);
1811
1812         if (rc < 0)
1813                 msg = "failed to calculate the client write checksum";
1814         else if (cksum_type != obd_cksum_type_unpack(aa->aa_oa->o_flags))
1815                 msg = "the server did not use the checksum type specified in "
1816                       "the original request - likely a protocol problem";
1817         else if (new_cksum == server_cksum)
1818                 msg = "changed on the client after we checksummed it - "
1819                       "likely false positive due to mmap IO (bug 11742)";
1820         else if (new_cksum == client_cksum)
1821                 msg = "changed in transit before arrival at OST";
1822         else
1823                 msg = "changed in transit AND doesn't match the original - "
1824                       "likely false positive due to mmap IO (bug 11742)";
1825
1826         LCONSOLE_ERROR_MSG(0x132, "%s: BAD WRITE CHECKSUM: %s: from %s inode "
1827                            DFID " object "DOSTID" extent [%llu-%llu], original "
1828                            "client csum %x (type %x), server csum %x (type %x),"
1829                            " client csum now %x\n",
1830                            obd_name, msg, libcfs_nid2str(peer->nid),
1831                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : (__u64)0,
1832                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
1833                            oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
1834                            POSTID(&oa->o_oi), aa->aa_ppga[0]->off,
1835                            aa->aa_ppga[aa->aa_page_count - 1]->off +
1836                                 aa->aa_ppga[aa->aa_page_count-1]->count - 1,
1837                            client_cksum,
1838                            obd_cksum_type_unpack(aa->aa_oa->o_flags),
1839                            server_cksum, cksum_type, new_cksum);
1840         return 1;
1841 }
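/* A condensed view of the diagnosis above: the client recomputes the
 * checksum (new_cksum) over the pages it still holds and compares:
 *   new_cksum == server_cksum: page changed on the client after the
 *                              original checksum (e.g. mmap IO);
 *   new_cksum == client_cksum: data corrupted in transit to the OST;
 *   matches neither:           changed in transit AND changed locally.
 */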
1842
1843 /* Note rc enters this function as the number of bytes transferred */
1844 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
1845 {
1846         struct osc_brw_async_args *aa = (void *)&req->rq_async_args;
1847         struct client_obd *cli = aa->aa_cli;
1848         const char *obd_name = cli->cl_import->imp_obd->obd_name;
1849         const struct lnet_process_id *peer =
1850                 &req->rq_import->imp_connection->c_peer;
1851         struct ost_body *body;
1852         u32 client_cksum = 0;
1853         struct inode *inode;
1854
1855         ENTRY;
1856
1857         if (rc < 0 && rc != -EDQUOT) {
1858                 DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
1859                 RETURN(rc);
1860         }
1861
1862         LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
1863         body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
1864         if (body == NULL) {
1865                 DEBUG_REQ(D_INFO, req, "cannot unpack body");
1866                 RETURN(-EPROTO);
1867         }
1868
1869         /* set/clear over quota flag for a uid/gid/projid */
1870         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE &&
1871             body->oa.o_valid & (OBD_MD_FLALLQUOTA)) {
1872                 unsigned qid[LL_MAXQUOTAS] = {
1873                                          body->oa.o_uid, body->oa.o_gid,
1874                                          body->oa.o_projid };
1875                 CDEBUG(D_QUOTA,
1876                        "setdq for [%u %u %u] with valid %#llx, flags %x\n",
1877                        body->oa.o_uid, body->oa.o_gid, body->oa.o_projid,
1878                        body->oa.o_valid, body->oa.o_flags);
1879                 osc_quota_setdq(cli, req->rq_xid, qid, body->oa.o_valid,
1880                                 body->oa.o_flags);
1881         }
1882
1883         osc_update_grant(cli, body);
1884
1885         if (rc < 0)
1886                 RETURN(rc);
1887
1888         if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
1889                 client_cksum = aa->aa_oa->o_cksum; /* save for later */
1890
1891         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
1892                 if (rc > 0) {
1893                         CERROR("%s: unexpected positive size %d\n",
1894                                obd_name, rc);
1895                         RETURN(-EPROTO);
1896                 }
1897
1898                 if (req->rq_bulk != NULL &&
1899                     sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
1900                         RETURN(-EAGAIN);
1901
1902                 if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
1903                     check_write_checksum(&body->oa, peer, client_cksum,
1904                                          body->oa.o_cksum, aa))
1905                         RETURN(-EAGAIN);
1906
1907                 rc = check_write_rcs(req, aa->aa_requested_nob,
1908                                      aa->aa_nio_count, aa->aa_page_count,
1909                                      aa->aa_ppga);
1910                 GOTO(out, rc);
1911         }
1912
1913         /* The rest of this function executes only for OST_READs */
1914
1915         if (req->rq_bulk == NULL) {
1916                 rc = req_capsule_get_size(&req->rq_pill, &RMF_SHORT_IO,
1917                                           RCL_SERVER);
1918                 LASSERT(rc == req->rq_status);
1919         } else {
1920                 /* if unwrap_bulk failed, return -EAGAIN to retry */
1921                 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc);
1922         }
1923         if (rc < 0)
1924                 GOTO(out, rc = -EAGAIN);
1925
1926         if (rc > aa->aa_requested_nob) {
1927                 CERROR("%s: unexpected size %d, requested %d\n", obd_name,
1928                        rc, aa->aa_requested_nob);
1929                 RETURN(-EPROTO);
1930         }
1931
1932         if (req->rq_bulk != NULL && rc != req->rq_bulk->bd_nob_transferred) {
1933                 CERROR("%s: unexpected size %d, transferred %d\n", obd_name,
1934                        rc, req->rq_bulk->bd_nob_transferred);
1935                 RETURN(-EPROTO);
1936         }
1937
1938         if (req->rq_bulk == NULL) {
1939                 /* short io */
1940                 int nob, pg_count, i = 0;
1941                 unsigned char *buf;
1942
1943                 CDEBUG(D_CACHE, "Using short io read, size %d\n", rc);
1944                 pg_count = aa->aa_page_count;
1945                 buf = req_capsule_server_sized_get(&req->rq_pill, &RMF_SHORT_IO,
1946                                                    rc);
1947                 nob = rc;
1948                 while (nob > 0 && pg_count > 0) {
1949                         unsigned char *ptr;
1950                         int count = aa->aa_ppga[i]->count > nob ?
1951                                     nob : aa->aa_ppga[i]->count;
1952
1953                         CDEBUG(D_CACHE, "page %p count %d\n",
1954                                aa->aa_ppga[i]->pg, count);
1955                         ptr = kmap_atomic(aa->aa_ppga[i]->pg);
1956                         memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
1957                                count);
1958                         kunmap_atomic((void *) ptr);
1959
1960                         buf += count;
1961                         nob -= count;
1962                         i++;
1963                         pg_count--;
1964                 }
1965         }
1966
1967         if (rc < aa->aa_requested_nob)
1968                 handle_short_read(rc, aa->aa_page_count, aa->aa_ppga);
1969
1970         if (body->oa.o_valid & OBD_MD_FLCKSUM) {
1971                 static int cksum_counter;
1972                 u32        server_cksum = body->oa.o_cksum;
1973                 char      *via = "";
1974                 char      *router = "";
1975                 enum cksum_types cksum_type;
1976                 u32 o_flags = body->oa.o_valid & OBD_MD_FLFLAGS ?
1977                         body->oa.o_flags : 0;
1978
1979                 cksum_type = obd_cksum_type_unpack(o_flags);
1980                 rc = osc_checksum_bulk_rw(obd_name, cksum_type, rc,
1981                                           aa->aa_page_count, aa->aa_ppga,
1982                                           OST_READ, &client_cksum);
1983                 if (rc < 0)
1984                         GOTO(out, rc);
1985
1986                 if (req->rq_bulk != NULL &&
1987                     peer->nid != req->rq_bulk->bd_sender) {
1988                         via = " via ";
1989                         router = libcfs_nid2str(req->rq_bulk->bd_sender);
1990                 }
1991
1992                 if (server_cksum != client_cksum) {
1993                         struct ost_body *clbody;
1994                         u32 page_count = aa->aa_page_count;
1995
1996                         clbody = req_capsule_client_get(&req->rq_pill,
1997                                                         &RMF_OST_BODY);
1998                         if (cli->cl_checksum_dump)
1999                                 dump_all_bulk_pages(&clbody->oa, page_count,
2000                                                     aa->aa_ppga, server_cksum,
2001                                                     client_cksum);
2002
2003                         LCONSOLE_ERROR_MSG(0x133, "%s: BAD READ CHECKSUM: from "
2004                                            "%s%s%s inode "DFID" object "DOSTID
2005                                            " extent [%llu-%llu], client %x, "
2006                                            "server %x, cksum_type %x\n",
2007                                            obd_name,
2008                                            libcfs_nid2str(peer->nid),
2009                                            via, router,
2010                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2011                                                 clbody->oa.o_parent_seq : 0ULL,
2012                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2013                                                 clbody->oa.o_parent_oid : 0,
2014                                            clbody->oa.o_valid & OBD_MD_FLFID ?
2015                                                 clbody->oa.o_parent_ver : 0,
2016                                            POSTID(&body->oa.o_oi),
2017                                            aa->aa_ppga[0]->off,
2018                                            aa->aa_ppga[page_count-1]->off +
2019                                            aa->aa_ppga[page_count-1]->count - 1,
2020                                            client_cksum, server_cksum,
2021                                            cksum_type);
2022                         cksum_counter = 0;
2023                         aa->aa_oa->o_cksum = client_cksum;
2024                         rc = -EAGAIN;
2025                 } else {
2026                         cksum_counter++;
2027                         CDEBUG(D_PAGE, "checksum %x confirmed\n", client_cksum);
2028                         rc = 0;
2029                 }
2030         } else if (unlikely(client_cksum)) {
2031                 static int cksum_missed;
2032
2033                 cksum_missed++;
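                /* x & -x isolates the lowest set bit, so the test below
                 * holds exactly when cksum_missed is a power of two; the
                 * error is thus logged on the 1st, 2nd, 4th, 8th, ...
                 * miss rather than on every one. */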
2034                 if ((cksum_missed & (-cksum_missed)) == cksum_missed)
2035                         CERROR("%s: checksum %u requested from %s but not sent\n",
2036                                obd_name, cksum_missed,
2037                                libcfs_nid2str(peer->nid));
2038         } else {
2039                 rc = 0;
2040         }
2041
2042         inode = page2inode(aa->aa_ppga[0]->pg);
2043         if (inode && IS_ENCRYPTED(inode)) {
2044                 int idx;
2045
2046                 if (!llcrypt_has_encryption_key(inode)) {
2047                         CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
2048                         GOTO(out, rc);
2049                 }
2050                 for (idx = 0; idx < aa->aa_page_count; idx++) {
2051                         struct brw_page *pg = aa->aa_ppga[idx];
2052                         __u64 *p, *q;
2053
2054                         /* do not decrypt if page is all 0s */
2055                         p = q = page_address(pg->pg);
2056                         while (p - q < PAGE_SIZE / sizeof(*p)) {
2057                                 if (*p != 0)
2058                                         break;
2059                                 p++;
2060                         }
2061                         if (p - q == PAGE_SIZE / sizeof(*p))
2062                                 continue;
2063
2064                         rc = llcrypt_decrypt_pagecache_blocks(pg->pg,
2065                                                               PAGE_SIZE, 0);
2066                         if (rc)
2067                                 GOTO(out, rc);
2068                 }
2069         }
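        /* Rationale for the all-zero scan above, likely: pages the OST
         * returns for holes in a sparse file were never encrypted, so
         * passing them to llcrypt_decrypt_pagecache_blocks() would corrupt
         * valid zero data, while genuine ciphertext is essentially never
         * all-zero. */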
2070
2071 out:
2072         if (rc >= 0)
2073                 lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
2074                                      aa->aa_oa, &body->oa);
2075
2076         RETURN(rc);
2077 }
2078
2079 static int osc_brw_redo_request(struct ptlrpc_request *request,
2080                                 struct osc_brw_async_args *aa, int rc)
2081 {
2082         struct ptlrpc_request *new_req;
2083         struct osc_brw_async_args *new_aa;
2084         struct osc_async_page *oap;
2085         ENTRY;
2086
2087         /* The message below is checked in replay-ost-single.sh test_8ae */
2088         DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
2089                   "redo for recoverable error %d", rc);
2090
2091         rc = osc_brw_prep_request(lustre_msg_get_opc(request->rq_reqmsg) ==
2092                                 OST_WRITE ? OBD_BRW_WRITE : OBD_BRW_READ,
2093                                   aa->aa_cli, aa->aa_oa, aa->aa_page_count,
2094                                   aa->aa_ppga, &new_req, 1);
2095         if (rc)
2096                 RETURN(rc);
2097
2098         list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
2099                 if (oap->oap_request != NULL) {
2100                         LASSERTF(request == oap->oap_request,
2101                                  "request %p != oap_request %p\n",
2102                                  request, oap->oap_request);
2103                 }
2104         }
2105         /*
2106          * The new request takes over pga and oaps from the old request.
2107          * Note that copying a list_head doesn't work; it has to be moved...
2108          */
2109         aa->aa_resends++;
2110         new_req->rq_interpret_reply = request->rq_interpret_reply;
2111         new_req->rq_async_args = request->rq_async_args;
2112         new_req->rq_commit_cb = request->rq_commit_cb;
2113         /* cap the resend delay to the current request timeout; this is similar
2114          * to what ptlrpc does (see after_reply()) */
2115         if (aa->aa_resends > new_req->rq_timeout)
2116                 new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
2117         else
2118                 new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
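        /* e.g. with rq_timeout == 10s, the 3rd resend is delayed by 3s
         * and the 15th is capped at 10s. */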
2119         new_req->rq_generation_set = 1;
2120         new_req->rq_import_generation = request->rq_import_generation;
2121
2122         new_aa = ptlrpc_req_async_args(new_aa, new_req);
2123
2124         INIT_LIST_HEAD(&new_aa->aa_oaps);
2125         list_splice_init(&aa->aa_oaps, &new_aa->aa_oaps);
2126         INIT_LIST_HEAD(&new_aa->aa_exts);
2127         list_splice_init(&aa->aa_exts, &new_aa->aa_exts);
2128         new_aa->aa_resends = aa->aa_resends;
2129
2130         list_for_each_entry(oap, &new_aa->aa_oaps, oap_rpc_item) {
2131                 if (oap->oap_request) {
2132                         ptlrpc_req_finished(oap->oap_request);
2133                         oap->oap_request = ptlrpc_request_addref(new_req);
2134                 }
2135         }
2136
2137         /* XXX: This code will run into problems if we ever support adding
2138          * a series of BRW RPCs into a self-defined ptlrpc_request_set
2139          * and waiting for all of them to finish. We should inherit the
2140          * request set from the old request. */
2141         ptlrpcd_add_req(new_req);
2142
2143         DEBUG_REQ(D_INFO, new_req, "new request");
2144         RETURN(0);
2145 }
2146
2147 /*
2148  * ugh, we want disk allocation on the target to happen in offset order.  we'll
2149  * follow Sedgewick's advice and stick to the dead simple shellsort -- it'll do
2150  * fine for our small page arrays and doesn't require allocation.  it's an
2151  * insertion sort that swaps elements that are strides apart, shrinking the
2152  * stride down until it's 1 and the array is sorted.
2153  */
2154 static void sort_brw_pages(struct brw_page **array, int num)
2155 {
2156         int stride, i, j;
2157         struct brw_page *tmp;
2158
2159         if (num == 1)
2160                 return;
2161         for (stride = 1; stride < num ; stride = (stride * 3) + 1)
2162                 ;
2163
2164         do {
2165                 stride /= 3;
2166                 for (i = stride ; i < num ; i++) {
2167                         tmp = array[i];
2168                         j = i;
2169                         while (j >= stride && array[j - stride]->off > tmp->off) {
2170                                 array[j] = array[j - stride];
2171                                 j -= stride;
2172                         }
2173                         array[j] = tmp;
2174                 }
2175         } while (stride > 1);
2176 }
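/* e.g. for num == 20 the for loop above leaves stride == 40 (1, 4, 13, 40);
 * the do/while then sorts with strides 13, 4 and 1 (40/3, 13/3, 4/3). The
 * final stride-1 pass is a plain insertion sort over an almost-sorted
 * array, which is where shellsort gets its speed. */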
2177
2178 static void osc_release_ppga(struct brw_page **ppga, size_t count)
2179 {
2180         LASSERT(ppga != NULL);
2181         OBD_FREE_PTR_ARRAY(ppga, count);
2182 }
2183
2184 static int brw_interpret(const struct lu_env *env,
2185                          struct ptlrpc_request *req, void *args, int rc)
2186 {
2187         struct osc_brw_async_args *aa = args;
2188         struct osc_extent *ext;
2189         struct osc_extent *tmp;
2190         struct client_obd *cli = aa->aa_cli;
2191         unsigned long transferred = 0;
2192
2193         ENTRY;
2194
2195         rc = osc_brw_fini_request(req, rc);
2196         CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
2197
2198         /* restore clear text pages */
2199         osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
2200
2201         /*
2202          * When the server returns -EINPROGRESS, the client should always
2203          * retry, regardless of how many times the bulk was already resent.
2204          */
2205         if (osc_recoverable_error(rc) && !req->rq_no_delay) {
2206                 if (req->rq_import_generation !=
2207                     req->rq_import->imp_generation) {
2208                         CDEBUG(D_HA, "%s: resend cross eviction for object: "
2209                                ""DOSTID", rc = %d.\n",
2210                                req->rq_import->imp_obd->obd_name,
2211                                POSTID(&aa->aa_oa->o_oi), rc);
2212                 } else if (rc == -EINPROGRESS ||
2213                     client_should_resend(aa->aa_resends, aa->aa_cli)) {
2214                         rc = osc_brw_redo_request(req, aa, rc);
2215                 } else {
2216                         CERROR("%s: too many resent retries for object: "
2217                                "%llu:%llu, rc = %d.\n",
2218                                req->rq_import->imp_obd->obd_name,
2219                                POSTID(&aa->aa_oa->o_oi), rc);
2220                 }
2221
2222                 if (rc == 0)
2223                         RETURN(0);
2224                 else if (rc == -EAGAIN || rc == -EINPROGRESS)
2225                         rc = -EIO;
2226         }
2227
2228         if (rc == 0) {
2229                 struct obdo *oa = aa->aa_oa;
2230                 struct cl_attr *attr = &osc_env_info(env)->oti_attr;
2231                 unsigned long valid = 0;
2232                 struct cl_object *obj;
2233                 struct osc_async_page *last;
2234
2235                 last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
2236                 obj = osc2cl(last->oap_obj);
2237
2238                 cl_object_attr_lock(obj);
2239                 if (oa->o_valid & OBD_MD_FLBLOCKS) {
2240                         attr->cat_blocks = oa->o_blocks;
2241                         valid |= CAT_BLOCKS;
2242                 }
2243                 if (oa->o_valid & OBD_MD_FLMTIME) {
2244                         attr->cat_mtime = oa->o_mtime;
2245                         valid |= CAT_MTIME;
2246                 }
2247                 if (oa->o_valid & OBD_MD_FLATIME) {
2248                         attr->cat_atime = oa->o_atime;
2249                         valid |= CAT_ATIME;
2250                 }
2251                 if (oa->o_valid & OBD_MD_FLCTIME) {
2252                         attr->cat_ctime = oa->o_ctime;
2253                         valid |= CAT_CTIME;
2254                 }
2255
2256                 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
2257                         struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
2258                         loff_t last_off = last->oap_count + last->oap_obj_off +
2259                                 last->oap_page_off;
2260
2261                         /* Change the file size if this is an out-of-quota or
2262                          * direct IO write and it extends the file size */
2263                         if (loi->loi_lvb.lvb_size < last_off) {
2264                                 attr->cat_size = last_off;
2265                                 valid |= CAT_SIZE;
2266                         }
2267                         /* Extend KMS if it's not a lockless write */
2268                         if (loi->loi_kms < last_off &&
2269                             oap2osc_page(last)->ops_srvlock == 0) {
2270                                 attr->cat_kms = last_off;
2271                                 valid |= CAT_KMS;
2272                         }
2273                 }
2274
2275                 if (valid != 0)
2276                         cl_object_attr_update(env, obj, attr, valid);
2277                 cl_object_attr_unlock(obj);
2278         }
2279         OBD_SLAB_FREE_PTR(aa->aa_oa, osc_obdo_kmem);
2280         aa->aa_oa = NULL;
2281
2282         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && rc == 0)
2283                 osc_inc_unstable_pages(req);
2284
2285         list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
2286                 list_del_init(&ext->oe_link);
2287                 osc_extent_finish(env, ext, 1,
2288                                   rc && req->rq_no_delay ? -EWOULDBLOCK : rc);
2289         }
2290         LASSERT(list_empty(&aa->aa_exts));
2291         LASSERT(list_empty(&aa->aa_oaps));
2292
2293         transferred = (req->rq_bulk == NULL ? /* short io */
2294                        aa->aa_requested_nob :
2295                        req->rq_bulk->bd_nob_transferred);
2296
2297         osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
2298         ptlrpc_lprocfs_brw(req, transferred);
2299
2300         spin_lock(&cli->cl_loi_list_lock);
2301         /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
2302          * is called so we know whether to go to sync BRWs or wait for more
2303          * RPCs to complete */
2304         if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE)
2305                 cli->cl_w_in_flight--;
2306         else
2307                 cli->cl_r_in_flight--;
2308         osc_wake_cache_waiters(cli);
2309         spin_unlock(&cli->cl_loi_list_lock);
2310
2311         osc_io_unplug(env, cli, NULL);
2312         RETURN(rc);
2313 }
2314
2315 static void brw_commit(struct ptlrpc_request *req)
2316 {
2317         /* If osc_inc_unstable_pages (via osc_extent_finish) races with
2318          * this function, which is called via rq_commit_cb, we need to ensure
2319          * that osc_dec_unstable_pages is still called. Otherwise unstable
2320          * pages may be leaked. */
2321         spin_lock(&req->rq_lock);
2322         if (likely(req->rq_unstable)) {
2323                 req->rq_unstable = 0;
2324                 spin_unlock(&req->rq_lock);
2325
2326                 osc_dec_unstable_pages(req);
2327         } else {
2328                 req->rq_committed = 1;
2329                 spin_unlock(&req->rq_lock);
2330         }
2331 }
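/* A hedged reading of the race handled above: brw_commit() may run either
 * before or after the unstable-page accounting done from brw_interpret().
 * If the increment already happened, rq_unstable is set and the pages are
 * released here; otherwise rq_committed records the commit so that the
 * accounting path can release them itself. Either ordering decrements
 * exactly once. */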
2332
2333 /**
2334  * Build an RPC from the list of extents @ext_list. The caller must ensure
2335  * that the total number of pages in this list is NOT over the max pages
2336  * per RPC. Extents in the list must be in OES_RPC state.
2337  */
2338 int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
2339                   struct list_head *ext_list, int cmd)
2340 {
2341         struct ptlrpc_request           *req = NULL;
2342         struct osc_extent               *ext;
2343         struct brw_page                 **pga = NULL;
2344         struct osc_brw_async_args       *aa = NULL;
2345         struct obdo                     *oa = NULL;
2346         struct osc_async_page           *oap;
2347         struct osc_object               *obj = NULL;
2348         struct cl_req_attr              *crattr = NULL;
2349         loff_t                          starting_offset = OBD_OBJECT_EOF;
2350         loff_t                          ending_offset = 0;
2351         /* '1' for consistency with code that checks !mpflag to restore */
2352         int mpflag = 1;
2353         int                             mem_tight = 0;
2354         int                             page_count = 0;
2355         bool                            soft_sync = false;
2356         bool                            ndelay = false;
2357         int                             i;
2358         int                             grant = 0;
2359         int                             rc;
2360         __u32                           layout_version = 0;
2361         LIST_HEAD(rpc_list);
2362         struct ost_body                 *body;
2363         ENTRY;
2364         LASSERT(!list_empty(ext_list));
2365
2366         /* add pages into rpc_list to build BRW rpc */
2367         list_for_each_entry(ext, ext_list, oe_link) {
2368                 LASSERT(ext->oe_state == OES_RPC);
2369                 mem_tight |= ext->oe_memalloc;
2370                 grant += ext->oe_grants;
2371                 page_count += ext->oe_nr_pages;
2372                 layout_version = max(layout_version, ext->oe_layout_version);
2373                 if (obj == NULL)
2374                         obj = ext->oe_obj;
2375         }
2376
2377         soft_sync = osc_over_unstable_soft_limit(cli);
2378         if (mem_tight)
2379                 mpflag = memalloc_noreclaim_save();
2380
2381         OBD_ALLOC_PTR_ARRAY(pga, page_count);
2382         if (pga == NULL)
2383                 GOTO(out, rc = -ENOMEM);
2384
2385         OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
2386         if (oa == NULL)
2387                 GOTO(out, rc = -ENOMEM);
2388
2389         i = 0;
2390         list_for_each_entry(ext, ext_list, oe_link) {
2391                 list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
2392                         if (mem_tight)
2393                                 oap->oap_brw_flags |= OBD_BRW_MEMALLOC;
2394                         if (soft_sync)
2395                                 oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
2396                         pga[i] = &oap->oap_brw_page;
2397                         pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
2398                         i++;
2399
2400                         list_add_tail(&oap->oap_rpc_item, &rpc_list);
2401                         if (starting_offset == OBD_OBJECT_EOF ||
2402                             starting_offset > oap->oap_obj_off)
2403                                 starting_offset = oap->oap_obj_off;
2404                         else
2405                                 LASSERT(oap->oap_page_off == 0);
2406                         if (ending_offset < oap->oap_obj_off + oap->oap_count)
2407                                 ending_offset = oap->oap_obj_off +
2408                                                 oap->oap_count;
2409                         else
2410                                 LASSERT(oap->oap_page_off + oap->oap_count ==
2411                                         PAGE_SIZE);
2412                 }
2413                 if (ext->oe_ndelay)
2414                         ndelay = true;
2415         }
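        /* At this point starting_offset/ending_offset bracket the byte
         * range this RPC covers, and the LASSERTs above guarantee that
         * only the first and last pages may be partial -- mirroring the
         * no-gaps check performed later in osc_brw_prep_request(). */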
2416
2417         /* first page in the list */
2418         oap = list_entry(rpc_list.next, typeof(*oap), oap_rpc_item);
2419
2420         crattr = &osc_env_info(env)->oti_req_attr;
2421         memset(crattr, 0, sizeof(*crattr));
2422         crattr->cra_type = (cmd & OBD_BRW_WRITE) ? CRT_WRITE : CRT_READ;
2423         crattr->cra_flags = ~0ULL;
2424         crattr->cra_page = oap2cl_page(oap);
2425         crattr->cra_oa = oa;
2426         cl_req_attr_set(env, osc2cl(obj), crattr);
2427
2428         if (cmd == OBD_BRW_WRITE) {
2429                 oa->o_grant_used = grant;
2430                 if (layout_version > 0) {
2431                         CDEBUG(D_LAYOUT, DFID": write with layout version %u\n",
2432                                PFID(&oa->o_oi.oi_fid), layout_version);
2433
2434                         oa->o_layout_version = layout_version;
2435                         oa->o_valid |= OBD_MD_LAYOUT_VERSION;
2436                 }
2437         }
2438
2439         sort_brw_pages(pga, page_count);
2440         rc = osc_brw_prep_request(cmd, cli, oa, page_count, pga, &req, 0);
2441         if (rc != 0) {
2442                 CERROR("prep_req failed: %d\n", rc);
2443                 GOTO(out, rc);
2444         }
2445
2446         req->rq_commit_cb = brw_commit;
2447         req->rq_interpret_reply = brw_interpret;
2448         req->rq_memalloc = mem_tight != 0;
2449         oap->oap_request = ptlrpc_request_addref(req);
2450         if (ndelay) {
2451                 req->rq_no_resend = req->rq_no_delay = 1;
2452                 /* we should probably set a shorter timeout value here,
2453                  * to handle ETIMEDOUT in brw_interpret() correctly. */
2454                 /* lustre_msg_set_timeout(req, req->rq_timeout / 2); */
2455         }
2456
2457         /* Need to update the timestamps after the request is built in case
2458          * we race with setattr (locally or in the queue at the OST).  If the
2459          * OST gets a later setattr before an earlier BRW (as determined by the
2460          * request xid), the OST will not use the BRW timestamps.  Sadly, there
2461          * is no obvious way to do this in a single call.  bug 10150 */
2462         body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
2463         crattr->cra_oa = &body->oa;
2464         crattr->cra_flags = OBD_MD_FLMTIME | OBD_MD_FLCTIME | OBD_MD_FLATIME;
2465         cl_req_attr_set(env, osc2cl(obj), crattr);
2466         lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
2467
2468         aa = ptlrpc_req_async_args(aa, req);
2469         INIT_LIST_HEAD(&aa->aa_oaps);
2470         list_splice_init(&rpc_list, &aa->aa_oaps);
2471         INIT_LIST_HEAD(&aa->aa_exts);
2472         list_splice_init(ext_list, &aa->aa_exts);
2473
2474         spin_lock(&cli->cl_loi_list_lock);
2475         starting_offset >>= PAGE_SHIFT;
2476         if (cmd == OBD_BRW_READ) {
2477                 cli->cl_r_in_flight++;
2478                 lprocfs_oh_tally_log2(&cli->cl_read_page_hist, page_count);
2479                 lprocfs_oh_tally(&cli->cl_read_rpc_hist, cli->cl_r_in_flight);
2480                 lprocfs_oh_tally_log2(&cli->cl_read_offset_hist,
2481                                       starting_offset + 1);
2482         } else {
2483                 cli->cl_w_in_flight++;
2484                 lprocfs_oh_tally_log2(&cli->cl_write_page_hist, page_count);
2485                 lprocfs_oh_tally(&cli->cl_write_rpc_hist, cli->cl_w_in_flight);
2486                 lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
2487                                       starting_offset + 1);
2488         }
2489         spin_unlock(&cli->cl_loi_list_lock);
2490
2491         DEBUG_REQ(D_INODE, req, "%d pages, aa %p, now %ur/%uw in flight",
2492                   page_count, aa, cli->cl_r_in_flight,
2493                   cli->cl_w_in_flight);
2494         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_IO, cfs_fail_val);
2495
2496         ptlrpcd_add_req(req);
2497         rc = 0;
2498         EXIT;
2499
2500 out:
2501         if (mem_tight)
2502                 memalloc_noreclaim_restore(mpflag);
2503
2504         if (rc != 0) {
2505                 LASSERT(req == NULL);
2506
2507                 if (oa)
2508                         OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
2509                 if (pga) {
2510                         osc_release_bounce_pages(pga, page_count);
2511                         osc_release_ppga(pga, page_count);
2512                 }
2513                 /* this should happen rarely and is pretty bad; it makes the
2514                  * pending list not follow the dirty order */
2515                 while (!list_empty(ext_list)) {
2516                         ext = list_entry(ext_list->next, struct osc_extent,
2517                                          oe_link);
2518                         list_del_init(&ext->oe_link);
2519                         osc_extent_finish(env, ext, 0, rc);
2520                 }
2521         }
2522         RETURN(rc);
2523 }
2524
2525 static int osc_set_lock_data(struct ldlm_lock *lock, void *data)
2526 {
2527         int set = 0;
2528
2529         LASSERT(lock != NULL);
2530
2531         lock_res_and_lock(lock);
2532
2533         if (lock->l_ast_data == NULL)
2534                 lock->l_ast_data = data;
2535         if (lock->l_ast_data == data)
2536                 set = 1;
2537
2538         unlock_res_and_lock(lock);
2539
2540         return set;
2541 }
2542
2543 int osc_enqueue_fini(struct ptlrpc_request *req, osc_enqueue_upcall_f upcall,
2544                      void *cookie, struct lustre_handle *lockh,
2545                      enum ldlm_mode mode, __u64 *flags, bool speculative,
2546                      int errcode)
2547 {
2548         bool intent = *flags & LDLM_FL_HAS_INTENT;
2549         int rc;
2550         ENTRY;
2551
2552         /* The request was created before the ldlm_cli_enqueue call. */
2553         if (intent && errcode == ELDLM_LOCK_ABORTED) {
2554                 struct ldlm_reply *rep;
2555
2556                 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
2557                 LASSERT(rep != NULL);
2558
2559                 rep->lock_policy_res1 =
2560                         ptlrpc_status_ntoh(rep->lock_policy_res1);
2561                 if (rep->lock_policy_res1)
2562                         errcode = rep->lock_policy_res1;
2563                 if (!speculative)
2564                         *flags |= LDLM_FL_LVB_READY;
2565         } else if (errcode == ELDLM_OK) {
2566                 *flags |= LDLM_FL_LVB_READY;
2567         }
2568
2569         /* Call the update callback. */
2570         rc = (*upcall)(cookie, lockh, errcode);
2571
2572         /* release the reference taken in ldlm_cli_enqueue() */
2573         if (errcode == ELDLM_LOCK_MATCHED)
2574                 errcode = ELDLM_OK;
2575         if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
2576                 ldlm_lock_decref(lockh, mode);
2577
2578         RETURN(rc);
2579 }
2580
2581 int osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req,
2582                           void *args, int rc)
2583 {
2584         struct osc_enqueue_args *aa = args;
2585         struct ldlm_lock *lock;
2586         struct lustre_handle *lockh = &aa->oa_lockh;
2587         enum ldlm_mode mode = aa->oa_mode;
2588         struct ost_lvb *lvb = aa->oa_lvb;
2589         __u32 lvb_len = sizeof(*lvb);
2590         __u64 flags = 0;
2591
2592         ENTRY;
2593
2594         /* ldlm_cli_enqueue is holding a reference on the lock, so it must
2595          * be valid. */
2596         lock = ldlm_handle2lock(lockh);
2597         LASSERTF(lock != NULL,
2598                  "lockh %#llx, req %p, aa %p - client evicted?\n",
2599                  lockh->cookie, req, aa);
2600
2601         /* Take an additional reference so that a blocking AST that
2602          * ldlm_cli_enqueue_fini() might post for a failed lock is guaranteed
2603          * to arrive after an upcall has been executed by
2604          * osc_enqueue_fini(). */
2605         ldlm_lock_addref(lockh, mode);
2606
2607         /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
2608         OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
2609
2610         /* Let the CP AST grant the lock first. */
2611         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
2612
2613         if (aa->oa_speculative) {
2614                 LASSERT(aa->oa_lvb == NULL);
2615                 LASSERT(aa->oa_flags == NULL);
2616                 aa->oa_flags = &flags;
2617         }
2618
2619         /* Complete obtaining the lock procedure. */
2620         rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
2621                                    aa->oa_mode, aa->oa_flags, lvb, lvb_len,
2622                                    lockh, rc);
2623         /* Complete osc stuff. */
2624         rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
2625                               aa->oa_flags, aa->oa_speculative, rc);
2626
2627         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
2628
2629         ldlm_lock_decref(lockh, mode);
2630         LDLM_LOCK_PUT(lock);
2631         RETURN(rc);
2632 }
2633
2634 /* When enqueuing asynchronously, locks are not ordered, so we can obtain a
2635  * lock from the 2nd OSC before a lock from the 1st one. This does not deadlock
2636  * with other synchronous requests; however, keeping some locks while trying to
2637  * obtain others may take a considerable amount of time in case of OST failure,
2638  * and when other sync requests do not get a lock released by a client, that
2639  * client is evicted from the cluster -- such scenarios make life difficult, so
2640  * release locks just after they are obtained. */
2641 int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
2642                      __u64 *flags, union ldlm_policy_data *policy,
2643                      struct ost_lvb *lvb, osc_enqueue_upcall_f upcall,
2644                      void *cookie, struct ldlm_enqueue_info *einfo,
2645                      struct ptlrpc_request_set *rqset, int async,
2646                      bool speculative)
2647 {
2648         struct obd_device *obd = exp->exp_obd;
2649         struct lustre_handle lockh = { 0 };
2650         struct ptlrpc_request *req = NULL;
2651         int intent = *flags & LDLM_FL_HAS_INTENT;
2652         __u64 match_flags = *flags;
2653         enum ldlm_mode mode;
2654         int rc;
2655         ENTRY;
2656
2657         /* Filesystem lock extents are extended to page boundaries so that
2658          * dealing with the page cache is a little smoother.  */
2659         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2660         policy->l_extent.end |= ~PAGE_MASK;
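        /* e.g. with 4KiB pages, a requested extent [5000, 9000] becomes
         * [4096, 12287]: the start is rounded down and the end rounded up
         * to page boundaries. */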
2661
2662         /* Next, search for already existing extent locks that will cover us */
2663         /* If we're trying to read, we also search for an existing PW lock.  The
2664          * VFS and page cache already protect us locally, so lots of readers/
2665          * writers can share a single PW lock.
2666          *
2667          * There are problems with conversion deadlocks, so instead of
2668          * converting a read lock to a write lock, we'll just enqueue a new
2669          * one.
2670          *
2671          * At some point we should cancel the read lock instead of making the
2672          * server send us a blocking callback, but there are problems with
2673          * canceling locks out from under other users right now, too. */
2674         mode = einfo->ei_mode;
2675         if (einfo->ei_mode == LCK_PR)
2676                 mode |= LCK_PW;
2677         /* Normal lock requests must wait for the LVB to be ready before
2678          * matching a lock; speculative lock requests do not need to,
2679          * because they will not actually use the lock. */
2680         if (!speculative)
2681                 match_flags |= LDLM_FL_LVB_READY;
2682         if (intent != 0)
2683                 match_flags |= LDLM_FL_BLOCK_GRANTED;
2684         mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
2685                                einfo->ei_type, policy, mode, &lockh, 0);
2686         if (mode) {
2687                 struct ldlm_lock *matched;
2688
2689                 if (*flags & LDLM_FL_TEST_LOCK)
2690                         RETURN(ELDLM_OK);
2691
2692                 matched = ldlm_handle2lock(&lockh);
2693                 if (speculative) {
2694                         /* This DLM lock request is speculative, and does not
2695                          * have an associated IO request. Therefore if there
2696                          * is already a DLM lock, it will just inform the
2697                          * caller to cancel the request for this stripe. */
2698                         lock_res_and_lock(matched);
2699                         if (ldlm_extent_equal(&policy->l_extent,
2700                             &matched->l_policy_data.l_extent))
2701                                 rc = -EEXIST;
2702                         else
2703                                 rc = -ECANCELED;
2704                         unlock_res_and_lock(matched);
2705
2706                         ldlm_lock_decref(&lockh, mode);
2707                         LDLM_LOCK_PUT(matched);
2708                         RETURN(rc);
2709                 } else if (osc_set_lock_data(matched, einfo->ei_cbdata)) {
2710                         *flags |= LDLM_FL_LVB_READY;
2711
2712                         /* We already have a lock, and it's referenced. */
2713                         (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
2714
2715                         ldlm_lock_decref(&lockh, mode);
2716                         LDLM_LOCK_PUT(matched);
2717                         RETURN(ELDLM_OK);
2718                 } else {
2719                         ldlm_lock_decref(&lockh, mode);
2720                         LDLM_LOCK_PUT(matched);
2721                 }
2722         }
2723
2724         if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
2725                 RETURN(-ENOLCK);
2726
2727         if (intent) {
2728                 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2729                                            &RQF_LDLM_ENQUEUE_LVB);
2730                 if (req == NULL)
2731                         RETURN(-ENOMEM);
2732
2733                 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
2734                 if (rc) {
2735                         ptlrpc_request_free(req);
2736                         RETURN(rc);
2737                 }
2738
2739                 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
2740                                      sizeof(*lvb));
2741                 ptlrpc_request_set_replen(req);
2742         }
2743
2744         /* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
2745         *flags &= ~LDLM_FL_BLOCK_GRANTED;
2746
2747         rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
2748                               sizeof(*lvb), LVB_T_OST, &lockh, async);
2749         if (async) {
2750                 if (!rc) {
2751                         struct osc_enqueue_args *aa;
2752                         aa = ptlrpc_req_async_args(aa, req);
2753                         aa->oa_exp         = exp;
2754                         aa->oa_mode        = einfo->ei_mode;
2755                         aa->oa_type        = einfo->ei_type;
2756                         lustre_handle_copy(&aa->oa_lockh, &lockh);
2757                         aa->oa_upcall      = upcall;
2758                         aa->oa_cookie      = cookie;
2759                         aa->oa_speculative = speculative;
2760                         if (!speculative) {
2761                                 aa->oa_flags  = flags;
2762                                 aa->oa_lvb    = lvb;
2763                         } else {
2764                                 /* speculative locks essentially enqueue
2765                                  * a DLM lock in advance, so we don't care
2766                                  * about the result of the enqueue. */
2767                                 aa->oa_lvb    = NULL;
2768                                 aa->oa_flags  = NULL;
2769                         }
2770
2771                         req->rq_interpret_reply = osc_enqueue_interpret;
2772                         ptlrpc_set_add_req(rqset, req);
2773                 } else if (intent) {
2774                         ptlrpc_req_finished(req);
2775                 }
2776                 RETURN(rc);
2777         }
2778
2779         rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
2780                               flags, speculative, rc);
2781         if (intent)
2782                 ptlrpc_req_finished(req);
2783
2784         RETURN(rc);
2785 }
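
/*
 * A standalone check of the page-boundary rounding done at the top of
 * osc_enqueue_base() and osc_match_base(): the extent start is rounded
 * down to its page boundary and the end up to the last byte of its page.
 * This sketch assumes 4096-byte pages; EX_PAGE_MASK mirrors the kernel's
 * PAGE_MASK definition.
 */
#include <assert.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL
#define EX_PAGE_MASK (~(EX_PAGE_SIZE - 1))

int main(void)
{
        uint64_t start = 5000, end = 9000;

        start -= start & ~EX_PAGE_MASK;         /* 5000 -> 4096  */
        end |= ~EX_PAGE_MASK;                   /* 9000 -> 12287 */

        assert(start == 4096 && end == 12287);
        return 0;
}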
2786
2787 int osc_match_base(const struct lu_env *env, struct obd_export *exp,
2788                    struct ldlm_res_id *res_id, enum ldlm_type type,
2789                    union ldlm_policy_data *policy, enum ldlm_mode mode,
2790                    __u64 *flags, struct osc_object *obj,
2791                    struct lustre_handle *lockh, int unref)
2792 {
2793         struct obd_device *obd = exp->exp_obd;
2794         __u64 lflags = *flags;
2795         enum ldlm_mode rc;
2796         ENTRY;
2797
2798         if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
2799                 RETURN(-EIO);
2800
2801         /* Filesystem lock extents are extended to page boundaries so that
2802          * dealing with the page cache is a little smoother */
2803         policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
2804         policy->l_extent.end |= ~PAGE_MASK;
2805
2806         /* Next, search for already existing extent locks that will cover us */
2807         /* If we're trying to read, we also search for an existing PW lock.  The
2808          * VFS and page cache already protect us locally, so lots of readers/
2809          * writers can share a single PW lock. */
2810         rc = mode;
2811         if (mode == LCK_PR)
2812                 rc |= LCK_PW;
2813         rc = ldlm_lock_match(obd->obd_namespace, lflags,
2814                              res_id, type, policy, rc, lockh, unref);
2815         if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
2816                 RETURN(rc);
2817
2818         if (obj != NULL) {
2819                 struct ldlm_lock *lock = ldlm_handle2lock(lockh);
2820
2821                 LASSERT(lock != NULL);
2822                 if (osc_set_lock_data(lock, obj)) {
2823                         lock_res_and_lock(lock);
2824                         if (!ldlm_is_lvb_cached(lock)) {
2825                                 LASSERT(lock->l_ast_data == obj);
2826                                 osc_lock_lvb_update(env, obj, lock, NULL);
2827                                 ldlm_set_lvb_cached(lock);
2828                         }
2829                         unlock_res_and_lock(lock);
2830                 } else {
2831                         ldlm_lock_decref(lockh, rc);
2832                         rc = 0;
2833                 }
2834                 LDLM_LOCK_PUT(lock);
2835         }
2836         RETURN(rc);
2837 }
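
/*
 * A sketch of why "rc |= LCK_PW" above lets a read request match an
 * existing write lock: LDLM lock modes are single-bit flags, so one match
 * mask can name several acceptable modes at once.  The values below mirror
 * the bit-flag convention of enum ldlm_mode, but the code is illustrative
 * only.
 */
#include <assert.h>

enum ex_mode {
        EX_LCK_EX = 1,
        EX_LCK_PW = 2,
        EX_LCK_PR = 4,
};

/* return the granted mode if it is acceptable to the caller, else 0 */
static int ex_mode_match(int granted, int acceptable)
{
        return (granted & acceptable) ? granted : 0;
}

int main(void)
{
        int mask = EX_LCK_PR | EX_LCK_PW;       /* a reader accepts either */

        assert(ex_mode_match(EX_LCK_PW, mask) == EX_LCK_PW);
        assert(ex_mode_match(EX_LCK_EX, mask) == 0);
        return 0;
}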
2838
2839 static int osc_statfs_interpret(const struct lu_env *env,
2840                                 struct ptlrpc_request *req, void *args, int rc)
2841 {
2842         struct osc_async_args *aa = args;
2843         struct obd_statfs *msfs;
2844
2845         ENTRY;
2846         if (rc == -EBADR)
2847                 /*
2848                  * The request has in fact never been sent due to issues at
2849                  * a higher level (LOV).  Exit immediately since the caller
2850                  * is aware of the problem and takes care of the clean up.
2851                  */
2852                 RETURN(rc);
2853
2854         if ((rc == -ENOTCONN || rc == -EAGAIN) &&
2855             (aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
2856                 GOTO(out, rc = 0);
2857
2858         if (rc != 0)
2859                 GOTO(out, rc);
2860
2861         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2862         if (msfs == NULL)
2863                 GOTO(out, rc = -EPROTO);
2864
2865         *aa->aa_oi->oi_osfs = *msfs;
2866 out:
2867         rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
2868
2869         RETURN(rc);
2870 }
2871
2872 static int osc_statfs_async(struct obd_export *exp,
2873                             struct obd_info *oinfo, time64_t max_age,
2874                             struct ptlrpc_request_set *rqset)
2875 {
2876         struct obd_device     *obd = class_exp2obd(exp);
2877         struct ptlrpc_request *req;
2878         struct osc_async_args *aa;
2879         int rc;
2880         ENTRY;
2881
2882         if (obd->obd_osfs_age >= max_age) {
2883                 CDEBUG(D_SUPER,
2884                        "%s: use %p cache blocks %llu/%llu objects %llu/%llu\n",
2885                        obd->obd_name, &obd->obd_osfs,
2886                        obd->obd_osfs.os_bavail, obd->obd_osfs.os_blocks,
2887                        obd->obd_osfs.os_ffree, obd->obd_osfs.os_files);
2888                 spin_lock(&obd->obd_osfs_lock);
2889                 memcpy(oinfo->oi_osfs, &obd->obd_osfs, sizeof(*oinfo->oi_osfs));
2890                 spin_unlock(&obd->obd_osfs_lock);
2891                 oinfo->oi_flags |= OBD_STATFS_FROM_CACHE;
2892                 if (oinfo->oi_cb_up)
2893                         oinfo->oi_cb_up(oinfo, 0);
2894
2895                 RETURN(0);
2896         }
2897
2898         /* We could possibly pass max_age in the request (as an absolute
2899          * timestamp or a "seconds.usec ago") so the target can avoid doing
2900          * extra calls into the filesystem if that isn't necessary (e.g. during
2901          * mount, where that would help a bit).  Having relative timestamps
2902          * is not so great if request processing is slow, while absolute
2903          * timestamps are not ideal because they need time synchronization. */
2904         req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
2905         if (req == NULL)
2906                 RETURN(-ENOMEM);
2907
2908         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2909         if (rc) {
2910                 ptlrpc_request_free(req);
2911                 RETURN(rc);
2912         }
2913         ptlrpc_request_set_replen(req);
2914         req->rq_request_portal = OST_CREATE_PORTAL;
2915         ptlrpc_at_set_req_timeout(req);
2916
2917         if (oinfo->oi_flags & OBD_STATFS_NODELAY) {
2918                 /* procfs requests must not wait on statfs, to avoid deadlock */
2919                 req->rq_no_resend = 1;
2920                 req->rq_no_delay = 1;
2921         }
2922
2923         req->rq_interpret_reply = osc_statfs_interpret;
2924         aa = ptlrpc_req_async_args(aa, req);
2925         aa->aa_oi = oinfo;
2926
2927         ptlrpc_set_add_req(rqset, req);
2928         RETURN(0);
2929 }
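
/*
 * A minimal model of the age check at the top of osc_statfs_async():
 * max_age is a cutoff timestamp, so any cached statfs taken at or after
 * that instant is fresh enough to serve without an RPC.  Standalone,
 * illustrative code.
 */
#include <assert.h>
#include <stdbool.h>
#include <time.h>

struct ex_statfs_cache {
        time_t sc_age;          /* when the cached copy was taken */
};

static bool ex_cache_is_fresh(const struct ex_statfs_cache *sc, time_t max_age)
{
        return sc->sc_age >= max_age;
}

int main(void)
{
        struct ex_statfs_cache sc = { .sc_age = 1000 };

        assert(ex_cache_is_fresh(&sc, 900));    /* cutoff in the past: hit  */
        assert(!ex_cache_is_fresh(&sc, 1100));  /* cutoff too recent: miss */
        return 0;
}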
2930
2931 static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
2932                       struct obd_statfs *osfs, time64_t max_age, __u32 flags)
2933 {
2934         struct obd_device     *obd = class_exp2obd(exp);
2935         struct obd_statfs     *msfs;
2936         struct ptlrpc_request *req;
2937         struct obd_import     *imp = NULL;
2938         int rc;
2939         ENTRY;
2940
2941
2942         /* Since the request might also come from lprocfs, we need to
2943          * sync this with client_disconnect_export() (Bug 15684). */
2944         down_read(&obd->u.cli.cl_sem);
2945         if (obd->u.cli.cl_import)
2946                 imp = class_import_get(obd->u.cli.cl_import);
2947         up_read(&obd->u.cli.cl_sem);
2948         if (!imp)
2949                 RETURN(-ENODEV);
2950
2951         /* We could possibly pass max_age in the request (as an absolute
2952          * timestamp or a "seconds.usec ago") so the target can avoid doing
2953          * extra calls into the filesystem if that isn't necessary (e.g. during
2954          * mount, where that would help a bit).  Having relative timestamps
2955          * is not so great if request processing is slow, while absolute
2956          * timestamps are not ideal because they need time synchronization. */
2957         req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
2958
2959         class_import_put(imp);
2960
2961         if (req == NULL)
2962                 RETURN(-ENOMEM);
2963
2964         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
2965         if (rc) {
2966                 ptlrpc_request_free(req);
2967                 RETURN(rc);
2968         }
2969         ptlrpc_request_set_replen(req);
2970         req->rq_request_portal = OST_CREATE_PORTAL;
2971         ptlrpc_at_set_req_timeout(req);
2972
2973         if (flags & OBD_STATFS_NODELAY) {
2974                 /* procfs requests must not wait on statfs, to avoid deadlock */
2975                 req->rq_no_resend = 1;
2976                 req->rq_no_delay = 1;
2977         }
2978
2979         rc = ptlrpc_queue_wait(req);
2980         if (rc)
2981                 GOTO(out, rc);
2982
2983         msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
2984         if (msfs == NULL)
2985                 GOTO(out, rc = -EPROTO);
2986
2987         *osfs = *msfs;
2988
2989         EXIT;
2990 out:
2991         ptlrpc_req_finished(req);
2992         return rc;
2993 }
2994
2995 static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2996                          void *karg, void __user *uarg)
2997 {
2998         struct obd_device *obd = exp->exp_obd;
2999         struct obd_ioctl_data *data = karg;
3000         int rc = 0;
3001
3002         ENTRY;
3003         if (!try_module_get(THIS_MODULE)) {
3004                 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
3005                        module_name(THIS_MODULE));
3006                 return -EINVAL;
3007         }
3008         switch (cmd) {
3009         case OBD_IOC_CLIENT_RECOVER:
3010                 rc = ptlrpc_recover_import(obd->u.cli.cl_import,
3011                                            data->ioc_inlbuf1, 0);
3012                 if (rc > 0)
3013                         rc = 0;
3014                 break;
3015         case IOC_OSC_SET_ACTIVE:
3016                 rc = ptlrpc_set_import_active(obd->u.cli.cl_import,
3017                                               data->ioc_offset);
3018                 break;
3019         default:
3020                 rc = -ENOTTY;
3021                 CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
3022                        obd->obd_name, cmd, current->comm, rc);
3023                 break;
3024         }
3025
3026         module_put(THIS_MODULE);
3027         return rc;
3028 }
3029
3030 int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
3031                        u32 keylen, void *key, u32 vallen, void *val,
3032                        struct ptlrpc_request_set *set)
3033 {
3034         struct ptlrpc_request *req;
3035         struct obd_device     *obd = exp->exp_obd;
3036         struct obd_import     *imp = class_exp2cliimp(exp);
3037         char                  *tmp;
3038         int                    rc;
3039         ENTRY;
3040
3041         OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
3042
3043         if (KEY_IS(KEY_CHECKSUM)) {
3044                 if (vallen != sizeof(int))
3045                         RETURN(-EINVAL);
3046                 exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
3047                 RETURN(0);
3048         }
3049
3050         if (KEY_IS(KEY_SPTLRPC_CONF)) {
3051                 sptlrpc_conf_client_adapt(obd);
3052                 RETURN(0);
3053         }
3054
3055         if (KEY_IS(KEY_FLUSH_CTX)) {
3056                 sptlrpc_import_flush_my_ctx(imp);
3057                 RETURN(0);
3058         }
3059
3060         if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
3061                 struct client_obd *cli = &obd->u.cli;
3062                 long nr = atomic_long_read(&cli->cl_lru_in_list) >> 1;
3063                 long target = *(long *)val;
3064
3065                 nr = osc_lru_shrink(env, cli, min(nr, target), true);
3066                 *(long *)val -= nr;
3067                 RETURN(0);
3068         }
3069
3070         if (!set && !KEY_IS(KEY_GRANT_SHRINK))
3071                 RETURN(-EINVAL);
3072
3073         /* We pass all other commands directly to the OST. Since nobody calls
3074          * OSC methods directly and everybody is supposed to go through LOV, we
3075          * assume LOV has checked invalid values for us.
3076          * The only recognised values so far are evict_by_nid and mds_conn.
3077          * Even if something bad goes through, we'd get an -EINVAL from the OST
3078          * anyway. */
3079
3080         req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ?
3081                                                 &RQF_OST_SET_GRANT_INFO :
3082                                                 &RQF_OBD_SET_INFO);
3083         if (req == NULL)
3084                 RETURN(-ENOMEM);
3085
3086         req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
3087                              RCL_CLIENT, keylen);
3088         if (!KEY_IS(KEY_GRANT_SHRINK))
3089                 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL,
3090                                      RCL_CLIENT, vallen);
3091         rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
3092         if (rc) {
3093                 ptlrpc_request_free(req);
3094                 RETURN(rc);
3095         }
3096
3097         tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
3098         memcpy(tmp, key, keylen);
3099         tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ?
3100                                                         &RMF_OST_BODY :
3101                                                         &RMF_SETINFO_VAL);
3102         memcpy(tmp, val, vallen);
3103
3104         if (KEY_IS(KEY_GRANT_SHRINK)) {
3105                 struct osc_grant_args *aa;
3106                 struct obdo *oa;
3107
3108                 aa = ptlrpc_req_async_args(aa, req);
3109                 OBD_SLAB_ALLOC_PTR_GFP(oa, osc_obdo_kmem, GFP_NOFS);
3110                 if (!oa) {
3111                         ptlrpc_req_finished(req);
3112                         RETURN(-ENOMEM);
3113                 }
3114                 *oa = ((struct ost_body *)val)->oa;
3115                 aa->aa_oa = oa;
3116                 req->rq_interpret_reply = osc_shrink_grant_interpret;
3117         }
3118
3119         ptlrpc_request_set_replen(req);
3120         if (!KEY_IS(KEY_GRANT_SHRINK)) {
3121                 LASSERT(set != NULL);
3122                 ptlrpc_set_add_req(set, req);
3123                 ptlrpc_check_set(NULL, set);
3124         } else {
3125                 ptlrpcd_add_req(req);
3126         }
3127
3128         RETURN(0);
3129 }
3130 EXPORT_SYMBOL(osc_set_info_async);
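
/*
 * The arithmetic of the KEY_CACHE_LRU_SHRINK branch above: each OSC offers
 * at most half of the pages on its LRU, capped by the caller's remaining
 * target, and the target is decremented by what was actually freed.  A
 * standalone sketch that assumes the shrink always frees what was asked.
 */
#include <assert.h>

static long ex_lru_shrink_step(long lru_in_list, long *target)
{
        long nr = lru_in_list >> 1;     /* offer at most half of the LRU */

        if (nr > *target)
                nr = *target;           /* cap by what the caller still needs */
        *target -= nr;                  /* caller tracks the remainder */
        return nr;
}

int main(void)
{
        long target = 300;

        assert(ex_lru_shrink_step(1000, &target) == 300 && target == 0);

        target = 800;
        assert(ex_lru_shrink_step(1000, &target) == 500 && target == 300);
        return 0;
}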
3131
3132 int osc_reconnect(const struct lu_env *env, struct obd_export *exp,
3133                   struct obd_device *obd, struct obd_uuid *cluuid,
3134                   struct obd_connect_data *data, void *localdata)
3135 {
3136         struct client_obd *cli = &obd->u.cli;
3137
3138         if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
3139                 long lost_grant;
3140                 long grant;
3141
3142                 spin_lock(&cli->cl_loi_list_lock);
3143                 grant = cli->cl_avail_grant + cli->cl_reserved_grant;
3144                 if (data->ocd_connect_flags & OBD_CONNECT_GRANT_PARAM) {
3145                         /* restore ocd_grant_blkbits as client page bits */
3146                         data->ocd_grant_blkbits = PAGE_SHIFT;
3147                         grant += cli->cl_dirty_grant;
3148                 } else {
3149                         grant += cli->cl_dirty_pages << PAGE_SHIFT;
3150                 }
3151                 data->ocd_grant = grant ? : 2 * cli_brw_size(obd);
3152                 lost_grant = cli->cl_lost_grant;
3153                 cli->cl_lost_grant = 0;
3154                 spin_unlock(&cli->cl_loi_list_lock);
3155
3156                 CDEBUG(D_RPCTRACE,
3157                        "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld\n",
3158                        data->ocd_connect_flags, data->ocd_version, data->ocd_grant, lost_grant);
3159         }
3160
3161         RETURN(0);
3162 }
3163 EXPORT_SYMBOL(osc_reconnect);
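
/*
 * The grant arithmetic from osc_reconnect() in isolation: without
 * OBD_CONNECT_GRANT_PARAM the dirty contribution is tracked in pages, so
 * cl_dirty_pages is scaled by PAGE_SHIFT.  A standalone sketch assuming
 * 4 KiB pages.
 */
#include <assert.h>

#define EX_PAGE_SHIFT 12                /* 4 KiB pages assumed */

int main(void)
{
        long avail = 1L << 20;          /* 1 MiB of unused grant     */
        long reserved = 0;
        long dirty_pages = 256;         /* 256 dirty pages == 1 MiB  */
        long grant;

        grant = avail + reserved + (dirty_pages << EX_PAGE_SHIFT);
        assert(grant == 2L << 20);      /* 2 MiB requested on reconnect */
        return 0;
}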
3164
3165 int osc_disconnect(struct obd_export *exp)
3166 {
3167         struct obd_device *obd = class_exp2obd(exp);
3168         int rc;
3169
3170         rc = client_disconnect_export(exp);
3171         /**
3172          * Initially we put del_shrink_grant before disconnect_export, but it
3173          * causes the following problem if setup (connect) and cleanup
3174          * (disconnect) are tangled together.
3175          *      connect p1                     disconnect p2
3176          *   ptlrpc_connect_import
3177          *     ...............               class_manual_cleanup
3178          *                                     osc_disconnect
3179          *                                     del_shrink_grant
3180          *   ptlrpc_connect_interpret
3181          *     osc_init_grant
3182          *   add this client to shrink list
3183          *                                      cleanup_osc
3184          * Bang! The grant shrink thread triggers the shrink. BUG18662
3185          */
3186         osc_del_grant_list(&obd->u.cli);
3187         return rc;
3188 }
3189 EXPORT_SYMBOL(osc_disconnect);
3190
3191 int osc_ldlm_resource_invalidate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
3192                                  struct hlist_node *hnode, void *arg)
3193 {
3194         struct lu_env *env = arg;
3195         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
3196         struct ldlm_lock *lock;
3197         struct osc_object *osc = NULL;
3198         ENTRY;
3199
3200         lock_res(res);
3201         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
3202                 if (lock->l_ast_data != NULL && osc == NULL) {
3203                         osc = lock->l_ast_data;
3204                         cl_object_get(osc2cl(osc));
3205                 }
3206
3207                 /* clear LDLM_FL_CLEANED flag to make sure it will be canceled
3208                  * by the 2nd round of ldlm_namespace_clean() call in
3209                  * osc_import_event(). */
3210                 ldlm_clear_cleaned(lock);
3211         }
3212         unlock_res(res);
3213
3214         if (osc != NULL) {
3215                 osc_object_invalidate(env, osc);
3216                 cl_object_put(env, osc2cl(osc));
3217         }
3218
3219         RETURN(0);
3220 }
3221 EXPORT_SYMBOL(osc_ldlm_resource_invalidate);
3222
3223 static int osc_import_event(struct obd_device *obd,
3224                             struct obd_import *imp,
3225                             enum obd_import_event event)
3226 {
3227         struct client_obd *cli;
3228         int rc = 0;
3229
3230         ENTRY;
3231         LASSERT(imp->imp_obd == obd);
3232
3233         switch (event) {
3234         case IMP_EVENT_DISCON: {
3235                 cli = &obd->u.cli;
3236                 spin_lock(&cli->cl_loi_list_lock);
3237                 cli->cl_avail_grant = 0;
3238                 cli->cl_lost_grant = 0;
3239                 spin_unlock(&cli->cl_loi_list_lock);
3240                 break;
3241         }
3242         case IMP_EVENT_INACTIVE: {
3243                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
3244                 break;
3245         }
3246         case IMP_EVENT_INVALIDATE: {
3247                 struct ldlm_namespace *ns = obd->obd_namespace;
3248                 struct lu_env         *env;
3249                 __u16                  refcheck;
3250
3251                 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3252
3253                 env = cl_env_get(&refcheck);
3254                 if (!IS_ERR(env)) {
3255                         osc_io_unplug(env, &obd->u.cli, NULL);
3256
3257                         cfs_hash_for_each_nolock(ns->ns_rs_hash,
3258                                                  osc_ldlm_resource_invalidate,
3259                                                  env, 0);
3260                         cl_env_put(env, &refcheck);
3261
3262                         ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
3263                 } else
3264                         rc = PTR_ERR(env);
3265                 break;
3266         }
3267         case IMP_EVENT_ACTIVE: {
3268                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
3269                 break;
3270         }
3271         case IMP_EVENT_OCD: {
3272                 struct obd_connect_data *ocd = &imp->imp_connect_data;
3273
3274                 if (ocd->ocd_connect_flags & OBD_CONNECT_GRANT)
3275                         osc_init_grant(&obd->u.cli, ocd);
3276
3277                 /* See bug 7198 */
3278                 if (ocd->ocd_connect_flags & OBD_CONNECT_REQPORTAL)
3279                         imp->imp_client->cli_request_portal = OST_REQUEST_PORTAL;
3280
3281                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
3282                 break;
3283         }
3284         case IMP_EVENT_DEACTIVATE: {
3285                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DEACTIVATE);
3286                 break;
3287         }
3288         case IMP_EVENT_ACTIVATE: {
3289                 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVATE);
3290                 break;
3291         }
3292         default:
3293                 CERROR("Unknown import event %d\n", event);
3294                 LBUG();
3295         }
3296         RETURN(rc);
3297 }
3298
3299 /**
3300  * Determine whether the lock can be canceled before replaying the lock
3301  * during recovery, see bug16774 for detailed information.
3302  *
3303  * \retval zero the lock can't be canceled
3304  * \retval other ok to cancel
3305  */
3306 static int osc_cancel_weight(struct ldlm_lock *lock)
3307 {
3308         /*
3309          * Cancel all unused and granted extent locks.
3310          */
3311         if (lock->l_resource->lr_type == LDLM_EXTENT &&
3312             ldlm_is_granted(lock) &&
3313             osc_ldlm_weigh_ast(lock) == 0)
3314                 RETURN(1);
3315
3316         RETURN(0);
3317 }
3318
3319 static int brw_queue_work(const struct lu_env *env, void *data)
3320 {
3321         struct client_obd *cli = data;
3322
3323         CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
3324
3325         osc_io_unplug(env, cli, NULL);
3326         RETURN(0);
3327 }
3328
3329 int osc_setup_common(struct obd_device *obd, struct lustre_cfg *lcfg)
3330 {
3331         struct client_obd *cli = &obd->u.cli;
3332         void *handler;
3333         int rc;
3334
3335         ENTRY;
3336
3337         rc = ptlrpcd_addref();
3338         if (rc)
3339                 RETURN(rc);
3340
3341         rc = client_obd_setup(obd, lcfg);
3342         if (rc)
3343                 GOTO(out_ptlrpcd, rc);
3344
3345
3346         handler = ptlrpcd_alloc_work(cli->cl_import, brw_queue_work, cli);
3347         if (IS_ERR(handler))
3348                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3349         cli->cl_writeback_work = handler;
3350
3351         handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
3352         if (IS_ERR(handler))
3353                 GOTO(out_ptlrpcd_work, rc = PTR_ERR(handler));
3354         cli->cl_lru_work = handler;
3355
3356         rc = osc_quota_setup(obd);
3357         if (rc)
3358                 GOTO(out_ptlrpcd_work, rc);
3359
3360         cli->cl_grant_shrink_interval = GRANT_SHRINK_INTERVAL;
3361         osc_update_next_shrink(cli);
3362
3363         RETURN(rc);
3364
3365 out_ptlrpcd_work:
3366         if (cli->cl_writeback_work != NULL) {
3367                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3368                 cli->cl_writeback_work = NULL;
3369         }
3370         if (cli->cl_lru_work != NULL) {
3371                 ptlrpcd_destroy_work(cli->cl_lru_work);
3372                 cli->cl_lru_work = NULL;
3373         }
3374         client_obd_cleanup(obd);
3375 out_ptlrpcd:
3376         ptlrpcd_decref();
3377         RETURN(rc);
3378 }
3379 EXPORT_SYMBOL(osc_setup_common);
3380
3381 int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
3382 {
3383         struct client_obd *cli = &obd->u.cli;
3384         int                adding;
3385         int                added;
3386         int                req_count;
3387         int                rc;
3388
3389         ENTRY;
3390
3391         rc = osc_setup_common(obd, lcfg);
3392         if (rc < 0)
3393                 RETURN(rc);
3394
3395         rc = osc_tunables_init(obd);
3396         if (rc)
3397                 RETURN(rc);
3398
3399         /*
3400          * We try to control the total number of requests with an upper limit
3401          * osc_reqpool_maxreqcount. There might be some race which will cause
3402          * over-limit allocation, but it is fine.
3403          */
3404         req_count = atomic_read(&osc_pool_req_count);
3405         if (req_count < osc_reqpool_maxreqcount) {
3406                 adding = cli->cl_max_rpcs_in_flight + 2;
3407                 if (req_count + adding > osc_reqpool_maxreqcount)
3408                         adding = osc_reqpool_maxreqcount - req_count;
3409
3410                 added = ptlrpc_add_rqs_to_pool(osc_rq_pool, adding);
3411                 atomic_add(added, &osc_pool_req_count);
3412         }
3413
3414         ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
3415
3416         spin_lock(&osc_shrink_lock);
3417         list_add_tail(&cli->cl_shrink_list, &osc_shrink_list);
3418         spin_unlock(&osc_shrink_lock);
3419         cli->cl_import->imp_idle_timeout = osc_idle_timeout;
3420         cli->cl_import->imp_idle_debug = D_HA;
3421
3422         RETURN(0);
3423 }
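
/*
 * The request-pool top-up in osc_setup() in isolation: the global count is
 * read, the addition is clamped against the cap, and only what was really
 * added is published.  Concurrent setups may overshoot slightly, which the
 * comment above explicitly tolerates.  Standalone sketch.
 */
#include <assert.h>

static int ex_pool_topup(int current_count, int max_count, int wanted)
{
        if (current_count >= max_count)
                return 0;
        if (current_count + wanted > max_count)
                wanted = max_count - current_count;
        return wanted;                  /* number of requests to allocate */
}

int main(void)
{
        assert(ex_pool_topup(90, 100, 34) == 10);       /* clamped to cap  */
        assert(ex_pool_topup(10, 100, 34) == 34);       /* fits untouched  */
        assert(ex_pool_topup(100, 100, 34) == 0);       /* already full    */
        return 0;
}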
3424
3425 int osc_precleanup_common(struct obd_device *obd)
3426 {
3427         struct client_obd *cli = &obd->u.cli;
3428         ENTRY;
3429
3430         /* LU-464
3431          * for echo client, export may be on zombie list, wait for
3432          * zombie thread to cull it, because cli.cl_import will be
3433          * cleared in client_disconnect_export():
3434          *   class_export_destroy() -> obd_cleanup() ->
3435          *   echo_device_free() -> echo_client_cleanup() ->
3436          *   obd_disconnect() -> osc_disconnect() ->
3437          *   client_disconnect_export()
3438          */
3439         obd_zombie_barrier();
3440         if (cli->cl_writeback_work) {
3441                 ptlrpcd_destroy_work(cli->cl_writeback_work);
3442                 cli->cl_writeback_work = NULL;
3443         }
3444
3445         if (cli->cl_lru_work) {
3446                 ptlrpcd_destroy_work(cli->cl_lru_work);
3447                 cli->cl_lru_work = NULL;
3448         }
3449
3450         obd_cleanup_client_import(obd);
3451         RETURN(0);
3452 }
3453 EXPORT_SYMBOL(osc_precleanup_common);
3454
3455 static int osc_precleanup(struct obd_device *obd)
3456 {
3457         ENTRY;
3458
3459         osc_precleanup_common(obd);
3460
3461         ptlrpc_lprocfs_unregister_obd(obd);
3462         RETURN(0);
3463 }
3464
3465 int osc_cleanup_common(struct obd_device *obd)
3466 {
3467         struct client_obd *cli = &obd->u.cli;
3468         int rc;
3469
3470         ENTRY;
3471
3472         spin_lock(&osc_shrink_lock);
3473         list_del(&cli->cl_shrink_list);
3474         spin_unlock(&osc_shrink_lock);
3475
3476         /* lru cleanup */
3477         if (cli->cl_cache != NULL) {
3478                 LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
3479                 spin_lock(&cli->cl_cache->ccc_lru_lock);
3480                 list_del_init(&cli->cl_lru_osc);
3481                 spin_unlock(&cli->cl_cache->ccc_lru_lock);
3482                 cli->cl_lru_left = NULL;
3483                 cl_cache_decref(cli->cl_cache);
3484                 cli->cl_cache = NULL;
3485         }
3486
3487         /* free memory of osc quota cache */
3488         osc_quota_cleanup(obd);
3489
3490         rc = client_obd_cleanup(obd);
3491
3492         ptlrpcd_decref();
3493         RETURN(rc);
3494 }
3495 EXPORT_SYMBOL(osc_cleanup_common);
3496
3497 static const struct obd_ops osc_obd_ops = {
3498         .o_owner                = THIS_MODULE,
3499         .o_setup                = osc_setup,
3500         .o_precleanup           = osc_precleanup,
3501         .o_cleanup              = osc_cleanup_common,
3502         .o_add_conn             = client_import_add_conn,
3503         .o_del_conn             = client_import_del_conn,
3504         .o_connect              = client_connect_import,
3505         .o_reconnect            = osc_reconnect,
3506         .o_disconnect           = osc_disconnect,
3507         .o_statfs               = osc_statfs,
3508         .o_statfs_async         = osc_statfs_async,
3509         .o_create               = osc_create,
3510         .o_destroy              = osc_destroy,
3511         .o_getattr              = osc_getattr,
3512         .o_setattr              = osc_setattr,
3513         .o_iocontrol            = osc_iocontrol,
3514         .o_set_info_async       = osc_set_info_async,
3515         .o_import_event         = osc_import_event,
3516         .o_quotactl             = osc_quotactl,
3517 };
3518
3519 static struct shrinker *osc_cache_shrinker;
3520 LIST_HEAD(osc_shrink_list);
3521 DEFINE_SPINLOCK(osc_shrink_lock);
3522
3523 #ifndef HAVE_SHRINKER_COUNT
3524 static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
3525 {
3526         struct shrink_control scv = {
3527                 .nr_to_scan = shrink_param(sc, nr_to_scan),
3528                 .gfp_mask   = shrink_param(sc, gfp_mask)
3529         };
3530         (void)osc_cache_shrink_scan(shrinker, &scv);
3531
3532         return osc_cache_shrink_count(shrinker, &scv);
3533 }
3534 #endif
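
/*
 * The shape of the compat shim above in standalone form: older kernels
 * expose a single shrinker entry point while newer ones split it into a
 * count/scan pair, and the wrapper serves the old API by running scan
 * first and returning the count.  All names below are illustrative.
 */
#include <assert.h>

struct ex_shrink_ctl {
        unsigned long nr_to_scan;
};

static unsigned long ex_cached = 100;   /* objects that could be freed */

static unsigned long ex_shrink_count(struct ex_shrink_ctl *sc)
{
        (void)sc;
        return ex_cached;
}

static unsigned long ex_shrink_scan(struct ex_shrink_ctl *sc)
{
        unsigned long freed = sc->nr_to_scan < ex_cached ?
                              sc->nr_to_scan : ex_cached;

        ex_cached -= freed;
        return freed;
}

/* old-style single entry point, as emulated by osc_cache_shrink() */
static unsigned long ex_shrink_legacy(struct ex_shrink_ctl *sc)
{
        (void)ex_shrink_scan(sc);
        return ex_shrink_count(sc);
}

int main(void)
{
        struct ex_shrink_ctl sc = { .nr_to_scan = 30 };

        assert(ex_shrink_legacy(&sc) == 70);    /* 30 freed, 70 remain */
        return 0;
}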
3535
3536 static int __init osc_init(void)
3537 {
3538         unsigned int reqpool_size;
3539         unsigned int reqsize;
3540         int rc;
3541         DEF_SHRINKER_VAR(osc_shvar, osc_cache_shrink,
3542                          osc_cache_shrink_count, osc_cache_shrink_scan);
3543         ENTRY;
3544
3545         /* print an address of _any_ initialized kernel symbol from this
3546          * module, to allow debugging with gdb that doesn't support data
3547          * symbols from modules. */
3548         CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
3549
3550         rc = lu_kmem_init(osc_caches);
3551         if (rc)
3552                 RETURN(rc);
3553
3554         rc = class_register_type(&osc_obd_ops, NULL, true, NULL,
3555                                  LUSTRE_OSC_NAME, &osc_device_type);
3556         if (rc)
3557                 GOTO(out_kmem, rc);
3558
3559         osc_cache_shrinker = set_shrinker(DEFAULT_SEEKS, &osc_shvar);
3560
3561         /* This is obviously too much memory; we only prevent overflow here */
3562         if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
3563                 GOTO(out_type, rc = -EINVAL);
3564
3565         reqpool_size = osc_reqpool_mem_max << 20;
3566
3567         reqsize = 1;
3568         while (reqsize < OST_IO_MAXREQSIZE)
3569                 reqsize = reqsize << 1;
3570
3571         /*
3572          * We don't enlarge the request count in OSC pool according to
3573          * cl_max_rpcs_in_flight. The allocation from the pool will only be
3574          * tried after normal allocation failed. So a small OSC pool won't
3575          * cause much performance degradation in most cases.
3576          */
3577         osc_reqpool_maxreqcount = reqpool_size / reqsize;
3578
3579         atomic_set(&osc_pool_req_count, 0);
3580         osc_rq_pool = ptlrpc_init_rq_pool(0, OST_IO_MAXREQSIZE,
3581                                           ptlrpc_add_rqs_to_pool);
3582
3583         if (osc_rq_pool == NULL)
3584                 GOTO(out_type, rc = -ENOMEM);
3585
3586         rc = osc_start_grant_work();
3587         if (rc != 0)
3588                 GOTO(out_req_pool, rc);
3589
3590         RETURN(rc);
3591
3592 out_req_pool:
3593         ptlrpc_free_rq_pool(osc_rq_pool);
3594 out_type:
3595         class_unregister_type(LUSTRE_OSC_NAME);
3596 out_kmem:
3597         lu_kmem_fini(osc_caches);
3598
3599         RETURN(rc);
3600 }
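
/*
 * The pool sizing from osc_init() in isolation: the maximum request size
 * is rounded up to the next power of two and the memory cap (in MiB) is
 * divided by it.  The request size below is illustrative, not the real
 * OST_IO_MAXREQSIZE.
 */
#include <assert.h>

static unsigned int ex_roundup_pow2(unsigned int n)
{
        unsigned int r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned int mem_max_mb = 5;                    /* osc_reqpool_mem_max */
        unsigned int reqsize = ex_roundup_pow2(600000); /* -> 1 MiB            */
        unsigned int maxreqcount = (mem_max_mb << 20) / reqsize;

        assert(reqsize == 1048576);
        assert(maxreqcount == 5);       /* five pooled requests fit in 5 MiB */
        return 0;
}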
3601
3602 static void __exit osc_exit(void)
3603 {
3604         osc_stop_grant_work();
3605         remove_shrinker(osc_cache_shrinker);
3606         class_unregister_type(LUSTRE_OSC_NAME);
3607         lu_kmem_fini(osc_caches);
3608         ptlrpc_free_rq_pool(osc_rq_pool);
3609 }
3610
3611 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3612 MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
3613 MODULE_VERSION(LUSTRE_VERSION_STRING);
3614 MODULE_LICENSE("GPL");
3615
3616 module_init(osc_init);
3617 module_exit(osc_exit);