lustre/ptlrpc/sec.c
1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * Copyright (C) 2004-2007 Cluster File Systems, Inc.
5  *   Author: Eric Mei <ericm@clusterfs.com>
6  *
7  *   This file is part of Lustre, http://www.lustre.org.
8  *
9  *   Lustre is free software; you can redistribute it and/or
10  *   modify it under the terms of version 2 of the GNU General Public
11  *   License as published by the Free Software Foundation.
12  *
13  *   Lustre is distributed in the hope that it will be useful,
14  *   but WITHOUT ANY WARRANTY; without even the implied warranty of
15  *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16  *   GNU General Public License for more details.
17  *
18  *   You should have received a copy of the GNU General Public License
19  *   along with Lustre; if not, write to the Free Software
20  *   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21  */
22
23 #ifndef EXPORT_SYMTAB
24 #define EXPORT_SYMTAB
25 #endif
26 #define DEBUG_SUBSYSTEM S_SEC
27
28 #include <libcfs/libcfs.h>
29 #ifndef __KERNEL__
30 #include <liblustre.h>
31 #include <libcfs/list.h>
32 #else
33 #include <linux/crypto.h>
34 #include <linux/key.h>
35 #endif
36
37 #include <obd.h>
38 #include <obd_class.h>
39 #include <obd_support.h>
40 #include <lustre_net.h>
41 #include <lustre_import.h>
42 #include <lustre_dlm.h>
43 #include <lustre_sec.h>
44
45 #include "ptlrpc_internal.h"
46
47 /***********************************************
48  * policy registers                            *
49  ***********************************************/
50
51 static rwlock_t policy_lock = RW_LOCK_UNLOCKED;
52 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
53         NULL,
54 };
55
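/*
 * Policies live in a small table indexed by their SPTLRPC_POLICY_* number
 * and protected by policy_lock.  sptlrpc_register_policy() fails with
 * -EINVAL for an out-of-range number and -EALREADY for an occupied slot.
 * A policy implementation would typically register at module init and
 * unregister on exit; a minimal sketch (illustrative only, the struct and
 * ops names below are made up):
 *
 *      static struct ptlrpc_sec_policy my_policy = {
 *              .sp_owner  = THIS_MODULE,
 *              .sp_name   = "my_policy",
 *              .sp_policy = SPTLRPC_POLICY_GSS,
 *              .sp_cops   = &my_cli_ops,
 *              .sp_sops   = &my_svc_ops,
 *      };
 *
 *      rc = sptlrpc_register_policy(&my_policy);
 *      ...
 *      sptlrpc_unregister_policy(&my_policy);
 */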
56 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
57 {
58         __u16 number = policy->sp_policy;
59
60         LASSERT(policy->sp_name);
61         LASSERT(policy->sp_cops);
62         LASSERT(policy->sp_sops);
63
64         if (number >= SPTLRPC_POLICY_MAX)
65                 return -EINVAL;
66
67         write_lock(&policy_lock);
68         if (unlikely(policies[number])) {
69                 write_unlock(&policy_lock);
70                 return -EALREADY;
71         }
72         policies[number] = policy;
73         write_unlock(&policy_lock);
74
75         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
76         return 0;
77 }
78 EXPORT_SYMBOL(sptlrpc_register_policy);
79
80 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
81 {
82         __u16 number = policy->sp_policy;
83
84         LASSERT(number < SPTLRPC_POLICY_MAX);
85
86         write_lock(&policy_lock);
87         if (unlikely(policies[number] == NULL)) {
88                 write_unlock(&policy_lock);
89                 CERROR("%s: already unregistered\n", policy->sp_name);
90                 return -EINVAL;
91         }
92
93         LASSERT(policies[number] == policy);
94         policies[number] = NULL;
95         write_unlock(&policy_lock);
96
97         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
98         return 0;
99 }
100 EXPORT_SYMBOL(sptlrpc_unregister_policy);
101
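/*
 * Map an rpc flavor to its registered policy and take a module reference
 * on it.  If the lookup fails and the flavor belongs to the GSS policy,
 * try loading the ptlrpc_gss module on demand, but attempt that only once.
 */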
102 static
103 struct ptlrpc_sec_policy * sptlrpc_rpcflavor2policy(__u16 flavor)
104 {
105         static DECLARE_MUTEX(load_mutex);
106         static atomic_t           loaded = ATOMIC_INIT(0);
107         struct ptlrpc_sec_policy *policy;
108         __u16                     number = RPC_FLVR_POLICY(flavor), flag = 0;
109
110         if (number >= SPTLRPC_POLICY_MAX)
111                 return NULL;
112
113 again:
114         read_lock(&policy_lock);
115         policy = policies[number];
116         if (policy && !try_module_get(policy->sp_owner))
117                 policy = NULL;
118         if (policy == NULL)
119                 flag = atomic_read(&loaded);
120         read_unlock(&policy_lock);
121
122         /* on failure, try to load the gss module, but only once */
123         if (unlikely(policy == NULL) && flag == 0 &&
124             number == SPTLRPC_POLICY_GSS) {
125                 mutex_down(&load_mutex);
126                 if (atomic_read(&loaded) == 0) {
127                         if (request_module("ptlrpc_gss") != 0)
128                                 CERROR("Unable to load module ptlrpc_gss\n");
129                         else
130                                 CWARN("module ptlrpc_gss loaded on demand\n");
131
132                         atomic_set(&loaded, 1);
133                 }
134                 mutex_up(&load_mutex);
135
136                 goto again;
137         }
138
139         return policy;
140 }
141
142 __u16 sptlrpc_name2rpcflavor(const char *name)
143 {
144         if (!strcmp(name, "null"))
145                 return SPTLRPC_FLVR_NULL;
146         if (!strcmp(name, "plain"))
147                 return SPTLRPC_FLVR_PLAIN;
148         if (!strcmp(name, "krb5n"))
149                 return SPTLRPC_FLVR_KRB5N;
150         if (!strcmp(name, "krb5i"))
151                 return SPTLRPC_FLVR_KRB5I;
152         if (!strcmp(name, "krb5p"))
153                 return SPTLRPC_FLVR_KRB5P;
154
155         return SPTLRPC_FLVR_INVALID;
156 }
157 EXPORT_SYMBOL(sptlrpc_name2rpcflavor);
158
159 const char *sptlrpc_rpcflavor2name(__u16 flavor)
160 {
161         switch (flavor) {
162         case SPTLRPC_FLVR_NULL:
163                 return "null";
164         case SPTLRPC_FLVR_PLAIN:
165                 return "plain";
166         case SPTLRPC_FLVR_KRB5N:
167                 return "krb5n";
168         case SPTLRPC_FLVR_KRB5A:
169                 return "krb5a";
170         case SPTLRPC_FLVR_KRB5I:
171                 return "krb5i";
172         case SPTLRPC_FLVR_KRB5P:
173                 return "krb5p";
174         default:
175                 CERROR("invalid rpc flavor 0x%x(p%u,s%u,v%u)\n", flavor,
176                        RPC_FLVR_POLICY(flavor), RPC_FLVR_MECH(flavor),
177                        RPC_FLVR_SVC(flavor));
178         }
179         return "unknown";
180 }
181 EXPORT_SYMBOL(sptlrpc_rpcflavor2name);
182
183 int sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
184 {
185         char           *bulk;
186
187         if (sf->sf_bulk_ciph != BULK_CIPH_ALG_NULL)
188                 bulk = "bulkp";
189         else if (sf->sf_bulk_hash != BULK_HASH_ALG_NULL)
190                 bulk = "bulki";
191         else
192                 bulk = "bulkn";
193
194         snprintf(buf, bufsize, "%s-%s:%s/%s",
195                  sptlrpc_rpcflavor2name(sf->sf_rpc), bulk,
196                  sptlrpc_get_hash_name(sf->sf_bulk_hash),
197                  sptlrpc_get_ciph_name(sf->sf_bulk_ciph));
198         return 0;
199 }
200 EXPORT_SYMBOL(sptlrpc_flavor2name);
201
202 /**************************************************
203  * client context APIs                            *
204  **************************************************/
205
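/*
 * Find (and possibly create) the client context to use for the calling
 * user under @sec.  Reverse and root-only flavors always use the root
 * credential; a reverse sec additionally neither creates new contexts nor
 * prunes dead ones here.
 */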
206 static
207 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
208 {
209         struct vfs_cred vcred;
210         int create = 1, remove_dead = 1;
211
212         LASSERT(sec);
213         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
214
215         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
216                                      PTLRPC_SEC_FL_ROOTONLY)) {
217                 vcred.vc_uid = 0;
218                 vcred.vc_gid = 0;
219                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
220                         create = 0;
221                         remove_dead = 0;
222                 }
223         } else {
224                 vcred.vc_uid = cfs_current()->uid;
225                 vcred.vc_gid = cfs_current()->gid;
226         }
227
228         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
229                                                    create, remove_dead);
230 }
231
232 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
233 {
234         LASSERT(atomic_read(&ctx->cc_refcount) > 0);
235         atomic_inc(&ctx->cc_refcount);
236         return ctx;
237 }
238 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
239
240 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
241 {
242         struct ptlrpc_sec *sec = ctx->cc_sec;
243
244         LASSERT(sec);
245         LASSERT(atomic_read(&ctx->cc_refcount));
246
247         if (!atomic_dec_and_test(&ctx->cc_refcount))
248                 return;
249
250         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
251 }
252 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
253
254 /*
255  * expire the context immediately.
256  * the caller must hold at least 1 ref on the ctx.
257  */
258 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
259 {
260         LASSERT(ctx->cc_ops->die);
261         ctx->cc_ops->die(ctx, 0);
262 }
263 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
264
265 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
266 {
267         struct ptlrpc_request *req, *next;
268
269         spin_lock(&ctx->cc_lock);
270         list_for_each_entry_safe(req, next, &ctx->cc_req_list, rq_ctx_chain) {
271                 list_del_init(&req->rq_ctx_chain);
272                 ptlrpc_wake_client_req(req);
273         }
274         spin_unlock(&ctx->cc_lock);
275 }
276 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
277
278 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
279 {
280         LASSERT(ctx->cc_ops);
281
282         if (ctx->cc_ops->display == NULL)
283                 return 0;
284
285         return ctx->cc_ops->display(ctx, buf, bufsize);
286 }
287
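/*
 * If a delayed sec adapt was scheduled on this import (imp_sec_expire)
 * and its deadline has passed, clear it and run the adapt now.
 */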
288 static int sptlrpc_import_sec_check_expire(struct obd_import *imp)
289 {
290         int     adapt = 0;
291
292         spin_lock(&imp->imp_lock);
293         if (imp->imp_sec_expire &&
294             imp->imp_sec_expire < cfs_time_current_sec()) {
295                 adapt = 1;
296                 imp->imp_sec_expire = 0;
297         }
298         spin_unlock(&imp->imp_lock);
299
300         if (!adapt)
301                 return 0;
302
303         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
304         return sptlrpc_import_sec_adapt(imp, NULL, 0);
305 }
306
307 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
308 {
309         struct obd_import *imp = req->rq_import;
310         struct ptlrpc_sec *sec;
311         int                rc;
312         ENTRY;
313
314         LASSERT(!req->rq_cli_ctx);
315         LASSERT(imp);
316
317         if (unlikely(imp->imp_sec_expire)) {
318                 rc = sptlrpc_import_sec_check_expire(imp);
319                 if (rc)
320                         RETURN(rc);
321         }
322
323         sec = sptlrpc_import_sec_ref(imp);
324         if (sec == NULL) {
325                 CERROR("import %p (%s) with no ptlrpc_sec\n",
326                        imp, ptlrpc_import_state_name(imp->imp_state));
327                 RETURN(-EACCES);
328         }
329
330         if (unlikely(sec->ps_dying)) {
331                 CERROR("attempt to use dying sec %p\n", sec);
332                 return -EACCES;
333         }
334
335         req->rq_cli_ctx = get_my_ctx(sec);
336
337         sptlrpc_sec_put(sec);
338
339         if (!req->rq_cli_ctx) {
340                 CERROR("req %p: fail to get context\n", req);
341                 RETURN(-ENOMEM);
342         }
343
344         RETURN(0);
345 }
346
347 /*
348  * if @sync == 0, this function should return quickly without sleeping;
349  * otherwise it might trigger a ctx-destroying rpc to the server.
350  */
351 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
352 {
353         ENTRY;
354
355         LASSERT(req);
356         LASSERT(req->rq_cli_ctx);
357
358         /* the request might be asked to release its context early,
359          * while it is still on the context's waiting list.
360          */
361         if (!list_empty(&req->rq_ctx_chain)) {
362                 spin_lock(&req->rq_cli_ctx->cc_lock);
363                 list_del_init(&req->rq_ctx_chain);
364                 spin_unlock(&req->rq_cli_ctx->cc_lock);
365         }
366
367         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
368         req->rq_cli_ctx = NULL;
369         EXIT;
370 }
371
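/*
 * Move @req from @oldctx to @newctx when the two contexts belong to
 * different secs: save the flavor and the packed request message, free the
 * old request/reply buffers while still pointing at the old ctx, recompute
 * the flavor, then allocate a fresh request buffer under the new ctx and
 * copy the message back.  On allocation failure the old flavor is restored
 * and an error is returned.
 */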
372 static
373 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
374                            struct ptlrpc_cli_ctx *oldctx,
375                            struct ptlrpc_cli_ctx *newctx)
376 {
377         struct sptlrpc_flavor   old_flvr;
378         char                   *reqmsg;
379         int                     reqmsg_size;
380         int                     rc;
381
382         if (likely(oldctx->cc_sec == newctx->cc_sec))
383                 return 0;
384
385         LASSERT(req->rq_reqmsg);
386         LASSERT(req->rq_reqlen);
387         LASSERT(req->rq_replen);
388
389         CWARN("req %p: switch ctx %p -> %p, switch sec %p(%s) -> %p(%s)\n",
390               req, oldctx, newctx,
391               oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
392               newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
393
394         /* save flavor */
395         old_flvr = req->rq_flvr;
396
397         /* save request message */
398         reqmsg_size = req->rq_reqlen;
399         OBD_ALLOC(reqmsg, reqmsg_size);
400         if (reqmsg == NULL)
401                 return -ENOMEM;
402         memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
403
404         /* release old req/rep buf */
405         req->rq_cli_ctx = oldctx;
406         sptlrpc_cli_free_reqbuf(req);
407         sptlrpc_cli_free_repbuf(req);
408         req->rq_cli_ctx = newctx;
409
410         /* recalculate the flavor */
411         sptlrpc_req_set_flavor(req, 0);
412
413         /* alloc a new request buffer.
414          * we don't need to alloc the reply buffer here; leave that to the
415          * rest of the ptlrpc code.
416          */
417         rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
418         if (!rc) {
419                 LASSERT(req->rq_reqmsg);
420                 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
421         } else {
422                 CWARN("failed to alloc reqbuf: %d\n", rc);
423                 req->rq_flvr = old_flvr;
424         }
425
426         OBD_FREE(reqmsg, reqmsg_size);
427         return rc;
428 }
429
430 /*
431  * replace a dead context of a request with a fresh one. a request must
432  * always have a context, so on any failure the old one is restored.
433  */
434 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
435 {
436         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
437         struct ptlrpc_cli_ctx *newctx;
438         int                    rc;
439         ENTRY;
440
441         LASSERT(oldctx);
442         LASSERT(test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags));
443
444         sptlrpc_cli_ctx_get(oldctx);
445         sptlrpc_req_put_ctx(req, 0);
446
447         rc = sptlrpc_req_get_ctx(req);
448         if (unlikely(rc)) {
449                 LASSERT(!req->rq_cli_ctx);
450
451                 /* restore old ctx */
452                 req->rq_cli_ctx = oldctx;
453                 RETURN(rc);
454         }
455
456         newctx = req->rq_cli_ctx;
457         LASSERT(newctx);
458
459         if (unlikely(newctx == oldctx)) {
460                 /*
461                  * we still got the old ctx, which usually means the system is busy
462                  */
463                 CWARN("ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
464                       newctx, newctx->cc_flags);
465
466                 schedule_timeout(HZ);
467         } else {
468                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
469                 if (rc) {
470                         /* restore old ctx */
471                         sptlrpc_req_put_ctx(req, 0);
472                         req->rq_cli_ctx = oldctx;
473                         RETURN(rc);
474                 }
475
476                 LASSERT(req->rq_cli_ctx == newctx);
477         }
478
479         sptlrpc_cli_ctx_put(oldctx, 1);
480         RETURN(0);
481 }
482 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
483
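/*
 * Helpers for the l_wait_event() in sptlrpc_req_refresh_ctx() below:
 * ctx_check_refresh() is the wake-up condition, ctx_refresh_timeout()
 * expires the request (and may force the ctx dead, see the comment inside)
 * when the refresh takes too long, and ctx_refresh_interrupt() marks the
 * request interrupted when a signal arrives.
 */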
484 static
485 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
486 {
487         if (cli_ctx_is_refreshed(ctx))
488                 return 1;
489         return 0;
490 }
491
492 static
493 int ctx_refresh_timeout(void *data)
494 {
495         struct ptlrpc_request *req = data;
496         int rc;
497
498         /* conn_cnt is needed in expire_one_request */
499         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
500
501         rc = ptlrpc_expire_one_request(req);
502         /* if we started recovery, we should mark this ctx dead; otherwise,
503          * if lgssd died, nobody would ever retire this ctx and subsequent
504          * connect attempts would keep finding the same ctx, causing a
505          * deadlock.  we assume the expiry time of the request is later
506          * than the context refresh expiry time.
507          */
508         if (rc == 0)
509                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
510         return rc;
511 }
512
513 static
514 void ctx_refresh_interrupt(void *data)
515 {
516         struct ptlrpc_request *req = data;
517
518         spin_lock(&req->rq_lock);
519         req->rq_intr = 1;
520         spin_unlock(&req->rq_lock);
521 }
522
523 static
524 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
525 {
526         spin_lock(&ctx->cc_lock);
527         if (!list_empty(&req->rq_ctx_chain))
528                 list_del_init(&req->rq_ctx_chain);
529         spin_unlock(&ctx->cc_lock);
530 }
531
532 /*
533  * the status of the context may be changed by other threads at any time;
534  * we allow this race. but once we return 0, the caller will assume the
535  * context is up to date and keep using it until the owning rpc is done.
536  *
537  * @timeout:
538  *    < 0  - don't wait
539  *    = 0  - wait until success or a fatal error occurs
540  *    > 0  - timeout value (in seconds)
541  *
542  * return 0 only if the context is up to date.
543  */
544 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
545 {
546         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
547         struct l_wait_info      lwi;
548         int                     rc;
549         ENTRY;
550
551         LASSERT(ctx);
552
553         /*
554          * during this process a request's context might even change type
555          * (e.g. from a gss ctx to a plain ctx), so on each loop we need to
556          * re-check everything
557          */
558 again:
559         /* skip special ctxs */
560         if (cli_ctx_is_eternal(ctx) || req->rq_ctx_init || req->rq_ctx_fini)
561                 RETURN(0);
562
563         if (test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags)) {
564                 LASSERT(ctx->cc_ops->refresh);
565                 ctx->cc_ops->refresh(ctx);
566         }
567         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
568
569         LASSERT(ctx->cc_ops->validate);
570         if (ctx->cc_ops->validate(ctx) == 0) {
571                 req_off_ctx_list(req, ctx);
572                 RETURN(0);
573         }
574
575         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
576                 req->rq_err = 1;
577                 req_off_ctx_list(req, ctx);
578                 RETURN(-EPERM);
579         }
580
581         /* This is subtle. For a resent message we have to keep the original
582          * context to survive the following situation:
583          *  1. the request is sent to the server
584          *  2. recovery is kicked off
585          *  3. recovery finishes, the request is marked as resent
586          *  4. the request is resent
587          *  5. the old reply from the server is received (the xid is the same)
588          *  6. the reply is verified (this has to succeed)
589          *  7. a new reply from the server is received, and lnet drops it
590          *
591          * Note we can't simply change the xid of a resent request because
592          * the server relies on it for reply reconstruction.
593          *
594          * Commonly the original context should be up to date because we have
595          * a comfortable expiry margin; and the server will keep its half of
596          * the context because we still hold a ref on the old context, which
597          * prevents the context destroy RPC from being sent. So the server can
598          * still accept the request and finish the RPC. Two cases:
599          *  1. If the server-side context has been trimmed, NO_CONTEXT will
600          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
601          *     new context by force.
602          *  2. The current context has never been refreshed, then we are fine:
603          *     we never really sent a request with the old context before.
604          */
605         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
606             unlikely(req->rq_reqmsg) &&
607             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
608                 req_off_ctx_list(req, ctx);
609                 RETURN(0);
610         }
611
612         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
613                 rc = sptlrpc_req_replace_dead_ctx(req);
614                 if (rc) {
615                         LASSERT(ctx == req->rq_cli_ctx);
616                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
617                                 req, ctx, rc);
618                         req->rq_err = 1;
619                         LASSERT(list_empty(&req->rq_ctx_chain));
620                         RETURN(rc);
621                 }
622
623                 CWARN("req %p: replace dead ctx %p => ctx %p (%u->%s)\n",
624                       req, ctx, req->rq_cli_ctx,
625                       req->rq_cli_ctx->cc_vcred.vc_uid,
626                       sec2target_str(req->rq_cli_ctx->cc_sec));
627
628                 ctx = req->rq_cli_ctx;
629                 LASSERT(list_empty(&req->rq_ctx_chain));
630
631                 goto again;
632         }
633
634         /* Now we're sure this context is in the middle of an upcall; add
635          * ourselves to its waiting list
636          */
637         spin_lock(&ctx->cc_lock);
638         if (list_empty(&req->rq_ctx_chain))
639                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
640         spin_unlock(&ctx->cc_lock);
641
642         if (timeout < 0) {
643                 RETURN(-EWOULDBLOCK);
644         }
645
646         /* Clear any flags that may be present from previous sends */
647         LASSERT(req->rq_receiving_reply == 0);
648         spin_lock(&req->rq_lock);
649         req->rq_err = 0;
650         req->rq_timedout = 0;
651         req->rq_resend = 0;
652         req->rq_restart = 0;
653         spin_unlock(&req->rq_lock);
654
655         lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
656                                ctx_refresh_interrupt, req);
657         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
658
659         /* we could be here in the following cases:
660          * - successfully refreshed;
661          * - interrupted;
662          * - timed out, and we don't want to recover from the failure;
663          * - timed out, and woken up when recovery finished;
664          * - someone else marked this ctx dead by force;
665          * - someone invalidated the req and called wake_client_req(),
666          *   e.g. ptlrpc_abort_inflight();
667          */
668         if (!cli_ctx_is_refreshed(ctx)) {
669                 /* timed out or interrupted */
670                 req_off_ctx_list(req, ctx);
671
672                 LASSERT(rc != 0);
673                 RETURN(rc);
674         }
675
676         goto again;
677 }
678
679 /*
680  * Note this could be called in two situations:
681  * - a new request from ptlrpc_pre_req(), with a proper @opcode
682  * - an old request whose ctx was changed in the middle, with @opcode == 0
683  */
684 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
685 {
686         struct ptlrpc_sec *sec;
687
688         LASSERT(req->rq_import);
689         LASSERT(req->rq_cli_ctx);
690         LASSERT(req->rq_cli_ctx->cc_sec);
691         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
692
693         /* special security flags according to opcode */
694         switch (opcode) {
695         case OST_READ:
696                 req->rq_bulk_read = 1;
697                 break;
698         case OST_WRITE:
699                 req->rq_bulk_write = 1;
700                 break;
701         case SEC_CTX_INIT:
702                 req->rq_ctx_init = 1;
703                 break;
704         case SEC_CTX_FINI:
705                 req->rq_ctx_fini = 1;
706                 break;
707         case 0:
708                 /* init/fini rpcs won't be resent, so they can't be here */
709                 LASSERT(req->rq_ctx_init == 0);
710                 LASSERT(req->rq_ctx_fini == 0);
711
712                 /* cleanup flags, which should be recalculated */
713                 req->rq_pack_udesc = 0;
714                 req->rq_pack_bulk = 0;
715                 break;
716         }
717
718         sec = req->rq_cli_ctx->cc_sec;
719
720         spin_lock(&sec->ps_lock);
721         req->rq_flvr = sec->ps_flvr;
722         spin_unlock(&sec->ps_lock);
723
724         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
725          * destruction rpc */
726         if (unlikely(req->rq_ctx_init))
727                 rpc_flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
728         else if (unlikely(req->rq_ctx_fini))
729                 rpc_flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
730
731         /* user descriptor flag, null security can't do it anyway */
732         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
733             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
734                 req->rq_pack_udesc = 1;
735
736         /* bulk security flag */
737         if ((req->rq_bulk_read || req->rq_bulk_write) &&
738             (req->rq_flvr.sf_bulk_ciph != BULK_CIPH_ALG_NULL ||
739              req->rq_flvr.sf_bulk_hash != BULK_HASH_ALG_NULL))
740                 req->rq_pack_bulk = 1;
741 }
742
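/*
 * Called after the request has been sent out.  For privacy flavors the
 * clear-text message lives in rq_clrbuf, so the wire buffer rq_reqbuf can
 * be released early here, unless it came from the preallocated pool.
 */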
743 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
744 {
745         if (RPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
746                 return;
747
748         LASSERT(req->rq_clrbuf);
749         if (req->rq_pool || !req->rq_reqbuf)
750                 return;
751
752         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
753         req->rq_reqbuf = NULL;
754         req->rq_reqbuf_len = 0;
755 }
756
757 /*
758  * check whether the current user has a valid context for an import or not.
759  * it might retry repeatedly in case of non-fatal errors.
760  * return 0 on success, < 0 on failure
761  */
762 int sptlrpc_import_check_ctx(struct obd_import *imp)
763 {
764         struct ptlrpc_sec     *sec;
765         struct ptlrpc_cli_ctx *ctx;
766         struct ptlrpc_request *req = NULL;
767         int rc;
768         ENTRY;
769
770         might_sleep();
771
772         sec = sptlrpc_import_sec_ref(imp);
773         ctx = get_my_ctx(sec);
774         sptlrpc_sec_put(sec);
775
776         if (!ctx)
777                 RETURN(1);
778
779         if (cli_ctx_is_eternal(ctx) ||
780             ctx->cc_ops->validate(ctx) == 0) {
781                 sptlrpc_cli_ctx_put(ctx, 1);
782                 RETURN(0);
783         }
784
785         OBD_ALLOC_PTR(req);
786         if (!req)
787                 RETURN(-ENOMEM);
788
789         spin_lock_init(&req->rq_lock);
790         atomic_set(&req->rq_refcount, 10000);
791         CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
792         init_waitqueue_head(&req->rq_reply_waitq);
793         req->rq_import = imp;
794         req->rq_cli_ctx = ctx;
795
796         rc = sptlrpc_req_refresh_ctx(req, 0);
797         LASSERT(list_empty(&req->rq_ctx_chain));
798         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
799         OBD_FREE_PTR(req);
800
801         RETURN(rc);
802 }
803
804 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
805 {
806         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
807         int rc = 0;
808         ENTRY;
809
810         LASSERT(ctx);
811         LASSERT(ctx->cc_sec);
812         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
813
814         /* we wrap the bulk request here because now we can be sure
815          * the context is up to date.
816          */
817         if (req->rq_bulk) {
818                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
819                 if (rc)
820                         RETURN(rc);
821         }
822
823         switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
824         case SPTLRPC_SVC_NULL:
825         case SPTLRPC_SVC_AUTH:
826         case SPTLRPC_SVC_INTG:
827                 LASSERT(ctx->cc_ops->sign);
828                 rc = ctx->cc_ops->sign(ctx, req);
829                 break;
830         case SPTLRPC_SVC_PRIV:
831                 LASSERT(ctx->cc_ops->seal);
832                 rc = ctx->cc_ops->seal(ctx, req);
833                 break;
834         default:
835                 LBUG();
836         }
837
838         if (rc == 0) {
839                 LASSERT(req->rq_reqdata_len);
840                 LASSERT(req->rq_reqdata_len % 8 == 0);
841                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
842         }
843
844         RETURN(rc);
845 }
846
847 /*
848  * rq_nob_received is the actual received data length
849  */
850 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
851 {
852         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
853         int                    rc;
854         __u16                  rpc_flvr;
855         ENTRY;
856
857         LASSERT(ctx);
858         LASSERT(ctx->cc_sec);
859         LASSERT(ctx->cc_ops);
860         LASSERT(req->rq_repbuf);
861
862         req->rq_repdata_len = req->rq_nob_received;
863
864         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
865                 CERROR("replied data length %d too small\n",
866                        req->rq_nob_received);
867                 RETURN(-EPROTO);
868         }
869
870
871         /*
872          * v2 message, check request/reply policy match
873          */
874         rpc_flvr = WIRE_FLVR_RPC(req->rq_repbuf->lm_secflvr);
875
876         if (req->rq_repbuf->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
877                 __swab16s(&rpc_flvr);
878
879         if (RPC_FLVR_POLICY(rpc_flvr) !=
880                 RPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
881                 CERROR("request policy was %u while reply with %u\n",
882                         RPC_FLVR_POLICY(req->rq_flvr.sf_rpc),
883                         RPC_FLVR_POLICY(rpc_flvr));
884                 RETURN(-EPROTO);
885         }
886
887         /* do nothing if it's null policy; otherwise unpack the
888          * wrapper message
889          */
890         if (RPC_FLVR_POLICY(rpc_flvr) != SPTLRPC_POLICY_NULL &&
891             lustre_unpack_msg(req->rq_repbuf, req->rq_nob_received))
892                 RETURN(-EPROTO);
893
894         switch (RPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
895         case SPTLRPC_SVC_NULL:
896         case SPTLRPC_SVC_AUTH:
897         case SPTLRPC_SVC_INTG:
898                 LASSERT(ctx->cc_ops->verify);
899                 rc = ctx->cc_ops->verify(ctx, req);
900                 break;
901         case SPTLRPC_SVC_PRIV:
902                 LASSERT(ctx->cc_ops->unseal);
903                 rc = ctx->cc_ops->unseal(ctx, req);
904                 break;
905         default:
906                 LBUG();
907         }
908
909         LASSERT(rc || req->rq_repmsg || req->rq_resend);
910         RETURN(rc);
911 }
912
913 /**************************************************
914  * sec ID                                         *
915  **************************************************/
916
917 /*
918  * a "fixed" sec (e.g. null) uses sec_id < 0
919  */
920 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
921
922 int sptlrpc_get_next_secid(void)
923 {
924         return atomic_inc_return(&sptlrpc_sec_id);
925 }
926 EXPORT_SYMBOL(sptlrpc_get_next_secid);
927
928 /**************************************************
929  * client side high-level security APIs           *
930  **************************************************/
931
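/*
 * A ptlrpc_sec is reference counted: sptlrpc_sec_get()/sptlrpc_sec_put()
 * manage ps_refcount, sptlrpc_sec_kill() flushes the context cache when a
 * sec is being retired, and dropping the last reference removes the sec
 * from the garbage collector and calls the policy's destroy_sec().
 */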
932 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
933                                    int grace, int force)
934 {
935         struct ptlrpc_sec_policy *policy = sec->ps_policy;
936
937         LASSERT(policy->sp_cops);
938         LASSERT(policy->sp_cops->flush_ctx_cache);
939
940         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
941 }
942
943 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
944 {
945         struct ptlrpc_sec_policy *policy = sec->ps_policy;
946
947         LASSERT(atomic_read(&sec->ps_refcount) == 0);
948         LASSERT(atomic_read(&sec->ps_nctx) == 0);
949         LASSERT(policy->sp_cops->destroy_sec);
950
951         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
952
953         policy->sp_cops->destroy_sec(sec);
954         sptlrpc_policy_put(policy);
955 }
956
957 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
958 {
959         sec_cop_destroy_sec(sec);
960 }
961 EXPORT_SYMBOL(sptlrpc_sec_destroy);
962
963 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
964 {
965         LASSERT(atomic_read(&sec->ps_refcount) > 0);
966
967         if (sec->ps_policy->sp_cops->kill_sec) {
968                 sec->ps_policy->sp_cops->kill_sec(sec);
969
970                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
971         }
972 }
973
974 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
975 {
976         if (sec) {
977                 LASSERT(atomic_read(&sec->ps_refcount) > 0);
978                 atomic_inc(&sec->ps_refcount);
979         }
980
981         return sec;
982 }
983 EXPORT_SYMBOL(sptlrpc_sec_get);
984
985 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
986 {
987         if (sec) {
988                 LASSERT(atomic_read(&sec->ps_refcount) > 0);
989
990                 if (atomic_dec_and_test(&sec->ps_refcount)) {
991                         LASSERT(atomic_read(&sec->ps_nctx) == 0);
992
993                         sptlrpc_gc_del_sec(sec);
994                         sec_cop_destroy_sec(sec);
995                 }
996         }
997 }
998 EXPORT_SYMBOL(sptlrpc_sec_put);
999
1000 /*
1001  * it is the policy module's responsibility to take a reference on the import
1002  */
1003 static
1004 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1005                                        struct ptlrpc_svc_ctx *svc_ctx,
1006                                        struct sptlrpc_flavor *sf,
1007                                        enum lustre_sec_part sp)
1008 {
1009         struct ptlrpc_sec_policy *policy;
1010         struct ptlrpc_sec        *sec;
1011         ENTRY;
1012
1013         if (svc_ctx) {
1014                 LASSERT(imp->imp_dlm_fake == 1);
1015
1016                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1017                        imp->imp_obd->obd_type->typ_name,
1018                        imp->imp_obd->obd_name,
1019                        sptlrpc_rpcflavor2name(sf->sf_rpc));
1020
1021                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1022                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1023         } else {
1024                 LASSERT(imp->imp_dlm_fake == 0);
1025
1026                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1027                        imp->imp_obd->obd_type->typ_name,
1028                        imp->imp_obd->obd_name,
1029                        sptlrpc_rpcflavor2name(sf->sf_rpc));
1030
1031                 policy = sptlrpc_rpcflavor2policy(sf->sf_rpc);
1032                 if (!policy) {
1033                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1034                         RETURN(NULL);
1035                 }
1036         }
1037
1038         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1039         if (sec) {
1040                 atomic_inc(&sec->ps_refcount);
1041
1042                 sec->ps_part = sp;
1043
1044                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1045                         sptlrpc_gc_add_sec(sec);
1046         } else {
1047                 sptlrpc_policy_put(policy);
1048         }
1049
1050         RETURN(sec);
1051 }
1052
1053 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1054 {
1055         struct ptlrpc_sec *sec;
1056
1057         spin_lock(&imp->imp_lock);
1058         sec = sptlrpc_sec_get(imp->imp_sec);
1059         spin_unlock(&imp->imp_lock);
1060
1061         return sec;
1062 }
1063 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1064
1065 static void sptlrpc_import_sec_install(struct obd_import *imp,
1066                                        struct ptlrpc_sec *sec)
1067 {
1068         struct ptlrpc_sec *old_sec;
1069
1070         LASSERT(atomic_read(&sec->ps_refcount) > 0);
1071
1072         spin_lock(&imp->imp_lock);
1073         old_sec = imp->imp_sec;
1074         imp->imp_sec = sec;
1075         spin_unlock(&imp->imp_lock);
1076
1077         if (old_sec) {
1078                 sptlrpc_sec_kill(old_sec);
1079
1080                 /* balance the ref taken by this import */
1081                 sptlrpc_sec_put(old_sec);
1082         }
1083 }
1084
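/*
 * Adjust an existing sec in place when only the bulk cipher/hash algorithms
 * or the user-descriptor flag changed, so the sec (and all of its contexts)
 * does not have to be torn down and rebuilt.
 */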
1085 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1086                                              struct ptlrpc_sec *sec,
1087                                              struct sptlrpc_flavor *sf)
1088 {
1089         if (sf->sf_bulk_ciph != sec->ps_flvr.sf_bulk_ciph ||
1090             sf->sf_bulk_hash != sec->ps_flvr.sf_bulk_hash) {
1091                 CWARN("imp %p (%s->%s): changing bulk flavor %s/%s -> %s/%s\n",
1092                       imp, imp->imp_obd->obd_name,
1093                       obd_uuid2str(&imp->imp_connection->c_remote_uuid),
1094                       sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
1095                       sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
1096                       sptlrpc_get_ciph_name(sf->sf_bulk_ciph),
1097                       sptlrpc_get_hash_name(sf->sf_bulk_hash));
1098
1099                 spin_lock(&sec->ps_lock);
1100                 sec->ps_flvr.sf_bulk_ciph = sf->sf_bulk_ciph;
1101                 sec->ps_flvr.sf_bulk_hash = sf->sf_bulk_hash;
1102                 spin_unlock(&sec->ps_lock);
1103         }
1104
1105         if (!equi(sf->sf_flags & PTLRPC_SEC_FL_UDESC,
1106                   sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC)) {
1107                 CWARN("imp %p (%s->%s): %s shipping user descriptor\n",
1108                       imp, imp->imp_obd->obd_name,
1109                       obd_uuid2str(&imp->imp_connection->c_remote_uuid),
1110                       (sf->sf_flags & PTLRPC_SEC_FL_UDESC) ? "start" : "stop");
1111
1112                 spin_lock(&sec->ps_lock);
1113                 sec->ps_flvr.sf_flags &= ~PTLRPC_SEC_FL_UDESC;
1114                 sec->ps_flvr.sf_flags |= sf->sf_flags & PTLRPC_SEC_FL_UDESC;
1115                 spin_unlock(&sec->ps_lock);
1116         }
1117 }
1118
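/*
 * Overall flow of sptlrpc_import_sec_adapt(): pick the wanted flavor (from
 * the client's rule set for a normal import, or from the incoming request
 * for a reverse import), then either keep the current sec, tweak it in
 * place, or create and install a brand new sec under imp_sec_mutex.
 */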
1119 /*
1120  * for a normal import, @svc_ctx should be NULL and @rpc_flavor is ignored;
1121  * for a reverse import, they come from the incoming request.
1122  */
1123 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1124                              struct ptlrpc_svc_ctx *svc_ctx,
1125                              __u16 rpc_flavor)
1126 {
1127         struct ptlrpc_connection   *conn;
1128         struct sptlrpc_flavor       sf;
1129         struct ptlrpc_sec          *sec, *newsec;
1130         enum lustre_sec_part        sp;
1131         int                         rc;
1132
1133         if (imp == NULL)
1134                 return 0;
1135
1136         conn = imp->imp_connection;
1137
1138         if (svc_ctx == NULL) {
1139                 /* normal import, determine flavor from rule set */
1140                 sptlrpc_rule_set_choose(&imp->imp_obd->u.cli.cl_sptlrpc_rset,
1141                                         LUSTRE_SP_ANY, conn->c_self, &sf);
1142
1143                 sp = imp->imp_obd->u.cli.cl_sec_part;
1144         } else {
1145                 /* reverse import, determine flavor from the incoming request */
1146                 sf.sf_rpc = rpc_flavor;
1147                 sf.sf_bulk_ciph = BULK_CIPH_ALG_NULL;
1148                 sf.sf_bulk_hash = BULK_HASH_ALG_NULL;
1149                 sf.sf_flags = PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1150
1151                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1152         }
1153
1154         sec = sptlrpc_import_sec_ref(imp);
1155         if (sec) {
1156                 if (svc_ctx == NULL) {
1157                         /* normal import: only check the rpc flavor. if just
1158                          * the bulk flavor or flags changed, we can handle it
1159                          * on the fly without switching the sec. */
1160                         if (sf.sf_rpc == sec->ps_flvr.sf_rpc) {
1161                                 sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1162
1163                                 rc = 0;
1164                                 goto out;
1165                         }
1166                 } else {
1167                         /* reverse import, do not compare bulk flavor */
1168                         if (sf.sf_rpc == sec->ps_flvr.sf_rpc) {
1169                                 rc = 0;
1170                                 goto out;
1171                         }
1172                 }
1173
1174                 CWARN("%simport %p (%s%s%s): changing flavor "
1175                       "(%s, %s/%s) -> (%s, %s/%s)\n",
1176                       svc_ctx ? "reverse " : "",
1177                       imp, imp->imp_obd->obd_name,
1178                       svc_ctx == NULL ? "->" : "<-",
1179                       obd_uuid2str(&conn->c_remote_uuid),
1180                       sptlrpc_rpcflavor2name(sec->ps_flvr.sf_rpc),
1181                       sptlrpc_get_hash_name(sec->ps_flvr.sf_bulk_hash),
1182                       sptlrpc_get_ciph_name(sec->ps_flvr.sf_bulk_ciph),
1183                       sptlrpc_rpcflavor2name(sf.sf_rpc),
1184                       sptlrpc_get_hash_name(sf.sf_bulk_hash),
1185                       sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
1186         } else {
1187                 CWARN("%simport %p (%s%s%s) netid %x: "
1188                       "select initial flavor (%s, %s/%s)\n",
1189                       svc_ctx == NULL ? "" : "reverse ",
1190                       imp, imp->imp_obd->obd_name,
1191                       svc_ctx == NULL ? "->" : "<-",
1192                       obd_uuid2str(&conn->c_remote_uuid),
1193                       LNET_NIDNET(conn->c_self),
1194                       sptlrpc_rpcflavor2name(sf.sf_rpc),
1195                       sptlrpc_get_hash_name(sf.sf_bulk_hash),
1196                       sptlrpc_get_ciph_name(sf.sf_bulk_ciph));
1197         }
1198
1199         mutex_down(&imp->imp_sec_mutex);
1200
1201         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1202         if (newsec) {
1203                 sptlrpc_import_sec_install(imp, newsec);
1204                 rc = 0;
1205         } else {
1206                 CERROR("%simport %p (%s): failed to create new sec\n",
1207                        svc_ctx == NULL ? "" : "reverse ",
1208                        imp, obd_uuid2str(&conn->c_remote_uuid));
1209                 rc = -EPERM;
1210         }
1211
1212         mutex_up(&imp->imp_sec_mutex);
1213
1214 out:
1215         sptlrpc_sec_put(sec);
1216         return rc;
1217 }
1218
1219 void sptlrpc_import_sec_put(struct obd_import *imp)
1220 {
1221         if (imp->imp_sec) {
1222                 sptlrpc_sec_kill(imp->imp_sec);
1223
1224                 sptlrpc_sec_put(imp->imp_sec);
1225                 imp->imp_sec = NULL;
1226         }
1227 }
1228
1229 static void import_flush_ctx_common(struct obd_import *imp,
1230                                     uid_t uid, int grace, int force)
1231 {
1232         struct ptlrpc_sec *sec;
1233
1234         if (imp == NULL)
1235                 return;
1236
1237         sec = sptlrpc_import_sec_ref(imp);
1238         if (sec == NULL)
1239                 return;
1240
1241         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1242         sptlrpc_sec_put(sec);
1243 }
1244
1245 void sptlrpc_import_inval_all_ctx(struct obd_import *imp)
1246 {
1247         /* use grace == 0 */
1248         import_flush_ctx_common(imp, -1, 0, 1);
1249 }
1250
1251 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1252 {
1253         /* it's important to use grace mode, see the explanation in
1254          * sptlrpc_req_refresh_ctx() */
1255         import_flush_ctx_common(imp, 0, 1, 1);
1256 }
1257
1258 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1259 {
1260         import_flush_ctx_common(imp, cfs_current()->uid, 1, 1);
1261 }
1262 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1263
1264 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1265 {
1266         import_flush_ctx_common(imp, -1, 1, 1);
1267 }
1268 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1269
1270 /*
1271  * when this completes successfully, req->rq_reqmsg should point to the
1272  * right place.
1273  */
1274 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1275 {
1276         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1277         struct ptlrpc_sec_policy *policy;
1278         int rc;
1279
1280         LASSERT(ctx);
1281         LASSERT(atomic_read(&ctx->cc_refcount));
1282         LASSERT(ctx->cc_sec);
1283         LASSERT(ctx->cc_sec->ps_policy);
1284         LASSERT(req->rq_reqmsg == NULL);
1285
1286         policy = ctx->cc_sec->ps_policy;
1287         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1288         if (!rc) {
1289                 LASSERT(req->rq_reqmsg);
1290                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1291
1292                 /* zeroing preallocated buffer */
1293                 if (req->rq_pool)
1294                         memset(req->rq_reqmsg, 0, msgsize);
1295         }
1296
1297         return rc;
1298 }
1299
1300 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1301 {
1302         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1303         struct ptlrpc_sec_policy *policy;
1304
1305         LASSERT(ctx);
1306         LASSERT(atomic_read(&ctx->cc_refcount));
1307         LASSERT(ctx->cc_sec);
1308         LASSERT(ctx->cc_sec->ps_policy);
1309
1310         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1311                 return;
1312
1313         policy = ctx->cc_sec->ps_policy;
1314         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1315 }
1316
1317 /*
1318  * NOTE caller must guarantee the buffer size is enough for the enlargement
1319  */
1320 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1321                                   int segment, int newsize)
1322 {
1323         void   *src, *dst;
1324         int     oldsize, oldmsg_size, movesize;
1325
1326         LASSERT(segment < msg->lm_bufcount);
1327         LASSERT(msg->lm_buflens[segment] <= newsize);
1328
1329         if (msg->lm_buflens[segment] == newsize)
1330                 return;
1331
1332         /* nothing to do if we are enlarging the last segment */
1333         if (segment == msg->lm_bufcount - 1) {
1334                 msg->lm_buflens[segment] = newsize;
1335                 return;
1336         }
1337
1338         oldsize = msg->lm_buflens[segment];
1339
1340         src = lustre_msg_buf(msg, segment + 1, 0);
1341         msg->lm_buflens[segment] = newsize;
1342         dst = lustre_msg_buf(msg, segment + 1, 0);
1343         msg->lm_buflens[segment] = oldsize;
1344
1345         /* move everything from segment + 1 through the last segment */
1346         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1347         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1348         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1349         LASSERT(movesize >= 0);
1350
1351         if (movesize)
1352                 memmove(dst, src, movesize);
1353
1354         /* note we don't clear the area where the old data lived, not secret */
1355
1356         /* finally set new segment size */
1357         msg->lm_buflens[segment] = newsize;
1358 }
1359 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1360
1361 /*
1362  * enlarge @segment of the upper message req->rq_reqmsg to @newsize; all data
1363  * will be preserved after the enlargement. this must be called after rq_reqmsg
1364  * has been initialized, at least.
1365  *
1366  * caller's attention: upon return, rq_reqmsg and rq_reqlen might have
1367  * been changed.
1368  */
1369 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1370                                int segment, int newsize)
1371 {
1372         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1373         struct ptlrpc_sec_cops   *cops;
1374         struct lustre_msg        *msg = req->rq_reqmsg;
1375
1376         LASSERT(ctx);
1377         LASSERT(msg);
1378         LASSERT(msg->lm_bufcount > segment);
1379         LASSERT(msg->lm_buflens[segment] <= newsize);
1380
1381         if (msg->lm_buflens[segment] == newsize)
1382                 return 0;
1383
1384         cops = ctx->cc_sec->ps_policy->sp_cops;
1385         LASSERT(cops->enlarge_reqbuf);
1386         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1387 }
1388 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1389
1390 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1391 {
1392         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1393         struct ptlrpc_sec_policy *policy;
1394         ENTRY;
1395
1396         LASSERT(ctx);
1397         LASSERT(atomic_read(&ctx->cc_refcount));
1398         LASSERT(ctx->cc_sec);
1399         LASSERT(ctx->cc_sec->ps_policy);
1400
1401         if (req->rq_repbuf)
1402                 RETURN(0);
1403
1404         policy = ctx->cc_sec->ps_policy;
1405         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1406 }
1407
1408 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1409 {
1410         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1411         struct ptlrpc_sec_policy *policy;
1412         ENTRY;
1413
1414         LASSERT(ctx);
1415         LASSERT(atomic_read(&ctx->cc_refcount));
1416         LASSERT(ctx->cc_sec);
1417         LASSERT(ctx->cc_sec->ps_policy);
1418
1419         if (req->rq_repbuf == NULL)
1420                 return;
1421         LASSERT(req->rq_repbuf_len);
1422
1423         policy = ctx->cc_sec->ps_policy;
1424         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1425         EXIT;
1426 }
1427
1428 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1429                                 struct ptlrpc_cli_ctx *ctx)
1430 {
1431         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1432
1433         if (!policy->sp_cops->install_rctx)
1434                 return 0;
1435         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1436 }
1437
1438 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1439                                 struct ptlrpc_svc_ctx *ctx)
1440 {
1441         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1442
1443         if (!policy->sp_sops->install_rctx)
1444                 return 0;
1445         return policy->sp_sops->install_rctx(imp, ctx);
1446 }
1447
1448 /****************************************
1449  * server side security                 *
1450  ****************************************/
1451
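/*
 * A request's flavor is acceptable if it matches @exp exactly, or, for ctx
 * init/fini RPCs, if only the policy and mechanism match: the service part
 * may legitimately differ because sptlrpc_req_set_flavor() forces SVC_NULL
 * for init and SVC_INTG for fini.
 */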
1452 static int flavor_allowed(struct sptlrpc_flavor *exp,
1453                           struct ptlrpc_request *req)
1454 {
1455         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1456
1457         if (exp->sf_rpc == flvr->sf_rpc)
1458                 return 1;
1459
1460         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1461             RPC_FLVR_POLICY(exp->sf_rpc) == RPC_FLVR_POLICY(flvr->sf_rpc) &&
1462             RPC_FLVR_MECH(exp->sf_rpc) == RPC_FLVR_MECH(flvr->sf_rpc))
1463                 return 1;
1464
1465         return 0;
1466 }
1467
1468 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1469
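/*
 * Each export remembers up to three flavors: exp_flvr is the current one,
 * while exp_flvr_old[0] and [1] are older flavors that stay acceptable
 * until their exp_flvr_expire[] timestamps (EXP_FLVR_UPDATE_EXPIRE seconds
 * after the change) have passed.  exp_flvr_changed marks a configured
 * change that has not yet been seen on the wire, and exp_flvr_adapt marks
 * a reverse-sec adapt that is still pending.
 */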
1470 int sptlrpc_target_export_check(struct obd_export *exp,
1471                                 struct ptlrpc_request *req)
1472 {
1473         struct sptlrpc_flavor   flavor;
1474
1475         if (exp == NULL)
1476                 return 0;
1477
1478         /* a client-side export has no imp_reverse, skip it.
1479          * FIXME maybe we should check the flavor here as well??? */
1480         if (exp->exp_imp_reverse == NULL)
1481                 return 0;
1482
1483         /* don't care about ctx fini rpc */
1484         if (req->rq_ctx_fini)
1485                 return 0;
1486
1487         spin_lock(&exp->exp_lock);
1488
1489         /* if the flavor just changed (exp->exp_flvr_changed != 0), we wait
1490          * for the first req with the new flavor, then treat it as the
1491          * current flavor and adapt the reverse sec according to it.
1492          * note the first rpc with the new flavor might not use a root ctx,
1493          * in which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1494         if (unlikely(exp->exp_flvr_changed) &&
1495             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1496                 /* make the new flavor the "current" one, and the old ones
1497                  * about-to-expire */
1498                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1499                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1500                 flavor = exp->exp_flvr_old[1];
1501                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1502                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1503                 exp->exp_flvr_old[0] = exp->exp_flvr;
1504                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1505                                           EXP_FLVR_UPDATE_EXPIRE;
1506                 exp->exp_flvr = flavor;
1507
1508                 /* flavor change finished */
1509                 exp->exp_flvr_changed = 0;
1510                 LASSERT(exp->exp_flvr_adapt == 1);
1511
1512                 /* if it's gss, we are only interested in root ctx init */
1513                 if (req->rq_auth_gss &&
1514                     !(req->rq_ctx_init && (req->rq_auth_usr_root ||
1515                                            req->rq_auth_usr_mdt))) {
1516                         spin_unlock(&exp->exp_lock);
1517                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d)\n",
1518                                req->rq_auth_gss, req->rq_ctx_init,
1519                                req->rq_auth_usr_root, req->rq_auth_usr_mdt);
1520                         return 0;
1521                 }
1522
1523                 exp->exp_flvr_adapt = 0;
1524                 spin_unlock(&exp->exp_lock);
1525
1526                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1527                                                 req->rq_svc_ctx, flavor.sf_rpc);
1528         }
1529
1530         /* if it equals the current flavor, we accept it, but we still need
1531          * to deal with the reverse sec/ctx */
1532         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1533                 /* most cases should return here; we are only interested in
1534                  * gss root ctx init */
1535                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1536                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt)) {
1537                         spin_unlock(&exp->exp_lock);
1538                         return 0;
1539                 }
1540
1541                 /* if the flavor just changed, we should not proceed; just
1542                  * leave it, the current flavor will be discovered and replaced
1543                  * shortly, and let _this_ rpc pass through */
1544                 if (exp->exp_flvr_changed) {
1545                         LASSERT(exp->exp_flvr_adapt);
1546                         spin_unlock(&exp->exp_lock);
1547                         return 0;
1548                 }
1549
1550                 if (exp->exp_flvr_adapt) {
1551                         exp->exp_flvr_adapt = 0;
1552                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1553                                exp, exp->exp_flvr.sf_rpc,
1554                                exp->exp_flvr_old[0].sf_rpc,
1555                                exp->exp_flvr_old[1].sf_rpc);
1556                         flavor = exp->exp_flvr;
1557                         spin_unlock(&exp->exp_lock);
1558
1559                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1560                                                         req->rq_svc_ctx,
1561                                                         flavor.sf_rpc);
1562                 } else {
1563                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1564                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1565                                exp->exp_flvr_old[0].sf_rpc,
1566                                exp->exp_flvr_old[1].sf_rpc);
1567                         spin_unlock(&exp->exp_lock);
1568
1569                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1570                                                            req->rq_svc_ctx);
1571                 }
1572         }
1573
1574         if (exp->exp_flvr_expire[0]) {
1575                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1576                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1577                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1578                                        "middle one ("CFS_DURATION_T")\n", exp,
1579                                        exp->exp_flvr.sf_rpc,
1580                                        exp->exp_flvr_old[0].sf_rpc,
1581                                        exp->exp_flvr_old[1].sf_rpc,
1582                                        exp->exp_flvr_expire[0] -
1583                                                 cfs_time_current_sec());
1584                                 spin_unlock(&exp->exp_lock);
1585                                 return 0;
1586                         }
1587                 } else {
1588                         CDEBUG(D_SEC, "mark middle expired\n");
1589                         exp->exp_flvr_expire[0] = 0;
1590                 }
1591                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1592                        exp->exp_flvr.sf_rpc,
1593                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1594                        req->rq_flvr.sf_rpc);
1595         }
1596
1597         /* it doesn't match the current flavor; the only chance to accept
1598          * it is to match one of the old flavors which has not expired. */
1599         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1600                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1601                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1602                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1603                                        "oldest one ("CFS_DURATION_T")\n", exp,
1604                                        exp->exp_flvr.sf_rpc,
1605                                        exp->exp_flvr_old[0].sf_rpc,
1606                                        exp->exp_flvr_old[1].sf_rpc,
1607                                        exp->exp_flvr_expire[1] -
1608                                                 cfs_time_current_sec());
1609                                 spin_unlock(&exp->exp_lock);
1610                                 return 0;
1611                         }
1612                 } else {
1613                         CDEBUG(D_SEC, "mark oldest expired\n");
1614                         exp->exp_flvr_expire[1] = 0;
1615                 }
1616                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match the oldest\n",
1617                        exp, exp->exp_flvr.sf_rpc,
1618                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1619                        req->rq_flvr.sf_rpc);
1620         } else {
1621                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1622                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1623                        exp->exp_flvr_old[1].sf_rpc);
1624         }
1625
1626         spin_unlock(&exp->exp_lock);
1627
1628         CWARN("req %p: (%u|%u|%u|%u|%u) with unauthorized flavor %x\n",
1629               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1630               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_flvr.sf_rpc);
1631         return -EACCES;
1632 }
1633
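/*
 * Illustrative summary of the export flavor bookkeeping used above (an
 * editorial note derived from the code in this file):
 *
 *      exp_flvr         - flavor currently expected from the client
 *      exp_flvr_old[0]  - previous ("middle") flavor, accepted until
 *                         exp_flvr_expire[0]
 *      exp_flvr_old[1]  - oldest flavor, accepted until exp_flvr_expire[1]
 *
 * sptlrpc_target_update_exp_flavor() below parks a newly configured flavor
 * in exp_flvr_old[1] with exp_flvr_changed == 1; the first allowed request
 * carrying that flavor promotes it to exp_flvr, shifts the older flavors
 * down one slot, and grants the displaced current flavor a grace period of
 * EXP_FLVR_UPDATE_EXPIRE seconds before it stops being accepted.
 */
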
1634 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1635                                       struct sptlrpc_rule_set *rset)
1636 {
1637         struct obd_export       *exp;
1638         struct sptlrpc_flavor    new_flvr;
1639
1640         LASSERT(obd);
1641
1642         spin_lock(&obd->obd_dev_lock);
1643
1644         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1645                 if (exp->exp_connection == NULL)
1646                         continue;
1647
1648                 /* note that if this export's flavor was just updated
1649                  * (exp_flvr_changed == 1), this will override the
1650                  * previous update. */
1651                 spin_lock(&exp->exp_lock);
1652                 sptlrpc_rule_set_choose(rset, exp->exp_sp_peer,
1653                                         exp->exp_connection->c_peer.nid,
1654                                         &new_flvr);
1655                 if (exp->exp_flvr_changed ||
1656                     memcmp(&new_flvr, &exp->exp_flvr, sizeof(new_flvr))) {
1657                         exp->exp_flvr_old[1] = new_flvr;
1658                         exp->exp_flvr_expire[1] = 0;
1659                         exp->exp_flvr_changed = 1;
1660                         exp->exp_flvr_adapt = 1;
1661                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1662                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1663                                exp->exp_flvr.sf_rpc,
1664                                exp->exp_flvr_old[1].sf_rpc);
1665                 }
1666                 spin_unlock(&exp->exp_lock);
1667         }
1668
1669         spin_unlock(&obd->obd_dev_lock);
1670 }
1671 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
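
/*
 * Illustrative usage (an editorial sketch, not part of the original file):
 * a target would typically re-apply its sptlrpc rules after a configuration
 * change, e.g.
 *
 *      struct sptlrpc_rule_set rset;   // assumed to be filled from config
 *
 *      sptlrpc_target_update_exp_flavor(obd, &rset);
 *
 * after which every connected export carries the chosen flavor in
 * exp_flvr_old[1] until its first matching request promotes it.
 */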
1672
1673 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1674 {
1675         if (svc_rc == SECSVC_DROP)
1676                 return SECSVC_DROP;
1677
1678         switch (req->rq_sp_from) {
1679         case LUSTRE_SP_CLI:
1680         case LUSTRE_SP_MDT:
1681         case LUSTRE_SP_OST:
1682         case LUSTRE_SP_MGS:
1683         case LUSTRE_SP_ANY:
1684                 break;
1685         default:
1686                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
1687                 return SECSVC_DROP;
1688         }
1689
1690         if (!req->rq_auth_gss)
1691                 return svc_rc;
1692
1693         if (unlikely(req->rq_sp_from == LUSTRE_SP_ANY)) {
1694                 CERROR("source part not specified\n");
1695                 return SECSVC_DROP;
1696         }
1697
1698         /* from MDT, must be authenticated as MDT */
1699         if (unlikely(req->rq_sp_from == LUSTRE_SP_MDT &&
1700                      !req->rq_auth_usr_mdt)) {
1701                 DEBUG_REQ(D_ERROR, req, "fake source MDT");
1702                 return SECSVC_DROP;
1703         }
1704
1705         /* from OST, it must be a callback to MDT or CLI; the reverse sec
1706          * came from the mdt/root keytab, so it should be MDT or root FIXME */
1707         if (unlikely(req->rq_sp_from == LUSTRE_SP_OST &&
1708                      !req->rq_auth_usr_mdt && !req->rq_auth_usr_root)) {
1709                 DEBUG_REQ(D_ERROR, req, "fake source OST");
1710                 return SECSVC_DROP;
1711         }
1712
1713         return svc_rc;
1714 }
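
/*
 * Illustrative note (not from the original file): for a gss flavor, a
 * request that claims a privileged source but was not authenticated with a
 * matching credential is treated as spoofed by the check above, e.g.
 *
 *      req->rq_sp_from      = LUSTRE_SP_MDT;
 *      req->rq_auth_gss     = 1;
 *      req->rq_auth_usr_mdt = 0;
 *      // sptlrpc_svc_check_from(req, SECSVC_OK) returns SECSVC_DROP
 */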
1715
1716 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
1717 {
1718         struct ptlrpc_sec_policy *policy;
1719         struct lustre_msg *msg = req->rq_reqbuf;
1720         int rc;
1721         ENTRY;
1722
1723         LASSERT(msg);
1724         LASSERT(req->rq_reqmsg == NULL);
1725         LASSERT(req->rq_repmsg == NULL);
1726
1727         req->rq_sp_from = LUSTRE_SP_ANY;
1728         req->rq_auth_uid = INVALID_UID;
1729         req->rq_auth_mapped_uid = INVALID_UID;
1730
1731         if (req->rq_reqdata_len < sizeof(struct lustre_msg)) {
1732                 CERROR("request size %d too small\n", req->rq_reqdata_len);
1733                 RETURN(SECSVC_DROP);
1734         }
1735
1736         /*
1737          * v2 message: extract the rpc security flavor, swabbing if needed.
1738          */
1739         if (msg->lm_magic == LUSTRE_MSG_MAGIC_V2)
1740                 req->rq_flvr.sf_rpc = WIRE_FLVR_RPC(msg->lm_secflvr);
1741         else
1742                 req->rq_flvr.sf_rpc = WIRE_FLVR_RPC(__swab32(msg->lm_secflvr));
1743
1744         /* unpack the wrapper message if the policy is not null */
1745         if ((RPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) &&
1746              lustre_unpack_msg(msg, req->rq_reqdata_len))
1747                 RETURN(SECSVC_DROP);
1748
1749         policy = sptlrpc_rpcflavor2policy(req->rq_flvr.sf_rpc);
1750         if (!policy) {
1751                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
1752                 RETURN(SECSVC_DROP);
1753         }
1754
1755         LASSERT(policy->sp_sops->accept);
1756         rc = policy->sp_sops->accept(req);
1757
1758         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
1759         sptlrpc_policy_put(policy);
1760
1761         /* sanity check for the request source */
1762         rc = sptlrpc_svc_check_from(req, rc);
1763
1764         /* FIXME move to proper place */
1765         if (rc == SECSVC_OK) {
1766                 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
1767
1768                 if (opc == OST_WRITE)
1769                         req->rq_bulk_write = 1;
1770                 else if (opc == OST_READ)
1771                         req->rq_bulk_read = 1;
1772         }
1773
1774         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
1775         RETURN(rc);
1776 }
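
/*
 * Illustrative note (an editorial sketch, not part of the original file):
 * the service request path is expected to call the function above right
 * after the raw buffer arrives, before interpreting the message body:
 *
 *      req->rq_reqbuf      = ...;   // incoming buffer
 *      req->rq_reqdata_len = ...;   // bytes received
 *      rc = sptlrpc_svc_unwrap_request(req);
 *      if (rc == SECSVC_DROP)
 *              ...;                 // discard the request
 *      // on SECSVC_OK, req->rq_reqmsg points at the clear-text message
 */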
1777
1778 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req,
1779                          int msglen)
1780 {
1781         struct ptlrpc_sec_policy *policy;
1782         struct ptlrpc_reply_state *rs;
1783         int rc;
1784         ENTRY;
1785
1786         LASSERT(req->rq_svc_ctx);
1787         LASSERT(req->rq_svc_ctx->sc_policy);
1788
1789         policy = req->rq_svc_ctx->sc_policy;
1790         LASSERT(policy->sp_sops->alloc_rs);
1791
1792         rc = policy->sp_sops->alloc_rs(req, msglen);
1793         if (unlikely(rc == -ENOMEM)) {
1794                 /* failed alloc, try emergency pool */
1795                 rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_service);
1796                 if (rs == NULL)
1797                         RETURN(-ENOMEM);
1798
1799                 req->rq_reply_state = rs;
1800                 rc = policy->sp_sops->alloc_rs(req, msglen);
1801                 if (rc) {
1802                         lustre_put_emerg_rs(rs);
1803                         req->rq_reply_state = NULL;
1804                 }
1805         }
1806
1807         LASSERT(rc != 0 ||
1808                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
1809
1810         RETURN(rc);
1811 }
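
/*
 * Illustrative note (assumption, not part of the original file): this is
 * normally reached from the generic reply packing path, roughly
 *
 *      rc = sptlrpc_svc_alloc_rs(req, msglen);
 *      // on success req->rq_reply_state->rs_msg is ready to be filled in
 *
 * with the emergency pool fallback above keeping replies flowing when the
 * policy's own allocation fails under memory pressure.
 */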
1812
1813 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
1814 {
1815         struct ptlrpc_sec_policy *policy;
1816         int rc;
1817         ENTRY;
1818
1819         LASSERT(req->rq_svc_ctx);
1820         LASSERT(req->rq_svc_ctx->sc_policy);
1821
1822         policy = req->rq_svc_ctx->sc_policy;
1823         LASSERT(policy->sp_sops->authorize);
1824
1825         rc = policy->sp_sops->authorize(req);
1826         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
1827
1828         RETURN(rc);
1829 }
1830
1831 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
1832 {
1833         struct ptlrpc_sec_policy *policy;
1834         unsigned int prealloc;
1835         ENTRY;
1836
1837         LASSERT(rs->rs_svc_ctx);
1838         LASSERT(rs->rs_svc_ctx->sc_policy);
1839
1840         policy = rs->rs_svc_ctx->sc_policy;
1841         LASSERT(policy->sp_sops->free_rs);
1842
1843         prealloc = rs->rs_prealloc;
1844         policy->sp_sops->free_rs(rs);
1845
1846         if (prealloc)
1847                 lustre_put_emerg_rs(rs);
1848         EXIT;
1849 }
1850
1851 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
1852 {
1853         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
1854
1855         if (ctx == NULL)
1856                 return;
1857
1858         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
1859         atomic_inc(&ctx->sc_refcount);
1860 }
1861
1862 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
1863 {
1864         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
1865
1866         if (ctx == NULL)
1867                 return;
1868
1869         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
1870         if (atomic_dec_and_test(&ctx->sc_refcount)) {
1871                 if (ctx->sc_policy->sp_sops->free_ctx)
1872                         ctx->sc_policy->sp_sops->free_ctx(ctx);
1873         }
1874         req->rq_svc_ctx = NULL;
1875 }
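
/*
 * Illustrative note (not from the original file): the addref/decref pair
 * above lets a service context outlive a single request when it is shared,
 * e.g.
 *
 *      sptlrpc_svc_ctx_addref(req);    // take an extra reference
 *      ...                             // req->rq_svc_ctx stays valid
 *      sptlrpc_svc_ctx_decref(req);    // drop it; last put frees the ctx
 */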
1876
1877 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
1878 {
1879         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
1880
1881         if (ctx == NULL)
1882                 return;
1883
1884         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
1885         if (ctx->sc_policy->sp_sops->invalidate_ctx)
1886                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
1887 }
1888 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
1889
1890 /****************************************
1891  * bulk security                        *
1892  ****************************************/
1893
1894 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1895                           struct ptlrpc_bulk_desc *desc)
1896 {
1897         struct ptlrpc_cli_ctx *ctx;
1898
1899         if (!req->rq_pack_bulk)
1900                 return 0;
1901
1902         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
1903
1904         ctx = req->rq_cli_ctx;
1905         if (ctx->cc_ops->wrap_bulk)
1906                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
1907         return 0;
1908 }
1909 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
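
/*
 * Illustrative note (an editorial sketch, not part of the original file):
 * when rq_pack_bulk is set, the two ends of a bulk transfer are expected to
 * mirror each other, roughly
 *
 *      bulk write:  sptlrpc_cli_wrap_bulk()  ...  sptlrpc_svc_unwrap_bulk()
 *      bulk read:   sptlrpc_svc_wrap_bulk()  ...  sptlrpc_cli_unwrap_bulk_read()
 *
 * so whatever checksum or transform one side applies through the policy's
 * wrap_bulk hook is verified or removed by the other side's unwrap_bulk.
 */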
1910
1911 static
1912 void pga_to_bulk_desc(int nob, obd_count pg_count, struct brw_page **pga,
1913                       struct ptlrpc_bulk_desc *desc)
1914 {
1915         int i;
1916
1917         LASSERT(pga);
1918         LASSERT(*pga);
1919
1920         for (i = 0; i < pg_count && nob > 0; i++) {
1921 #ifdef __KERNEL__
1922                 desc->bd_iov[i].kiov_page = pga[i]->pg;
1923                 desc->bd_iov[i].kiov_len = pga[i]->count > nob ?
1924                                            nob : pga[i]->count;
1925                 desc->bd_iov[i].kiov_offset = pga[i]->off & ~CFS_PAGE_MASK;
1926 #else
1927 #warning FIXME for liblustre!
1928                 desc->bd_iov[i].iov_base = pga[i]->pg->addr;
1929                 desc->bd_iov[i].iov_len = pga[i]->count > nob ?
1930                                            nob : pga[i]->count;
1931 #endif
1932
1933                 desc->bd_iov_count++;
1934                 nob -= pga[i]->count;
1935         }
1936 }
1937
1938 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1939                                  int nob, obd_count pg_count,
1940                                  struct brw_page **pga)
1941 {
1942         struct ptlrpc_bulk_desc *desc;
1943         struct ptlrpc_cli_ctx *ctx;
1944         int rc = 0;
1945
1946         if (!req->rq_pack_bulk)
1947                 return 0;
1948
1949         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
1950
1951         OBD_ALLOC(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));
1952         if (desc == NULL) {
1953                 CERROR("out of memory, can't verify bulk read data\n");
1954                 return -ENOMEM;
1955         }
1956
1957         pga_to_bulk_desc(nob, pg_count, pga, desc);
1958
1959         ctx = req->rq_cli_ctx;
1960         if (ctx->cc_ops->unwrap_bulk)
1961                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
1962
1963         OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc, bd_iov[pg_count]));
1964
1965         return rc;
1966 }
1967 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
1968
1969 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1970                                   struct ptlrpc_bulk_desc *desc)
1971 {
1972         struct ptlrpc_cli_ctx *ctx;
1973
1974         if (!req->rq_pack_bulk)
1975                 return 0;
1976
1977         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
1978
1979         ctx = req->rq_cli_ctx;
1980         if (ctx->cc_ops->unwrap_bulk)
1981                 return ctx->cc_ops->unwrap_bulk(ctx, req, desc);
1982
1983         return 0;
1984 }
1985 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
1986
1987 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
1988                           struct ptlrpc_bulk_desc *desc)
1989 {
1990         struct ptlrpc_svc_ctx *ctx;
1991
1992         if (!req->rq_pack_bulk)
1993                 return 0;
1994
1995         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
1996
1997         ctx = req->rq_svc_ctx;
1998         if (ctx->sc_policy->sp_sops->wrap_bulk)
1999                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2000
2001         return 0;
2002 }
2003 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2004
2005 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2006                             struct ptlrpc_bulk_desc *desc)
2007 {
2008         struct ptlrpc_svc_ctx *ctx;
2009
2010         if (!req->rq_pack_bulk)
2011                 return 0;
2012
2013         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2014
2015         ctx = req->rq_svc_ctx;
2016         if (ctx->sc_policy->sp_sops->unwrap_bulk)
2017                 return ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2018
2019         return 0;
2020 }
2021 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2022
2023
2024 /****************************************
2025  * user descriptor helpers              *
2026  ****************************************/
2027
2028 int sptlrpc_current_user_desc_size(void)
2029 {
2030         int ngroups;
2031
2032 #ifdef __KERNEL__
2033         ngroups = current_ngroups;
2034
2035         if (ngroups > LUSTRE_MAX_GROUPS)
2036                 ngroups = LUSTRE_MAX_GROUPS;
2037 #else
2038         ngroups = 0;
2039 #endif
2040         return sptlrpc_user_desc_size(ngroups);
2041 }
2042 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2043
2044 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2045 {
2046         struct ptlrpc_user_desc *pud;
2047
2048         pud = lustre_msg_buf(msg, offset, 0);
2049
2050         pud->pud_uid = cfs_current()->uid;
2051         pud->pud_gid = cfs_current()->gid;
2052         pud->pud_fsuid = cfs_current()->fsuid;
2053         pud->pud_fsgid = cfs_current()->fsgid;
2054         pud->pud_cap = cfs_current()->cap_effective;
2055         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2056
2057 #ifdef __KERNEL__
2058         task_lock(current);
2059         if (pud->pud_ngroups > current_ngroups)
2060                 pud->pud_ngroups = current_ngroups;
2061         memcpy(pud->pud_groups, cfs_current()->group_info->blocks[0],
2062                pud->pud_ngroups * sizeof(__u32));
2063         task_unlock(current);
2064 #endif
2065
2066         return 0;
2067 }
2068 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
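
/*
 * Illustrative sketch (assumption, not part of the original file): a client
 * that ships its identity reserves a buffer sized by
 * sptlrpc_current_user_desc_size() and fills it in place, e.g.
 *
 *      buflens[offset] = sptlrpc_current_user_desc_size();
 *      ...                                    // pack the request message
 *      sptlrpc_pack_user_desc(msg, offset);   // 'offset' is hypothetical
 *
 * while the receiving side validates and byte-swaps the descriptor with
 * sptlrpc_unpack_user_desc() below.
 */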
2069
2070 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset)
2071 {
2072         struct ptlrpc_user_desc *pud;
2073         int                      i;
2074
2075         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2076         if (!pud)
2077                 return -EINVAL;
2078
2079         if (lustre_msg_swabbed(msg)) {
2080                 __swab32s(&pud->pud_uid);
2081                 __swab32s(&pud->pud_gid);
2082                 __swab32s(&pud->pud_fsuid);
2083                 __swab32s(&pud->pud_fsgid);
2084                 __swab32s(&pud->pud_cap);
2085                 __swab32s(&pud->pud_ngroups);
2086         }
2087
2088         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2089                 CERROR("%u groups are too many\n", pud->pud_ngroups);
2090                 return -EINVAL;
2091         }
2092
2093         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2094             msg->lm_buflens[offset]) {
2095                 CERROR("%u groups claimed but bufsize is only %u\n",
2096                        pud->pud_ngroups, msg->lm_buflens[offset]);
2097                 return -EINVAL;
2098         }
2099
2100         if (lustre_msg_swabbed(msg)) {
2101                 for (i = 0; i < pud->pud_ngroups; i++)
2102                         __swab32s(&pud->pud_groups[i]);
2103         }
2104
2105         return 0;
2106 }
2107 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2108
2109 /****************************************
2110  * misc helpers                         *
2111  ****************************************/
2112
2113 const char * sec2target_str(struct ptlrpc_sec *sec)
2114 {
2115         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2116                 return "*";
2117         if (sec_is_reverse(sec))
2118                 return "c";
2119         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2120 }
2121 EXPORT_SYMBOL(sec2target_str);
2122
2123 /****************************************
2124  * crypto API helper/alloc blkcipher    *
2125  ****************************************/
2126
2127 #ifdef __KERNEL__
2128 #ifndef HAVE_ASYNC_BLOCK_CIPHER
2129 struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char * algname,
2130                                                    u32 type, u32 mask)
2131 {
2132         char        buf[CRYPTO_MAX_ALG_NAME + 1];
2133         const char *pan = algname;
2134         u32         flag = 0;
2135
2136         if (strncmp("cbc(", algname, 4) == 0)
2137                 flag |= CRYPTO_TFM_MODE_CBC;
2138         else if (strncmp("ecb(", algname, 4) == 0)
2139                 flag |= CRYPTO_TFM_MODE_ECB;
2140         if (flag) {
2141                 char *vp = strnchr(algname, CRYPTO_MAX_ALG_NAME, ')');
2142                 if (vp) {
2143                         memcpy(buf, algname + 4, vp - algname - 4);
2144                         buf[vp - algname - 4] = '\0';
2145                         pan = buf;
2146                 } else {
2147                         flag = 0;
2148                 }
2149         }
2150         return crypto_alloc_tfm(pan, flag);
2151 }
2152 EXPORT_SYMBOL(ll_crypto_alloc_blkcipher);
2153 #endif
2154 #endif
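
/*
 * Illustrative note (not from the original file): on kernels lacking the
 * async block cipher API, the helper above maps a "mode(alg)" name onto the
 * old crypto_alloc_tfm() interface, e.g.
 *
 *      tfm = ll_crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *
 * ends up calling crypto_alloc_tfm("aes", CRYPTO_TFM_MODE_CBC).
 */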
2155
2156 /****************************************
2157  * initialize/finalize                  *
2158  ****************************************/
2159
2160 int __init sptlrpc_init(void)
2161 {
2162         int rc;
2163
2164         rc = sptlrpc_gc_start_thread();
2165         if (rc)
2166                 goto out;
2167
2168         rc = sptlrpc_enc_pool_init();
2169         if (rc)
2170                 goto out_gc;
2171
2172         rc = sptlrpc_null_init();
2173         if (rc)
2174                 goto out_pool;
2175
2176         rc = sptlrpc_plain_init();
2177         if (rc)
2178                 goto out_null;
2179
2180         rc = sptlrpc_lproc_init();
2181         if (rc)
2182                 goto out_plain;
2183
2184         return 0;
2185
2186 out_plain:
2187         sptlrpc_plain_fini();
2188 out_null:
2189         sptlrpc_null_fini();
2190 out_pool:
2191         sptlrpc_enc_pool_fini();
2192 out_gc:
2193         sptlrpc_gc_stop_thread();
2194 out:
2195         return rc;
2196 }
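
/*
 * Illustrative note (not from the original file): sptlrpc_fini() below must
 * tear things down in the reverse order of sptlrpc_init() above, so the two
 * lists have to stay mirrored whenever a sub-module is added, e.g. a
 * hypothetical sptlrpc_foo_init()/sptlrpc_foo_fini() pair would be hooked
 * into both with a matching error-unwind label.
 */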
2197
2198 void __exit sptlrpc_fini(void)
2199 {
2200         sptlrpc_lproc_fini();
2201         sptlrpc_plain_fini();
2202         sptlrpc_null_fini();
2203         sptlrpc_enc_pool_fini();
2204         sptlrpc_gc_stop_thread();
2205 }