1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #ifndef EXPORT_SYMTAB
42 #define EXPORT_SYMTAB
43 #endif
44 #define DEBUG_SUBSYSTEM S_SEC
45
46 #include <libcfs/libcfs.h>
47 #ifndef __KERNEL__
48 #include <liblustre.h>
49 #include <libcfs/list.h>
50 #else
51 #include <linux/crypto.h>
52 #include <linux/key.h>
53 #endif
54
55 #include <obd.h>
56 #include <obd_class.h>
57 #include <obd_support.h>
58 #include <lustre_net.h>
59 #include <lustre_import.h>
60 #include <lustre_dlm.h>
61 #include <lustre_sec.h>
62
63 #include "ptlrpc_internal.h"
64
65 /***********************************************
66  * policy registers                            *
67  ***********************************************/
68
69 static rwlock_t policy_lock;
70 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
71         NULL,
72 };
73
74 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
75 {
76         __u16 number = policy->sp_policy;
77
78         LASSERT(policy->sp_name);
79         LASSERT(policy->sp_cops);
80         LASSERT(policy->sp_sops);
81
82         if (number >= SPTLRPC_POLICY_MAX)
83                 return -EINVAL;
84
85         write_lock(&policy_lock);
86         if (unlikely(policies[number])) {
87                 write_unlock(&policy_lock);
88                 return -EALREADY;
89         }
90         policies[number] = policy;
91         write_unlock(&policy_lock);
92
93         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
94         return 0;
95 }
96 EXPORT_SYMBOL(sptlrpc_register_policy);
97
98 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
99 {
100         __u16 number = policy->sp_policy;
101
102         LASSERT(number < SPTLRPC_POLICY_MAX);
103
104         write_lock(&policy_lock);
105         if (unlikely(policies[number] == NULL)) {
106                 write_unlock(&policy_lock);
107                 CERROR("%s: already unregistered\n", policy->sp_name);
108                 return -EINVAL;
109         }
110
111         LASSERT(policies[number] == policy);
112         policies[number] = NULL;
113         write_unlock(&policy_lock);
114
115         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
116         return 0;
117 }
118 EXPORT_SYMBOL(sptlrpc_unregister_policy);
119
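/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * how a policy module would typically plug into the table above from its
 * module init/exit hooks.  The ops structures and the policy slot are
 * placeholders; only the fields this file actually touches (sp_name,
 * sp_policy, sp_cops, sp_sops, sp_owner) are shown.
 *
 *     static struct ptlrpc_sec_policy demo_policy = {
 *             .sp_name   = "demo",
 *             .sp_owner  = THIS_MODULE,
 *             .sp_policy = SPTLRPC_POLICY_PLAIN,
 *             .sp_cops   = &demo_cli_ops,
 *             .sp_sops   = &demo_svc_ops,
 *     };
 *
 *     static int __init demo_init(void)
 *     {
 *             return sptlrpc_register_policy(&demo_policy);
 *     }
 *
 *     static void __exit demo_exit(void)
 *     {
 *             sptlrpc_unregister_policy(&demo_policy);
 *     }
 */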
120 static
121 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
122 {
123         static DECLARE_MUTEX(load_mutex);
124         static atomic_t           loaded = ATOMIC_INIT(0);
125         struct ptlrpc_sec_policy *policy;
126         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
127         __u16                     flag = 0;
128
129         if (number >= SPTLRPC_POLICY_MAX)
130                 return NULL;
131
132         while (1) {
133                 read_lock(&policy_lock);
134                 policy = policies[number];
135                 if (policy && !try_module_get(policy->sp_owner))
136                         policy = NULL;
137                 if (policy == NULL)
138                         flag = atomic_read(&loaded);
139                 read_unlock(&policy_lock);
140
141                 if (policy != NULL || flag != 0 ||
142                     number != SPTLRPC_POLICY_GSS)
143                         break;
144
145                 /* try to load gss module, once */
146                 mutex_down(&load_mutex);
147                 if (atomic_read(&loaded) == 0) {
148                         if (request_module("ptlrpc_gss") == 0)
149                                 CWARN("module ptlrpc_gss loaded on demand\n");
150                         else
151                                 CERROR("Unable to load module ptlrpc_gss\n");
152
153                         atomic_set(&loaded, 1);
154                 }
155                 mutex_up(&load_mutex);
156         }
157
158         return policy;
159 }
160
161 __u32 sptlrpc_name2flavor_base(const char *name)
162 {
163         if (!strcmp(name, "null"))
164                 return SPTLRPC_FLVR_NULL;
165         if (!strcmp(name, "plain"))
166                 return SPTLRPC_FLVR_PLAIN;
167         if (!strcmp(name, "krb5n"))
168                 return SPTLRPC_FLVR_KRB5N;
169         if (!strcmp(name, "krb5a"))
170                 return SPTLRPC_FLVR_KRB5A;
171         if (!strcmp(name, "krb5i"))
172                 return SPTLRPC_FLVR_KRB5I;
173         if (!strcmp(name, "krb5p"))
174                 return SPTLRPC_FLVR_KRB5P;
175
176         return SPTLRPC_FLVR_INVALID;
177 }
178 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
179
180 const char *sptlrpc_flavor2name_base(__u32 flvr)
181 {
182         __u32   base = SPTLRPC_FLVR_BASE(flvr);
183
184         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
185                 return "null";
186         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
187                 return "plain";
188         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
189                 return "krb5n";
190         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
191                 return "krb5a";
192         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
193                 return "krb5i";
194         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
195                 return "krb5p";
196
197         CERROR("invalid wire flavor 0x%x\n", flvr);
198         return "invalid";
199 }
200 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
201
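/*
 * Usage note (editor's addition): the two helpers above are inverses for
 * the base part of a wire flavor, for example:
 *
 *     __u32 flvr = sptlrpc_name2flavor_base("krb5i");
 *
 *     if (flvr != SPTLRPC_FLVR_INVALID)
 *             CDEBUG(D_SEC, "flavor 0x%x is %s\n", flvr,
 *                    sptlrpc_flavor2name_base(flvr));
 *
 * sptlrpc_flavor2name_base() only looks at the SPTLRPC_FLVR_BASE() bits,
 * so the other bits of a full wire flavor do not change the name.
 */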
202 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
203                                char *buf, int bufsize)
204 {
205         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
206                 snprintf(buf, bufsize, "hash:%s",
207                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
208         else
209                 snprintf(buf, bufsize, "%s",
210                          sptlrpc_flavor2name_base(sf->sf_rpc));
211
212         buf[bufsize - 1] = '\0';
213         return buf;
214 }
215 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
216
217 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
218 {
219         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
220
221         /*
222          * currently we don't support customized bulk specification for
223          * flavors other than plain
224          */
225         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
226                 char bspec[16];
227
228                 bspec[0] = '-';
229                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
230                 strncat(buf, bspec, bufsize);
231         }
232
233         buf[bufsize - 1] = '\0';
234         return buf;
235 }
236 EXPORT_SYMBOL(sptlrpc_flavor2name);
237
238 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
239 {
240         buf[0] = '\0';
241
242         if (flags & PTLRPC_SEC_FL_REVERSE)
243                 strncat(buf, "reverse,", bufsize);
244         if (flags & PTLRPC_SEC_FL_ROOTONLY)
245                 strncat(buf, "rootonly,", bufsize);
246         if (flags & PTLRPC_SEC_FL_UDESC)
247                 strncat(buf, "udesc,", bufsize);
248         if (flags & PTLRPC_SEC_FL_BULK)
249                 strncat(buf, "bulk,", bufsize);
250         if (buf[0] == '\0')
251                 strncat(buf, "-,", bufsize);
252
253         buf[bufsize - 1] = '\0';
254         return buf;
255 }
256 EXPORT_SYMBOL(sptlrpc_secflags2str);
257
258 /**************************************************
259  * client context APIs                            *
260  **************************************************/
261
262 static
263 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
264 {
265         struct vfs_cred vcred;
266         int create = 1, remove_dead = 1;
267
268         LASSERT(sec);
269         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
270
271         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
272                                      PTLRPC_SEC_FL_ROOTONLY)) {
273                 vcred.vc_uid = 0;
274                 vcred.vc_gid = 0;
275                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
276                         create = 0;
277                         remove_dead = 0;
278                 }
279         } else {
280                 vcred.vc_uid = cfs_current()->uid;
281                 vcred.vc_gid = cfs_current()->gid;
282         }
283
284         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
285                                                    create, remove_dead);
286 }
287
288 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
289 {
290         LASSERT(atomic_read(&ctx->cc_refcount) > 0);
291         atomic_inc(&ctx->cc_refcount);
292         return ctx;
293 }
294 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
295
296 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
297 {
298         struct ptlrpc_sec *sec = ctx->cc_sec;
299
300         LASSERT(sec);
301         LASSERT(atomic_read(&ctx->cc_refcount));
302
303         if (!atomic_dec_and_test(&ctx->cc_refcount))
304                 return;
305
306         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
307 }
308 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
309
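/*
 * Refcounting sketch (editor's addition): get/put pair like any other
 * reference-counted object; the final put hands the context back to its
 * policy through release_ctx().
 *
 *     struct ptlrpc_cli_ctx *ctx = sptlrpc_cli_ctx_get(some_ctx);
 *
 *     ... use the context while holding the reference ...
 *
 *     sptlrpc_cli_ctx_put(ctx, 1);
 *
 * The second argument is the @sync flag: 1 allows a potentially sleeping
 * release (which may send a context-destroy RPC), while 0 asks for a
 * quick, non-sleeping release, as the comment above sptlrpc_req_put_ctx()
 * below notes.
 */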
310 /*
311  * expire the context immediately.
312  * the caller must hold at least 1 ref on the ctx.
313  */
314 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
315 {
316         LASSERT(ctx->cc_ops->die);
317         ctx->cc_ops->die(ctx, 0);
318 }
319 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
320
321 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
322 {
323         struct ptlrpc_request *req, *next;
324
325         spin_lock(&ctx->cc_lock);
326         list_for_each_entry_safe(req, next, &ctx->cc_req_list, rq_ctx_chain) {
327                 list_del_init(&req->rq_ctx_chain);
328                 ptlrpc_client_wake_req(req);
329         }
330         spin_unlock(&ctx->cc_lock);
331 }
332 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
333
334 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
335 {
336         LASSERT(ctx->cc_ops);
337
338         if (ctx->cc_ops->display == NULL)
339                 return 0;
340
341         return ctx->cc_ops->display(ctx, buf, bufsize);
342 }
343
344 static int import_sec_check_expire(struct obd_import *imp)
345 {
346         int     adapt = 0;
347
348         spin_lock(&imp->imp_lock);
349         if (imp->imp_sec_expire &&
350             imp->imp_sec_expire < cfs_time_current_sec()) {
351                 adapt = 1;
352                 imp->imp_sec_expire = 0;
353         }
354         spin_unlock(&imp->imp_lock);
355
356         if (!adapt)
357                 return 0;
358
359         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
360         return sptlrpc_import_sec_adapt(imp, NULL, 0);
361 }
362
363 static int import_sec_validate_get(struct obd_import *imp,
364                                    struct ptlrpc_sec **sec)
365 {
366         int     rc;
367
368         if (unlikely(imp->imp_sec_expire)) {
369                 rc = import_sec_check_expire(imp);
370                 if (rc)
371                         return rc;
372         }
373
374         *sec = sptlrpc_import_sec_ref(imp);
375         if (*sec == NULL) {
376                 CERROR("import %p (%s) with no sec\n",
377                        imp, ptlrpc_import_state_name(imp->imp_state));
378                 return -EACCES;
379         }
380
381         if (unlikely((*sec)->ps_dying)) {
382                 CERROR("attempt to use dying sec %p\n", sec);
383                 sptlrpc_sec_put(*sec);
384                 return -EACCES;
385         }
386
387         return 0;
388 }
389
390 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
391 {
392         struct obd_import *imp = req->rq_import;
393         struct ptlrpc_sec *sec;
394         int                rc;
395         ENTRY;
396
397         LASSERT(!req->rq_cli_ctx);
398         LASSERT(imp);
399
400         rc = import_sec_validate_get(imp, &sec);
401         if (rc)
402                 RETURN(rc);
403
404         req->rq_cli_ctx = get_my_ctx(sec);
405
406         sptlrpc_sec_put(sec);
407
408         if (!req->rq_cli_ctx) {
409                 CERROR("req %p: fail to get context\n", req);
410                 RETURN(-ENOMEM);
411         }
412
413         RETURN(0);
414 }
415
416 /*
417  * if @sync == 0, this function should return quickly without sleep;
418  * otherwise it might trigger a ctx-destroying rpc to the server.
419  */
420 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
421 {
422         ENTRY;
423
424         LASSERT(req);
425         LASSERT(req->rq_cli_ctx);
426
427         /* request might be asked to release earlier while still
428          * in the context waiting list.
429          */
430         if (!list_empty(&req->rq_ctx_chain)) {
431                 spin_lock(&req->rq_cli_ctx->cc_lock);
432                 list_del_init(&req->rq_ctx_chain);
433                 spin_unlock(&req->rq_cli_ctx->cc_lock);
434         }
435
436         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
437         req->rq_cli_ctx = NULL;
438         EXIT;
439 }
440
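/*
 * Lifecycle sketch (editor's addition): a typical client path pairs
 * sptlrpc_req_get_ctx() with sptlrpc_req_put_ctx(), refreshing in between
 * if the context still needs an upcall (error handling elided):
 *
 *     rc = sptlrpc_req_get_ctx(req);
 *     if (rc)
 *             return rc;
 *
 *     rc = sptlrpc_req_refresh_ctx(req, 0);
 *     ...
 *     sptlrpc_req_put_ctx(req, 1);
 *
 * Passing timeout 0 means wait until success or a fatal error, per the
 * comment above sptlrpc_req_refresh_ctx() below.  sptlrpc_import_check_ctx()
 * further down follows roughly this pattern with a temporary dummy request.
 */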
441 static
442 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
443                            struct ptlrpc_cli_ctx *oldctx,
444                            struct ptlrpc_cli_ctx *newctx)
445 {
446         struct sptlrpc_flavor   old_flvr;
447         char                   *reqmsg;
448         int                     reqmsg_size;
449         int                     rc;
450
451         LASSERT(req->rq_reqmsg);
452         LASSERT(req->rq_reqlen);
453         LASSERT(req->rq_replen);
454
455         CWARN("req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
456               "switch sec %p(%s) -> %p(%s)\n", req,
457               oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
458               newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
459               oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
460               newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
461
462         /* save flavor */
463         old_flvr = req->rq_flvr;
464
465         /* save request message */
466         reqmsg_size = req->rq_reqlen;
467         OBD_ALLOC(reqmsg, reqmsg_size);
468         if (reqmsg == NULL)
469                 return -ENOMEM;
470         memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
471
472         /* release old req/rep buf */
473         req->rq_cli_ctx = oldctx;
474         sptlrpc_cli_free_reqbuf(req);
475         sptlrpc_cli_free_repbuf(req);
476         req->rq_cli_ctx = newctx;
477
478         /* recalculate the flavor */
479         sptlrpc_req_set_flavor(req, 0);
480
481         /* alloc new request buffer
482          * we don't need to alloc a reply buffer here; leave that to the
483          * rest of the ptlrpc path
484          */
485         rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
486         if (!rc) {
487                 LASSERT(req->rq_reqmsg);
488                 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
489         } else {
490                 CWARN("failed to alloc reqbuf: %d\n", rc);
491                 req->rq_flvr = old_flvr;
492         }
493
494         OBD_FREE(reqmsg, reqmsg_size);
495         return rc;
496 }
497
498 /**
499  * if the current context has died, or if we resend after a flavor
500  * switch, call this function to switch the context. if no switch is
501  * needed, the request will end up with the same context.
502  *
503  * the request must have a context. in any case of failure, restore
504  * the old one - a request must always have a context.
505  */
506 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
507 {
508         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
509         struct ptlrpc_cli_ctx *newctx;
510         int                    rc;
511         ENTRY;
512
513         LASSERT(oldctx);
514
515         sptlrpc_cli_ctx_get(oldctx);
516         sptlrpc_req_put_ctx(req, 0);
517
518         rc = sptlrpc_req_get_ctx(req);
519         if (unlikely(rc)) {
520                 LASSERT(!req->rq_cli_ctx);
521
522                 /* restore old ctx */
523                 req->rq_cli_ctx = oldctx;
524                 RETURN(rc);
525         }
526
527         newctx = req->rq_cli_ctx;
528         LASSERT(newctx);
529
530         if (unlikely(newctx == oldctx && 
531                      test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
532                 /*
533                  * we still got the old dead ctx, which usually means the system is too busy
534                  */
535                 CWARN("ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
536                       newctx, newctx->cc_flags);
537
538                 cfs_schedule_timeout(CFS_TASK_INTERRUPTIBLE, HZ);
539         } else {
540                 /*
541                  * it's possible newctx == oldctx if we're switching
542                  * subflavor with the same sec.
543                  */
544                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
545                 if (rc) {
546                         /* restore old ctx */
547                         sptlrpc_req_put_ctx(req, 0);
548                         req->rq_cli_ctx = oldctx;
549                         RETURN(rc);
550                 }
551
552                 LASSERT(req->rq_cli_ctx == newctx);
553         }
554
555         sptlrpc_cli_ctx_put(oldctx, 1);
556         RETURN(0);
557 }
558 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
559
560 static
561 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
562 {
563         if (cli_ctx_is_refreshed(ctx))
564                 return 1;
565         return 0;
566 }
567
568 static
569 int ctx_refresh_timeout(void *data)
570 {
571         struct ptlrpc_request *req = data;
572         int rc;
573
574         /* conn_cnt is needed in expire_one_request */
575         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
576
577         rc = ptlrpc_expire_one_request(req, 1);
578         /* if we started recovery, we should mark this ctx dead; otherwise
579          * in case lgssd died, nobody would retire this ctx and following
580          * connects would keep finding the same ctx, causing a deadlock.
581          * there's an assumption that the expire time of the request is
582          * later than the context refresh expire time.
583          */
584         if (rc == 0)
585                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
586         return rc;
587 }
588
589 static
590 void ctx_refresh_interrupt(void *data)
591 {
592         struct ptlrpc_request *req = data;
593
594         spin_lock(&req->rq_lock);
595         req->rq_intr = 1;
596         spin_unlock(&req->rq_lock);
597 }
598
599 static
600 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
601 {
602         spin_lock(&ctx->cc_lock);
603         if (!list_empty(&req->rq_ctx_chain))
604                 list_del_init(&req->rq_ctx_chain);
605         spin_unlock(&ctx->cc_lock);
606 }
607
608 /*
609  * the status of the context may be changed by other threads at any
610  * time. we allow this race, but once we return 0 the caller will
611  * assume it is up to date and keep using it until the owning rpc is done.
612  *
613  * @timeout:
614  *    < 0  - don't wait
615  *    = 0  - wait until success or fatal error occur
616  *    > 0  - timeout value
617  *
618  * return 0 only if the context is up to date.
619  */
620 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
621 {
622         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
623         struct ptlrpc_sec      *sec;
624         struct l_wait_info      lwi;
625         int                     rc;
626         ENTRY;
627
628         LASSERT(ctx);
629
630         if (req->rq_ctx_init || req->rq_ctx_fini)
631                 RETURN(0);
632
633         /*
634          * during the process a request's context might even change type
635          * (e.g. from a gss ctx to a plain ctx), so on each loop we need to
636          * re-check everything
637          */
638 again:
639         rc = import_sec_validate_get(req->rq_import, &sec);
640         if (rc)
641                 RETURN(rc);
642
643         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
644                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
645                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
646                 req_off_ctx_list(req, ctx);
647                 sptlrpc_req_replace_dead_ctx(req);
648                 ctx = req->rq_cli_ctx;
649         }
650         sptlrpc_sec_put(sec);
651
652         if (cli_ctx_is_eternal(ctx))
653                 RETURN(0);
654
655         if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
656                 LASSERT(ctx->cc_ops->refresh);
657                 ctx->cc_ops->refresh(ctx);
658         }
659         LASSERT(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
660
661         LASSERT(ctx->cc_ops->validate);
662         if (ctx->cc_ops->validate(ctx) == 0) {
663                 req_off_ctx_list(req, ctx);
664                 RETURN(0);
665         }
666
667         if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
668                 req->rq_err = 1;
669                 req_off_ctx_list(req, ctx);
670                 RETURN(-EPERM);
671         }
672
673         /* This is subtle. For a resent message we have to keep the original
674          * context to survive the following situation:
675          *  1. the request is sent to the server
676          *  2. recovery is kicked off
677          *  3. recovery finishes, the request is marked as resent
678          *  4. the request is resent
679          *  5. the old reply from the server arrives (because the xid is the same)
680          *  6. the reply is verified (has to succeed)
681          *  7. a new reply from the server arrives, lnet drops it
682          *
683          * Note we can't simply change the xid for a resent request because
684          * the server relies on it for reply reconstruction.
685          *
686          * Commonly the original context should be uptodate because we have
687          * a nice expiry time; and the server will keep its half of the
688          * context because we hold at least one ref on the old context,
689          * which prevents the context-destroy RPC from being sent. So the
690          * server can still accept the request and finish the RPC. Two cases:
691          *  1. If the server-side context has been trimmed, NO_CONTEXT will
692          *     be returned and gss_cli_ctx_verify/unseal will switch to the
693          *     new context by force.
694          *  2. If the current context was never refreshed, we are fine: we
695          *     never really sent a request with the old context before.
696          */
697         if (test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
698             unlikely(req->rq_reqmsg) &&
699             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
700                 req_off_ctx_list(req, ctx);
701                 RETURN(0);
702         }
703
704         if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
705                 req_off_ctx_list(req, ctx);
706                 /*
707                  * don't switch ctx if import was deactivated
708                  */
709                 if (req->rq_import->imp_deactive) {
710                         req->rq_err = 1;
711                         RETURN(-EINTR);
712                 }
713
714                 rc = sptlrpc_req_replace_dead_ctx(req);
715                 if (rc) {
716                         LASSERT(ctx == req->rq_cli_ctx);
717                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
718                                 req, ctx, rc);
719                         req->rq_err = 1;
720                         RETURN(rc);
721                 }
722
723                 ctx = req->rq_cli_ctx;
724                 goto again;
725         }
726
727         /* Now we're sure this context is in the middle of an upcall; add
728          * ourselves to the waiting list
729          */
730         spin_lock(&ctx->cc_lock);
731         if (list_empty(&req->rq_ctx_chain))
732                 list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
733         spin_unlock(&ctx->cc_lock);
734
735         if (timeout < 0)
736                 RETURN(-EWOULDBLOCK);
737
738         /* Clear any flags that may be present from previous sends */
739         LASSERT(req->rq_receiving_reply == 0);
740         spin_lock(&req->rq_lock);
741         req->rq_err = 0;
742         req->rq_timedout = 0;
743         req->rq_resend = 0;
744         req->rq_restart = 0;
745         spin_unlock(&req->rq_lock);
746
747         lwi = LWI_TIMEOUT_INTR(timeout * HZ, ctx_refresh_timeout,
748                                ctx_refresh_interrupt, req);
749         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
750
751         /* we could be here in the following cases:
752          * - successfully refreshed;
753          * - interrupted;
754          * - timed out, and we don't want to recover from the failure;
755          * - timed out, and woken up when recovery finished;
756          * - someone else marked this ctx dead by force;
757          * - someone invalidated the req and called ptlrpc_client_wake_req(),
758          *   e.g. ptlrpc_abort_inflight();
759          */
760         if (!cli_ctx_is_refreshed(ctx)) {
761                 /* timed out or interrupted */
762                 req_off_ctx_list(req, ctx);
763
764                 LASSERT(rc != 0);
765                 RETURN(rc);
766         }
767
768         goto again;
769 }
770
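/*
 * Editor's note: the timeout convention documented above the function also
 * gives callers a non-blocking probe, for example:
 *
 *     rc = sptlrpc_req_refresh_ctx(req, -1);
 *     if (rc == -EWOULDBLOCK) {
 *             ... refresh is still in flight, try again later ...
 *     }
 *
 * With a negative timeout the request is queued on the context's waiting
 * list and the function returns immediately instead of sleeping in
 * l_wait_event().
 */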
771 /*
772  * Note this could be called in two situations:
773  * - new request from ptlrpc_pre_req(), with proper @opcode
774  * - old request which changed ctx in the middle, with @opcode == 0
775  */
776 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
777 {
778         struct ptlrpc_sec *sec;
779
780         LASSERT(req->rq_import);
781         LASSERT(req->rq_cli_ctx);
782         LASSERT(req->rq_cli_ctx->cc_sec);
783         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
784
785         /* special security flags according to opcode */
786         switch (opcode) {
787         case OST_READ:
788         case MDS_READPAGE:
789                 req->rq_bulk_read = 1;
790                 break;
791         case OST_WRITE:
792         case MDS_WRITEPAGE:
793                 req->rq_bulk_write = 1;
794                 break;
795         case SEC_CTX_INIT:
796                 req->rq_ctx_init = 1;
797                 break;
798         case SEC_CTX_FINI:
799                 req->rq_ctx_fini = 1;
800                 break;
801         case 0:
802                 /* init/fini rpc won't be resent, so can't be here */
803                 LASSERT(req->rq_ctx_init == 0);
804                 LASSERT(req->rq_ctx_fini == 0);
805
806                 /* cleanup flags, which should be recalculated */
807                 req->rq_pack_udesc = 0;
808                 req->rq_pack_bulk = 0;
809                 break;
810         }
811
812         sec = req->rq_cli_ctx->cc_sec;
813
814         spin_lock(&sec->ps_lock);
815         req->rq_flvr = sec->ps_flvr;
816         spin_unlock(&sec->ps_lock);
817
818         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
819          * destruction rpc */
820         if (unlikely(req->rq_ctx_init))
821                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
822         else if (unlikely(req->rq_ctx_fini))
823                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
824
825         /* user descriptor flag, null security can't do it anyway */
826         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
827             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
828                 req->rq_pack_udesc = 1;
829
830         /* bulk security flag */
831         if ((req->rq_bulk_read || req->rq_bulk_write) &&
832             sptlrpc_flavor_has_bulk(&req->rq_flvr))
833                 req->rq_pack_bulk = 1;
834 }
835
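/*
 * Example (editor's addition): for a bulk read such as OST_READ the logic
 * above boils down to
 *
 *     req->rq_bulk_read = 1;
 *     req->rq_flvr      = sec->ps_flvr;
 *     req->rq_pack_bulk = sptlrpc_flavor_has_bulk(&req->rq_flvr) ? 1 : 0;
 *
 * (the flavor snapshot is taken under ps_lock, and rq_pack_udesc is handled
 * the same way for all opcodes), while SEC_CTX_INIT and SEC_CTX_FINI
 * requests get their service part forced to SPTLRPC_SVC_NULL and
 * SPTLRPC_SVC_INTG respectively, regardless of the configured flavor.
 */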
836 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
837 {
838         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
839                 return;
840
841         LASSERT(req->rq_clrbuf);
842         if (req->rq_pool || !req->rq_reqbuf)
843                 return;
844
845         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
846         req->rq_reqbuf = NULL;
847         req->rq_reqbuf_len = 0;
848 }
849
850 /*
851  * check whether the current user has a valid context for an import.
852  * might retry repeatedly in case of non-fatal errors.
853  * return 0 on success, < 0 on failure
854  */
855 int sptlrpc_import_check_ctx(struct obd_import *imp)
856 {
857         struct ptlrpc_sec     *sec;
858         struct ptlrpc_cli_ctx *ctx;
859         struct ptlrpc_request *req = NULL;
860         int rc;
861         ENTRY;
862
863         might_sleep();
864
865         sec = sptlrpc_import_sec_ref(imp);
866         ctx = get_my_ctx(sec);
867         sptlrpc_sec_put(sec);
868
869         if (!ctx)
870                 RETURN(-ENOMEM);
871
872         if (cli_ctx_is_eternal(ctx) ||
873             ctx->cc_ops->validate(ctx) == 0) {
874                 sptlrpc_cli_ctx_put(ctx, 1);
875                 RETURN(0);
876         }
877
878         if (cli_ctx_is_error(ctx)) {
879                 sptlrpc_cli_ctx_put(ctx, 1);
880                 RETURN(-EACCES);
881         }
882
883         OBD_ALLOC_PTR(req);
884         if (!req)
885                 RETURN(-ENOMEM);
886
887         spin_lock_init(&req->rq_lock);
888         atomic_set(&req->rq_refcount, 10000);
889         CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
890         cfs_waitq_init(&req->rq_reply_waitq);
891         req->rq_import = imp;
892         req->rq_flvr = sec->ps_flvr;
893         req->rq_cli_ctx = ctx;
894
895         rc = sptlrpc_req_refresh_ctx(req, 0);
896         LASSERT(list_empty(&req->rq_ctx_chain));
897         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
898         OBD_FREE_PTR(req);
899
900         RETURN(rc);
901 }
902
903 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
904 {
905         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
906         int rc = 0;
907         ENTRY;
908
909         LASSERT(ctx);
910         LASSERT(ctx->cc_sec);
911         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
912
913         /* we wrap bulk request here because now we can be sure
914          * the context is uptodate.
915          */
916         if (req->rq_bulk) {
917                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
918                 if (rc)
919                         RETURN(rc);
920         }
921
922         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
923         case SPTLRPC_SVC_NULL:
924         case SPTLRPC_SVC_AUTH:
925         case SPTLRPC_SVC_INTG:
926                 LASSERT(ctx->cc_ops->sign);
927                 rc = ctx->cc_ops->sign(ctx, req);
928                 break;
929         case SPTLRPC_SVC_PRIV:
930                 LASSERT(ctx->cc_ops->seal);
931                 rc = ctx->cc_ops->seal(ctx, req);
932                 break;
933         default:
934                 LBUG();
935         }
936
937         if (rc == 0) {
938                 LASSERT(req->rq_reqdata_len);
939                 LASSERT(req->rq_reqdata_len % 8 == 0);
940                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
941         }
942
943         RETURN(rc);
944 }
945
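/*
 * Editor's note: the service part of the wire flavor picks the wrap
 * operation above, and do_cli_unwrap_reply() below applies the mirror
 * image on the way back:
 *
 *     SPTLRPC_SVC_NULL / AUTH / INTG  ->  cc_ops->sign  /  cc_ops->verify
 *     SPTLRPC_SVC_PRIV                ->  cc_ops->seal  /  cc_ops->unseal
 */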
946 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
947 {
948         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
949         int                    rc;
950         ENTRY;
951
952         LASSERT(ctx);
953         LASSERT(ctx->cc_sec);
954         LASSERT(req->rq_repbuf);
955         LASSERT(req->rq_repdata);
956         LASSERT(req->rq_repmsg == NULL);
957
958         req->rq_rep_swab_mask = 0;
959
960         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
961         switch (rc) {
962         case 1:
963                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
964         case 0:
965                 break;
966         default:
967                 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
968                 RETURN(-EPROTO);
969         }
970
971         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
972                 CERROR("replied data length %d too small\n",
973                        req->rq_repdata_len);
974                 RETURN(-EPROTO);
975         }
976
977         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
978             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
979                 CERROR("reply policy %u doesn't match request policy %u\n",
980                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
981                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
982                 RETURN(-EPROTO);
983         }
984
985         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
986         case SPTLRPC_SVC_NULL:
987         case SPTLRPC_SVC_AUTH:
988         case SPTLRPC_SVC_INTG:
989                 LASSERT(ctx->cc_ops->verify);
990                 rc = ctx->cc_ops->verify(ctx, req);
991                 break;
992         case SPTLRPC_SVC_PRIV:
993                 LASSERT(ctx->cc_ops->unseal);
994                 rc = ctx->cc_ops->unseal(ctx, req);
995                 break;
996         default:
997                 LBUG();
998         }
999         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1000
1001         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1002             !req->rq_ctx_init)
1003                 req->rq_rep_swab_mask = 0;
1004         RETURN(rc);
1005 }
1006
1007 /*
1008  * by the time this is called, the reply buffer should have been un-posted,
1009  * so nothing is going to change.
1010  */
1011 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1012 {
1013         LASSERT(req->rq_repbuf);
1014         LASSERT(req->rq_repdata == NULL);
1015         LASSERT(req->rq_repmsg == NULL);
1016         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1017
1018         if (req->rq_reply_off == 0 &&
1019             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1020                 CERROR("real reply with offset 0\n");
1021                 return -EPROTO;
1022         }
1023
1024         if (req->rq_reply_off % 8 != 0) {
1025                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1026                 return -EPROTO;
1027         }
1028
1029         req->rq_repdata = (struct lustre_msg *)
1030                                 (req->rq_repbuf + req->rq_reply_off);
1031         req->rq_repdata_len = req->rq_nob_received;
1032
1033         return do_cli_unwrap_reply(req);
1034 }
1035
1036 /**
1037  * When this is called, the receive buffer might still be posted, so the
1038  * reply data might change at any time, whether or not we hold rq_lock. we
1039  * expect rq_reply_off to be 0 and rq_nob_received to be the early reply size.
1040  *
1041  * we allocate a separate ptlrpc_request and reply buffer for early reply
1042  * processing; on success return 0 and \a req_ret is a duplicated request.
1043  * the caller must call sptlrpc_cli_finish_early_reply() on the returned
1044  * request to release it. if anything goes wrong \a req_ret is not set.
1045  */
1046 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1047                                    struct ptlrpc_request **req_ret)
1048 {
1049         struct ptlrpc_request  *early_req;
1050         char                   *early_buf;
1051         int                     early_bufsz, early_size;
1052         int                     rc;
1053         ENTRY;
1054
1055         OBD_ALLOC_PTR(early_req);
1056         if (early_req == NULL)
1057                 RETURN(-ENOMEM);
1058
1059         early_size = req->rq_nob_received;
1060         early_bufsz = size_roundup_power2(early_size);
1061         OBD_ALLOC(early_buf, early_bufsz);
1062         if (early_buf == NULL)
1063                 GOTO(err_req, rc = -ENOMEM);
1064
1065         /* sanity checks and data copy-out, done inside the spinlock */
1066         spin_lock(&req->rq_lock);
1067
1068         if (req->rq_replied) {
1069                 spin_unlock(&req->rq_lock);
1070                 GOTO(err_buf, rc = -EALREADY);
1071         }
1072
1073         LASSERT(req->rq_repbuf);
1074         LASSERT(req->rq_repdata == NULL);
1075         LASSERT(req->rq_repmsg == NULL);
1076
1077         if (req->rq_reply_off != 0) {
1078                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1079                 spin_unlock(&req->rq_lock);
1080                 GOTO(err_buf, rc = -EPROTO);
1081         }
1082
1083         if (req->rq_nob_received != early_size) {
1084                 /* even if another early reply arrived, the size should be the same */
1085                 CERROR("data size has changed from %u to %u\n",
1086                        early_size, req->rq_nob_received);
1087                 spin_unlock(&req->rq_lock);
1088                 GOTO(err_buf, rc = -EINVAL);
1089         }
1090
1091         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1092                 CERROR("early reply length %d too small\n",
1093                        req->rq_nob_received);
1094                 spin_unlock(&req->rq_lock);
1095                 GOTO(err_buf, rc = -EALREADY);
1096         }
1097
1098         memcpy(early_buf, req->rq_repbuf, early_size);
1099         spin_unlock(&req->rq_lock);
1100
1101         spin_lock_init(&early_req->rq_lock);
1102         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1103         early_req->rq_flvr = req->rq_flvr;
1104         early_req->rq_repbuf = early_buf;
1105         early_req->rq_repbuf_len = early_bufsz;
1106         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1107         early_req->rq_repdata_len = early_size;
1108         early_req->rq_early = 1;
1109
1110         rc = do_cli_unwrap_reply(early_req);
1111         if (rc) {
1112                 DEBUG_REQ(D_ADAPTTO, early_req,
1113                           "error %d unwrap early reply", rc);
1114                 GOTO(err_ctx, rc);
1115         }
1116
1117         LASSERT(early_req->rq_repmsg);
1118         *req_ret = early_req;
1119         RETURN(0);
1120
1121 err_ctx:
1122         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1123 err_buf:
1124         OBD_FREE(early_buf, early_bufsz);
1125 err_req:
1126         OBD_FREE_PTR(early_req);
1127         RETURN(rc);
1128 }
1129
1130 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1131 {
1132         LASSERT(early_req->rq_repbuf);
1133         LASSERT(early_req->rq_repdata);
1134         LASSERT(early_req->rq_repmsg);
1135
1136         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1137         OBD_FREE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1138         OBD_FREE_PTR(early_req);
1139 }
1140
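/*
 * Pairing sketch (editor's addition): a caller of
 * sptlrpc_cli_unwrap_early_reply() owns the duplicated request it gets
 * back and must release it with sptlrpc_cli_finish_early_reply():
 *
 *     struct ptlrpc_request *early_req;
 *
 *     rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
 *     if (rc == 0) {
 *             ... inspect early_req->rq_repmsg, e.g. for timeout data ...
 *             sptlrpc_cli_finish_early_reply(early_req);
 *     }
 *
 * On failure early_req is not set and nothing needs to be released.
 */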
1141 /**************************************************
1142  * sec ID                                         *
1143  **************************************************/
1144
1145 /*
1146  * "fixed" sec (e.g. null) use sec_id < 0
1147  */
1148 static atomic_t sptlrpc_sec_id = ATOMIC_INIT(1);
1149
1150 int sptlrpc_get_next_secid(void)
1151 {
1152         return atomic_inc_return(&sptlrpc_sec_id);
1153 }
1154 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1155
1156 /**************************************************
1157  * client side high-level security APIs           *
1158  **************************************************/
1159
1160 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1161                                    int grace, int force)
1162 {
1163         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1164
1165         LASSERT(policy->sp_cops);
1166         LASSERT(policy->sp_cops->flush_ctx_cache);
1167
1168         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1169 }
1170
1171 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1172 {
1173         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1174
1175         LASSERT(atomic_read(&sec->ps_refcount) == 0);
1176         LASSERT(atomic_read(&sec->ps_nctx) == 0);
1177         LASSERT(policy->sp_cops->destroy_sec);
1178
1179         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1180
1181         policy->sp_cops->destroy_sec(sec);
1182         sptlrpc_policy_put(policy);
1183 }
1184
1185 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1186 {
1187         sec_cop_destroy_sec(sec);
1188 }
1189 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1190
1191 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1192 {
1193         LASSERT(atomic_read(&sec->ps_refcount) > 0);
1194
1195         if (sec->ps_policy->sp_cops->kill_sec) {
1196                 sec->ps_policy->sp_cops->kill_sec(sec);
1197
1198                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1199         }
1200 }
1201
1202 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1203 {
1204         if (sec) {
1205                 LASSERT(atomic_read(&sec->ps_refcount) > 0);
1206                 atomic_inc(&sec->ps_refcount);
1207         }
1208
1209         return sec;
1210 }
1211 EXPORT_SYMBOL(sptlrpc_sec_get);
1212
1213 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1214 {
1215         if (sec) {
1216                 LASSERT(atomic_read(&sec->ps_refcount) > 0);
1217
1218                 if (atomic_dec_and_test(&sec->ps_refcount)) {
1219                         LASSERT(atomic_read(&sec->ps_nctx) == 0);
1220
1221                         sptlrpc_gc_del_sec(sec);
1222                         sec_cop_destroy_sec(sec);
1223                 }
1224         }
1225 }
1226 EXPORT_SYMBOL(sptlrpc_sec_put);
1227
1228 /*
1229  * the policy module is responsible for taking a reference on the import
1230  */
1231 static
1232 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1233                                        struct ptlrpc_svc_ctx *svc_ctx,
1234                                        struct sptlrpc_flavor *sf,
1235                                        enum lustre_sec_part sp)
1236 {
1237         struct ptlrpc_sec_policy *policy;
1238         struct ptlrpc_sec        *sec;
1239         char                      str[32];
1240         ENTRY;
1241
1242         if (svc_ctx) {
1243                 LASSERT(imp->imp_dlm_fake == 1);
1244
1245                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1246                        imp->imp_obd->obd_type->typ_name,
1247                        imp->imp_obd->obd_name,
1248                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1249
1250                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1251                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1252         } else {
1253                 LASSERT(imp->imp_dlm_fake == 0);
1254
1255                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1256                        imp->imp_obd->obd_type->typ_name,
1257                        imp->imp_obd->obd_name,
1258                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1259
1260                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1261                 if (!policy) {
1262                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1263                         RETURN(NULL);
1264                 }
1265         }
1266
1267         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1268         if (sec) {
1269                 atomic_inc(&sec->ps_refcount);
1270
1271                 sec->ps_part = sp;
1272
1273                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1274                         sptlrpc_gc_add_sec(sec);
1275         } else {
1276                 sptlrpc_policy_put(policy);
1277         }
1278
1279         RETURN(sec);
1280 }
1281
1282 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1283 {
1284         struct ptlrpc_sec *sec;
1285
1286         spin_lock(&imp->imp_lock);
1287         sec = sptlrpc_sec_get(imp->imp_sec);
1288         spin_unlock(&imp->imp_lock);
1289
1290         return sec;
1291 }
1292 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1293
1294 static void sptlrpc_import_sec_install(struct obd_import *imp,
1295                                        struct ptlrpc_sec *sec)
1296 {
1297         struct ptlrpc_sec *old_sec;
1298
1299         LASSERT(atomic_read(&sec->ps_refcount) > 0);
1300
1301         spin_lock(&imp->imp_lock);
1302         old_sec = imp->imp_sec;
1303         imp->imp_sec = sec;
1304         spin_unlock(&imp->imp_lock);
1305
1306         if (old_sec) {
1307                 sptlrpc_sec_kill(old_sec);
1308
1309                 /* balance the ref taken by this import */
1310                 sptlrpc_sec_put(old_sec);
1311         }
1312 }
1313
1314 static inline
1315 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1316 {
1317         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1318 }
1319
1320 static inline
1321 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1322 {
1323         *dst = *src;
1324 }
1325
1326 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1327                                              struct ptlrpc_sec *sec,
1328                                              struct sptlrpc_flavor *sf)
1329 {
1330         char    str1[32], str2[32];
1331
1332         if (sec->ps_flvr.sf_flags != sf->sf_flags)
1333                 CWARN("changing sec flags: %s -> %s\n",
1334                       sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1335                                            str1, sizeof(str1)),
1336                       sptlrpc_secflags2str(sf->sf_flags,
1337                                            str2, sizeof(str2)));
1338
1339         spin_lock(&sec->ps_lock);
1340         flavor_copy(&sec->ps_flvr, sf);
1341         spin_unlock(&sec->ps_lock);
1342 }
1343
1344 /*
1345  * for normal import, @svc_ctx should be NULL and @flvr is ignored;
1346  * for a reverse import, @svc_ctx and @flvr come from the incoming request.
1347  */
1348 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1349                              struct ptlrpc_svc_ctx *svc_ctx,
1350                              struct sptlrpc_flavor *flvr)
1351 {
1352         struct ptlrpc_connection   *conn;
1353         struct sptlrpc_flavor       sf;
1354         struct ptlrpc_sec          *sec, *newsec;
1355         enum lustre_sec_part        sp;
1356         char                        str[24];
1357         int                         rc = 0;
1358         ENTRY;
1359
1360         might_sleep();
1361
1362         if (imp == NULL)
1363                 RETURN(0);
1364
1365         conn = imp->imp_connection;
1366
1367         if (svc_ctx == NULL) {
1368                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1369                 /*
1370                  * normal import: determine the flavor from the rule set,
1371                  * except for mgc, whose flavor is predetermined.
1372                  */
1373                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1374                         sf = cliobd->cl_flvr_mgc;
1375                 else 
1376                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1377                                                    cliobd->cl_sp_to,
1378                                                    &cliobd->cl_target_uuid,
1379                                                    conn->c_self, &sf);
1380
1381                 sp = imp->imp_obd->u.cli.cl_sp_me;
1382         } else {
1383                 /* reverse import, determine flavor from incoming request */
1384                 sf = *flvr;
1385
1386                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1387                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1388                                       PTLRPC_SEC_FL_ROOTONLY;
1389
1390                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1391         }
1392
1393         sec = sptlrpc_import_sec_ref(imp);
1394         if (sec) {
1395                 char    str2[24];
1396
1397                 if (flavor_equal(&sf, &sec->ps_flvr))
1398                         GOTO(out, rc);
1399
1400                 CWARN("import %s->%s: changing flavor %s -> %s\n",
1401                       imp->imp_obd->obd_name,
1402                       obd_uuid2str(&conn->c_remote_uuid),
1403                       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1404                       sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1405
1406                 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1407                     SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1408                     SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1409                     SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1410                         sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1411                         GOTO(out, rc);
1412                 }
1413         } else {
1414                 CWARN("import %s->%s netid %x: select flavor %s\n",
1415                       imp->imp_obd->obd_name,
1416                       obd_uuid2str(&conn->c_remote_uuid),
1417                       LNET_NIDNET(conn->c_self),
1418                       sptlrpc_flavor2name(&sf, str, sizeof(str)));
1419         }
1420
1421         mutex_down(&imp->imp_sec_mutex);
1422
1423         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1424         if (newsec) {
1425                 sptlrpc_import_sec_install(imp, newsec);
1426         } else {
1427                 CERROR("import %s->%s: failed to create new sec\n",
1428                        imp->imp_obd->obd_name,
1429                        obd_uuid2str(&conn->c_remote_uuid));
1430                 rc = -EPERM;
1431         }
1432
1433         mutex_up(&imp->imp_sec_mutex);
1434 out:
1435         sptlrpc_sec_put(sec);
1436         RETURN(rc);
1437 }
1438
1439 void sptlrpc_import_sec_put(struct obd_import *imp)
1440 {
1441         if (imp->imp_sec) {
1442                 sptlrpc_sec_kill(imp->imp_sec);
1443
1444                 sptlrpc_sec_put(imp->imp_sec);
1445                 imp->imp_sec = NULL;
1446         }
1447 }
1448
1449 static void import_flush_ctx_common(struct obd_import *imp,
1450                                     uid_t uid, int grace, int force)
1451 {
1452         struct ptlrpc_sec *sec;
1453
1454         if (imp == NULL)
1455                 return;
1456
1457         sec = sptlrpc_import_sec_ref(imp);
1458         if (sec == NULL)
1459                 return;
1460
1461         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1462         sptlrpc_sec_put(sec);
1463 }
1464
1465 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1466 {
1467         /* it's important to use grace mode, see the explanation in
1468          * sptlrpc_req_refresh_ctx() */
1469         import_flush_ctx_common(imp, 0, 1, 1);
1470 }
1471
1472 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1473 {
1474         import_flush_ctx_common(imp, cfs_current()->uid, 1, 1);
1475 }
1476 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1477
1478 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1479 {
1480         import_flush_ctx_common(imp, -1, 1, 1);
1481 }
1482 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1483
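/*
 * Editor's summary: the three wrappers above all funnel into
 * import_flush_ctx_common(imp, uid, grace, force):
 *
 *     sptlrpc_import_flush_root_ctx(imp)   ->  uid 0          (root only)
 *     sptlrpc_import_flush_my_ctx(imp)     ->  current uid
 *     sptlrpc_import_flush_all_ctx(imp)    ->  uid -1         (all users)
 *
 * each with grace == 1 and force == 1, so "flush everything" is spelled
 * uid == -1, matching the flush_ctx_cache policy operation.
 */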
1484 /*
1485  * when this completes successfully, req->rq_reqmsg should point to the
1486  * right place.
1487  */
1488 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1489 {
1490         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1491         struct ptlrpc_sec_policy *policy;
1492         int rc;
1493
1494         LASSERT(ctx);
1495         LASSERT(atomic_read(&ctx->cc_refcount));
1496         LASSERT(ctx->cc_sec);
1497         LASSERT(ctx->cc_sec->ps_policy);
1498         LASSERT(req->rq_reqmsg == NULL);
1499
1500         policy = ctx->cc_sec->ps_policy;
1501         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1502         if (!rc) {
1503                 LASSERT(req->rq_reqmsg);
1504                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1505
1506                 /* zeroing preallocated buffer */
1507                 if (req->rq_pool)
1508                         memset(req->rq_reqmsg, 0, msgsize);
1509         }
1510
1511         return rc;
1512 }
1513
1514 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1515 {
1516         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1517         struct ptlrpc_sec_policy *policy;
1518
1519         LASSERT(ctx);
1520         LASSERT(atomic_read(&ctx->cc_refcount));
1521         LASSERT(ctx->cc_sec);
1522         LASSERT(ctx->cc_sec->ps_policy);
1523
1524         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1525                 return;
1526
1527         policy = ctx->cc_sec->ps_policy;
1528         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1529 }
1530
1531 /*
1532  * NOTE caller must guarantee the buffer size is enough for the enlargement
1533  */
1534 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1535                                   int segment, int newsize)
1536 {
1537         void   *src, *dst;
1538         int     oldsize, oldmsg_size, movesize;
1539
1540         LASSERT(segment < msg->lm_bufcount);
1541         LASSERT(msg->lm_buflens[segment] <= newsize);
1542
1543         if (msg->lm_buflens[segment] == newsize)
1544                 return;
1545
1546         /* nothing to do if we are enlarging the last segment */
1547         if (segment == msg->lm_bufcount - 1) {
1548                 msg->lm_buflens[segment] = newsize;
1549                 return;
1550         }
1551
1552         oldsize = msg->lm_buflens[segment];
1553
1554         src = lustre_msg_buf(msg, segment + 1, 0);
1555         msg->lm_buflens[segment] = newsize;
1556         dst = lustre_msg_buf(msg, segment + 1, 0);
1557         msg->lm_buflens[segment] = oldsize;
1558
1559         /* move from segment + 1 to end segment */
1560         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1561         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1562         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1563         LASSERT(movesize >= 0);
1564
1565         if (movesize)
1566                 memmove(dst, src, movesize);
1567
1568         /* note we don't clear the areas where the old data lived, it's not secret */
1569
1570         /* finally set new segment size */
1571         msg->lm_buflens[segment] = newsize;
1572 }
1573 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1574
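/*
 * Editor's illustration: enlarging a middle segment shifts every following
 * segment towards the tail, so the caller must have allocated enough room
 * past the current message end.  For a three-segment message, growing
 * segment 1 from 8 to 16 bytes moves segment 2 back by the difference while
 * preserving its contents:
 *
 *     before:  [ seg0 ][ seg1: 8 ][ seg2 ]
 *              _sptlrpc_enlarge_msg_inplace(msg, 1, 16);
 *     after:   [ seg0 ][ seg1:16 ][ seg2 ]
 *
 * sptlrpc_cli_enlarge_reqbuf() below is the entry point to use for request
 * buffers, since the policy's enlarge_reqbuf operation may also need to
 * grow or reallocate the underlying rq_reqbuf.
 */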
1575 /*
1576  * enlarge @segment of the upper message req->rq_reqmsg to @newsize; all data
1577  * will be preserved after enlargement. this must be called at least after
1578  * rq_reqmsg has been initialized.
1579  *
1580  * caller's attention: upon return, rq_reqmsg and rq_reqlen might have
1581  * been changed.
1582  */
1583 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1584                                int segment, int newsize)
1585 {
1586         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1587         struct ptlrpc_sec_cops   *cops;
1588         struct lustre_msg        *msg = req->rq_reqmsg;
1589
1590         LASSERT(ctx);
1591         LASSERT(msg);
1592         LASSERT(msg->lm_bufcount > segment);
1593         LASSERT(msg->lm_buflens[segment] <= newsize);
1594
1595         if (msg->lm_buflens[segment] == newsize)
1596                 return 0;
1597
1598         cops = ctx->cc_sec->ps_policy->sp_cops;
1599         LASSERT(cops->enlarge_reqbuf);
1600         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1601 }
1602 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1603
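/*
 * Allocate the reply buffer for @req through the policy's alloc_repbuf();
 * returns 0 right away if rq_repbuf is already set.
 */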
1604 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1605 {
1606         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1607         struct ptlrpc_sec_policy *policy;
1608         ENTRY;
1609
1610         LASSERT(ctx);
1611         LASSERT(atomic_read(&ctx->cc_refcount));
1612         LASSERT(ctx->cc_sec);
1613         LASSERT(ctx->cc_sec->ps_policy);
1614
1615         if (req->rq_repbuf)
1616                 RETURN(0);
1617
1618         policy = ctx->cc_sec->ps_policy;
1619         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1620 }
1621
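/*
 * Free the reply buffer of @req through the policy's free_repbuf();
 * a no-op if no reply buffer has been allocated.
 */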
1622 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1623 {
1624         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1625         struct ptlrpc_sec_policy *policy;
1626         ENTRY;
1627
1628         LASSERT(ctx);
1629         LASSERT(atomic_read(&ctx->cc_refcount));
1630         LASSERT(ctx->cc_sec);
1631         LASSERT(ctx->cc_sec->ps_policy);
1632
1633         if (req->rq_repbuf == NULL)
1634                 return;
1635         LASSERT(req->rq_repbuf_len);
1636
1637         policy = ctx->cc_sec->ps_policy;
1638         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1639         EXIT;
1640 }
1641
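/*
 * Client side: ask the policy to install a reverse context for @imp;
 * policies without an install_rctx hook simply succeed.
 */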
1642 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1643                                 struct ptlrpc_cli_ctx *ctx)
1644 {
1645         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1646
1647         if (!policy->sp_cops->install_rctx)
1648                 return 0;
1649         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1650 }
1651
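/*
 * Server side: ask the policy to install a reverse context for @imp;
 * a missing install_rctx hook means there is nothing to do.
 */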
1652 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1653                                 struct ptlrpc_svc_ctx *ctx)
1654 {
1655         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1656
1657         if (!policy->sp_sops->install_rctx)
1658                 return 0;
1659         return policy->sp_sops->install_rctx(imp, ctx);
1660 }
1661
1662 /****************************************
1663  * server side security                 *
1664  ****************************************/
1665
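/*
 * Return 1 if the flavor carried by @req is acceptable for the export-side
 * flavor @exp: either an exact match (or SPTLRPC_FLVR_ANY), or, for ctx
 * init/fini RPCs, a match of policy and mechanism only.
 */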
1666 static int flavor_allowed(struct sptlrpc_flavor *exp,
1667                           struct ptlrpc_request *req)
1668 {
1669         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1670
1671         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1672                 return 1;
1673
1674         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1675             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1676             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1677             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1678                 return 1;
1679
1680         return 0;
1681 }
1682
1683 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1684
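/*
 * Check the flavor of an incoming request against export @exp.  An export
 * tracks its current flavor (exp_flvr) plus two older ones
 * (exp_flvr_old[0..1]) that stay acceptable until their exp_flvr_expire[]
 * deadlines pass.  A pending flavor change (exp_flvr_changed) is parked in
 * exp_flvr_old[1] and promoted to "current" by the first matching request,
 * which also triggers adaptation of the reverse import sec.
 * Returns 0 if the request is acceptable, -EACCES otherwise.
 */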
1685 int sptlrpc_target_export_check(struct obd_export *exp,
1686                                 struct ptlrpc_request *req)
1687 {
1688         struct sptlrpc_flavor   flavor;
1689
1690         if (exp == NULL)
1691                 return 0;
1692
1693         /* a client side export has no imp_reverse, skip it.
1694          * FIXME maybe we should check the flavor here as well? */
1695         if (exp->exp_imp_reverse == NULL)
1696                 return 0;
1697
1698         /* don't care about ctx fini rpc */
1699         if (req->rq_ctx_fini)
1700                 return 0;
1701
1702         spin_lock(&exp->exp_lock);
1703
1704         /* if the flavor has just changed (exp->exp_flvr_changed != 0), we wait
1705          * for the first req with the new flavor, then treat it as the current
1706          * flavor and adapt the reverse sec accordingly.
1707          * note the first rpc with the new flavor might not carry a root ctx, in
1708          * which case we delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1709         if (unlikely(exp->exp_flvr_changed) &&
1710             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1711                 /* make the new flavor the "current" one, and the old ones
1712                  * about-to-expire */
1713                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1714                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1715                 flavor = exp->exp_flvr_old[1];
1716                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1717                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1718                 exp->exp_flvr_old[0] = exp->exp_flvr;
1719                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1720                                           EXP_FLVR_UPDATE_EXPIRE;
1721                 exp->exp_flvr = flavor;
1722
1723                 /* flavor change finished */
1724                 exp->exp_flvr_changed = 0;
1725                 LASSERT(exp->exp_flvr_adapt == 1);
1726
1727                 /* if it's gss, we're only interested in root ctx init */
1728                 if (req->rq_auth_gss &&
1729                     !(req->rq_ctx_init && (req->rq_auth_usr_root ||
1730                                            req->rq_auth_usr_mdt))) {
1731                         spin_unlock(&exp->exp_lock);
1732                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d)\n",
1733                                req->rq_auth_gss, req->rq_ctx_init,
1734                                req->rq_auth_usr_root, req->rq_auth_usr_mdt);
1735                         return 0;
1736                 }
1737
1738                 exp->exp_flvr_adapt = 0;
1739                 spin_unlock(&exp->exp_lock);
1740
1741                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1742                                                 req->rq_svc_ctx, &flavor);
1743         }
1744
1745         /* if it equals the current flavor, we accept it, but still need to
1746          * deal with the reverse sec/ctx */
1747         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1748                 /* most cases should return here; we're only interested in
1749                  * gss root ctx init */
1750                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1751                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt)) {
1752                         spin_unlock(&exp->exp_lock);
1753                         return 0;
1754                 }
1755
1756                 /* if the flavor has just changed, we should not proceed; the
1757                  * current flavor will be discovered and replaced shortly.
1758                  * just let _this_ rpc pass through */
1759                 if (exp->exp_flvr_changed) {
1760                         LASSERT(exp->exp_flvr_adapt);
1761                         spin_unlock(&exp->exp_lock);
1762                         return 0;
1763                 }
1764
1765                 if (exp->exp_flvr_adapt) {
1766                         exp->exp_flvr_adapt = 0;
1767                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1768                                exp, exp->exp_flvr.sf_rpc,
1769                                exp->exp_flvr_old[0].sf_rpc,
1770                                exp->exp_flvr_old[1].sf_rpc);
1771                         flavor = exp->exp_flvr;
1772                         spin_unlock(&exp->exp_lock);
1773
1774                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1775                                                         req->rq_svc_ctx,
1776                                                         &flavor);
1777                 } else {
1778                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1779                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1780                                exp->exp_flvr_old[0].sf_rpc,
1781                                exp->exp_flvr_old[1].sf_rpc);
1782                         spin_unlock(&exp->exp_lock);
1783
1784                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1785                                                            req->rq_svc_ctx);
1786                 }
1787         }
1788
1789         if (exp->exp_flvr_expire[0]) {
1790                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1791                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1792                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1793                                        "middle one ("CFS_DURATION_T")\n", exp,
1794                                        exp->exp_flvr.sf_rpc,
1795                                        exp->exp_flvr_old[0].sf_rpc,
1796                                        exp->exp_flvr_old[1].sf_rpc,
1797                                        exp->exp_flvr_expire[0] -
1798                                                 cfs_time_current_sec());
1799                                 spin_unlock(&exp->exp_lock);
1800                                 return 0;
1801                         }
1802                 } else {
1803                         CDEBUG(D_SEC, "mark middle expired\n");
1804                         exp->exp_flvr_expire[0] = 0;
1805                 }
1806                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1807                        exp->exp_flvr.sf_rpc,
1808                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1809                        req->rq_flvr.sf_rpc);
1810         }
1811
1812         /* it doesn't match the current flavor; the only chance to accept it
1813          * is to match one of the old flavors which has not yet expired. */
1814         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1815                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1816                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1817                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1818                                        "oldest one ("CFS_DURATION_T")\n", exp,
1819                                        exp->exp_flvr.sf_rpc,
1820                                        exp->exp_flvr_old[0].sf_rpc,
1821                                        exp->exp_flvr_old[1].sf_rpc,
1822                                        exp->exp_flvr_expire[1] -
1823                                                 cfs_time_current_sec());
1824                                 spin_unlock(&exp->exp_lock);
1825                                 return 0;
1826                         }
1827                 } else {
1828                         CDEBUG(D_SEC, "mark oldest expired\n");
1829                         exp->exp_flvr_expire[1] = 0;
1830                 }
1831                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1832                        exp, exp->exp_flvr.sf_rpc,
1833                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1834                        req->rq_flvr.sf_rpc);
1835         } else {
1836                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1837                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1838                        exp->exp_flvr_old[1].sf_rpc);
1839         }
1840
1841         spin_unlock(&exp->exp_lock);
1842
1843         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u) with "
1844               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1845               exp, exp->exp_obd->obd_name,
1846               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1847               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_flvr.sf_rpc,
1848               exp->exp_flvr.sf_rpc,
1849               exp->exp_flvr_old[0].sf_rpc,
1850               exp->exp_flvr_expire[0] ?
1851               (unsigned long) (exp->exp_flvr_expire[0] -
1852                                cfs_time_current_sec()) : 0,
1853               exp->exp_flvr_old[1].sf_rpc,
1854               exp->exp_flvr_expire[1] ?
1855               (unsigned long) (exp->exp_flvr_expire[1] -
1856                                cfs_time_current_sec()) : 0);
1857         return -EACCES;
1858 }
1859 EXPORT_SYMBOL(sptlrpc_target_export_check);
1860
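/*
 * Walk all exports of @obd and recompute the preferred flavor of each one
 * from the rule set @rset; an export whose flavor changes is marked
 * exp_flvr_changed/exp_flvr_adapt so the switch happens on the next
 * matching request (see sptlrpc_target_export_check() above).
 */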
1861 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1862                                       struct sptlrpc_rule_set *rset)
1863 {
1864         struct obd_export       *exp;
1865         struct sptlrpc_flavor    new_flvr;
1866
1867         LASSERT(obd);
1868
1869         spin_lock(&obd->obd_dev_lock);
1870
1871         list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1872                 if (exp->exp_connection == NULL)
1873                         continue;
1874
1875                 /* note if this export's flavor had just been updated
1876                  * (exp_flvr_changed == 1), this will override the
1877                  * previous update. */
1878                 spin_lock(&exp->exp_lock);
1879                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1880                                              exp->exp_connection->c_peer.nid,
1881                                              &new_flvr);
1882                 if (exp->exp_flvr_changed ||
1883                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1884                         exp->exp_flvr_old[1] = new_flvr;
1885                         exp->exp_flvr_expire[1] = 0;
1886                         exp->exp_flvr_changed = 1;
1887                         exp->exp_flvr_adapt = 1;
1888
1889                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1890                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1891                                exp->exp_flvr.sf_rpc,
1892                                exp->exp_flvr_old[1].sf_rpc);
1893                 }
1894                 spin_unlock(&exp->exp_lock);
1895         }
1896
1897         spin_unlock(&obd->obd_dev_lock);
1898 }
1899 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1900
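/*
 * Sanity check the declared source part (rq_sp_from) of an accepted
 * request.  For gss-authenticated requests, a claimed MDT source must have
 * been authenticated as MDT, an OST source as MDT or root, and
 * LUSTRE_SP_ANY is rejected.  Returns SECSVC_DROP on violation, otherwise
 * passes @svc_rc through.
 */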
1901 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1902 {
1903         if (svc_rc == SECSVC_DROP)
1904                 return SECSVC_DROP;
1905
1906         switch (req->rq_sp_from) {
1907         case LUSTRE_SP_CLI:
1908         case LUSTRE_SP_MDT:
1909         case LUSTRE_SP_OST:
1910         case LUSTRE_SP_MGC:
1911         case LUSTRE_SP_MGS:
1912         case LUSTRE_SP_ANY:
1913                 break;
1914         default:
1915                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
1916                 return SECSVC_DROP;
1917         }
1918
1919         if (!req->rq_auth_gss)
1920                 return svc_rc;
1921
1922         if (unlikely(req->rq_sp_from == LUSTRE_SP_ANY)) {
1923                 CERROR("source part not specified\n");
1924                 return SECSVC_DROP;
1925         }
1926
1927         /* from MDT, must be authenticated as MDT */
1928         if (unlikely(req->rq_sp_from == LUSTRE_SP_MDT &&
1929                      !req->rq_auth_usr_mdt)) {
1930                 DEBUG_REQ(D_ERROR, req, "fake source MDT");
1931                 return SECSVC_DROP;
1932         }
1933
1934         /* from OST, it must be a callback to MDT or CLI; the reverse sec
1935          * was set up from the mdt/root keytab, so it should be MDT or root FIXME */
1936         if (unlikely(req->rq_sp_from == LUSTRE_SP_OST &&
1937                      !req->rq_auth_usr_mdt && !req->rq_auth_usr_root)) {
1938                 DEBUG_REQ(D_ERROR, req, "fake source OST");
1939                 return SECSVC_DROP;
1940         }
1941
1942         return svc_rc;
1943 }
1944
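/*
 * Server-side entry point for an incoming request: unpack the wire
 * message, look up the security policy from the wire flavor, and let the
 * policy's accept() hook verify/unseal it.  On success rq_reqmsg and
 * rq_svc_ctx are set up; returns one of the SECSVC_* codes.
 */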
1945 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
1946 {
1947         struct ptlrpc_sec_policy *policy;
1948         struct lustre_msg        *msg = req->rq_reqbuf;
1949         int                       rc;
1950         ENTRY;
1951
1952         LASSERT(msg);
1953         LASSERT(req->rq_reqmsg == NULL);
1954         LASSERT(req->rq_repmsg == NULL);
1955         LASSERT(req->rq_svc_ctx == NULL);
1956
1957         req->rq_req_swab_mask = 0;
1958
1959         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
1960         switch (rc) {
1961         case 1:
1962                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
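                /* fall through */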
1963         case 0:
1964                 break;
1965         default:
1966                 CERROR("error unpacking request from %s x"LPU64"\n",
1967                        libcfs_id2str(req->rq_peer), req->rq_xid);
1968                 RETURN(SECSVC_DROP);
1969         }
1970
1971         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
1972         req->rq_sp_from = LUSTRE_SP_ANY;
1973         req->rq_auth_uid = INVALID_UID;
1974         req->rq_auth_mapped_uid = INVALID_UID;
1975
1976         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
1977         if (!policy) {
1978                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
1979                 RETURN(SECSVC_DROP);
1980         }
1981
1982         LASSERT(policy->sp_sops->accept);
1983         rc = policy->sp_sops->accept(req);
1984         sptlrpc_policy_put(policy);
1985         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
1986         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
1987
1988         /*
1989          * if it's not the null flavor (i.e. the real msg is embedded inside
1990          * the wrapper), reset the swab mask for the coming inner msg unpacking.
1991          */
1992         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
1993                 req->rq_req_swab_mask = 0;
1994
1995         /* sanity check for the request source */
1996         rc = sptlrpc_svc_check_from(req, rc);
1997         RETURN(rc);
1998 }
1999
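/*
 * Allocate the reply state for @req through the policy's alloc_rs().
 * If that fails with -ENOMEM, fall back to a preallocated reply state
 * from the service's emergency pool and retry.
 */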
2000 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req,
2001                          int msglen)
2002 {
2003         struct ptlrpc_sec_policy *policy;
2004         struct ptlrpc_reply_state *rs;
2005         int rc;
2006         ENTRY;
2007
2008         LASSERT(req->rq_svc_ctx);
2009         LASSERT(req->rq_svc_ctx->sc_policy);
2010
2011         policy = req->rq_svc_ctx->sc_policy;
2012         LASSERT(policy->sp_sops->alloc_rs);
2013
2014         rc = policy->sp_sops->alloc_rs(req, msglen);
2015         if (unlikely(rc == -ENOMEM)) {
2016                 /* failed alloc, try emergency pool */
2017                 rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_service);
2018                 if (rs == NULL)
2019                         RETURN(-ENOMEM);
2020
2021                 req->rq_reply_state = rs;
2022                 rc = policy->sp_sops->alloc_rs(req, msglen);
2023                 if (rc) {
2024                         lustre_put_emerg_rs(rs);
2025                         req->rq_reply_state = NULL;
2026                 }
2027         }
2028
2029         LASSERT(rc != 0 ||
2030                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2031
2032         RETURN(rc);
2033 }
2034
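/*
 * Let the policy's authorize() hook sign/seal the prepared reply; on
 * success rs_repdata_len holds the on-wire reply length.
 */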
2035 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2036 {
2037         struct ptlrpc_sec_policy *policy;
2038         int rc;
2039         ENTRY;
2040
2041         LASSERT(req->rq_svc_ctx);
2042         LASSERT(req->rq_svc_ctx->sc_policy);
2043
2044         policy = req->rq_svc_ctx->sc_policy;
2045         LASSERT(policy->sp_sops->authorize);
2046
2047         rc = policy->sp_sops->authorize(req);
2048         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2049
2050         RETURN(rc);
2051 }
2052
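/*
 * Release a reply state through the policy's free_rs(); reply states that
 * came from the emergency pool (rs_prealloc) are returned to it.
 */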
2053 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2054 {
2055         struct ptlrpc_sec_policy *policy;
2056         unsigned int prealloc;
2057         ENTRY;
2058
2059         LASSERT(rs->rs_svc_ctx);
2060         LASSERT(rs->rs_svc_ctx->sc_policy);
2061
2062         policy = rs->rs_svc_ctx->sc_policy;
2063         LASSERT(policy->sp_sops->free_rs);
2064
2065         prealloc = rs->rs_prealloc;
2066         policy->sp_sops->free_rs(rs);
2067
2068         if (prealloc)
2069                 lustre_put_emerg_rs(rs);
2070         EXIT;
2071 }
2072
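/*
 * Reference counting for the server-side security context attached to a
 * request: the final sptlrpc_svc_ctx_decref() lets the policy free the
 * context and clears rq_svc_ctx.
 */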
2073 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2074 {
2075         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2076
2077         if (ctx == NULL)
2078                 return;
2079
2080         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
2081         atomic_inc(&ctx->sc_refcount);
2082 }
2083
2084 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2085 {
2086         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2087
2088         if (ctx == NULL)
2089                 return;
2090
2091         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
2092         if (atomic_dec_and_test(&ctx->sc_refcount)) {
2093                 if (ctx->sc_policy->sp_sops->free_ctx)
2094                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2095         }
2096         req->rq_svc_ctx = NULL;
2097 }
2098
2099 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2100 {
2101         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2102
2103         if (ctx == NULL)
2104                 return;
2105
2106         LASSERT(atomic_read(&ctx->sc_refcount) > 0);
2107         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2108                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2109 }
2110 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2111
2112 /****************************************
2113  * bulk security                        *
2114  ****************************************/
2115
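/*
 * Client side: let the context's wrap_bulk() protect the bulk pages of
 * @desc when the flavor asks for bulk protection (rq_pack_bulk).
 */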
2116 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2117                           struct ptlrpc_bulk_desc *desc)
2118 {
2119         struct ptlrpc_cli_ctx *ctx;
2120
2121         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2122
2123         if (!req->rq_pack_bulk)
2124                 return 0;
2125
2126         ctx = req->rq_cli_ctx;
2127         if (ctx->cc_ops->wrap_bulk)
2128                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2129         return 0;
2130 }
2131 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2132
2133 /*
2134  * return the number of bytes of plain text actually received, or an error code.
2135  */
2136 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2137                                  struct ptlrpc_bulk_desc *desc,
2138                                  int nob)
2139 {
2140         struct ptlrpc_cli_ctx  *ctx;
2141         int                     rc;
2142
2143         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2144
2145         if (!req->rq_pack_bulk)
2146                 return desc->bd_nob_transferred;
2147
2148         ctx = req->rq_cli_ctx;
2149         if (ctx->cc_ops->unwrap_bulk) {
2150                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2151                 if (rc < 0)
2152                         return rc;
2153         }
2154         return desc->bd_nob_transferred;
2155 }
2156 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2157
2158 /*
2159  * return 0 on success, or an error code.
2160  */
2161 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2162                                   struct ptlrpc_bulk_desc *desc)
2163 {
2164         struct ptlrpc_cli_ctx  *ctx;
2165         int                     rc;
2166
2167         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2168
2169         if (!req->rq_pack_bulk)
2170                 return 0;
2171
2172         ctx = req->rq_cli_ctx;
2173         if (ctx->cc_ops->unwrap_bulk) {
2174                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2175                 if (rc < 0)
2176                         return rc;
2177         }
2178
2179         /*
2180          * if everything went right, nob should equal nob_transferred.
2181          * in privacy mode, nob_transferred needs to be adjusted.
2182          */
2183         if (desc->bd_nob != desc->bd_nob_transferred) {
2184                 CERROR("nob %d doesn't match transferred nob %d\n",
2185                        desc->bd_nob, desc->bd_nob_transferred);
2186                 return -EPROTO;
2187         }
2188
2189         return 0;
2190 }
2191 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2192
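/*
 * Server side of a bulk read: let the policy's wrap_bulk() hook protect
 * the outgoing bulk data if bulk protection was requested.
 */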
2193 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2194                           struct ptlrpc_bulk_desc *desc)
2195 {
2196         struct ptlrpc_svc_ctx *ctx;
2197
2198         LASSERT(req->rq_bulk_read);
2199
2200         if (!req->rq_pack_bulk)
2201                 return 0;
2202
2203         ctx = req->rq_svc_ctx;
2204         if (ctx->sc_policy->sp_sops->wrap_bulk)
2205                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2206
2207         return 0;
2208 }
2209 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2210
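/*
 * Server side of a bulk write: verify the transferred size (privacy mode
 * may legitimately transfer more than bd_nob), then let the policy's
 * unwrap_bulk() hook verify/decrypt the data.  Once the size check passes
 * this always returns 0 so that a reply can still be sent.
 */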
2211 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2212                             struct ptlrpc_bulk_desc *desc)
2213 {
2214         struct ptlrpc_svc_ctx *ctx;
2215         int                    rc;
2216
2217         LASSERT(req->rq_bulk_write);
2218
2219         /*
2220          * if it's in privacy mode, transferred should >= expected; otherwise
2221          * transferred should == expected.
2222          */
2223         if (desc->bd_nob_transferred < desc->bd_nob ||
2224             (desc->bd_nob_transferred > desc->bd_nob &&
2225              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2226              SPTLRPC_BULK_SVC_PRIV)) {
2227                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2228                           desc->bd_nob_transferred, desc->bd_nob);
2229                 return -ETIMEDOUT;
2230         }
2231
2232         if (!req->rq_pack_bulk)
2233                 return 0;
2234
2235         ctx = req->rq_svc_ctx;
2236         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2237                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2238                 if (rc)
2239                         CERROR("error unwrap bulk: %d\n", rc);
2240         }
2241
2242         /* return 0 to allow the reply to be sent */
2243         return 0;
2244 }
2245 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2246
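/*
 * Server side of a bulk write: give the policy's prep_bulk() hook a chance
 * to prepare the bulk descriptor before the transfer starts.
 */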
2247 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2248                           struct ptlrpc_bulk_desc *desc)
2249 {
2250         struct ptlrpc_svc_ctx *ctx;
2251
2252         LASSERT(req->rq_bulk_write);
2253
2254         if (!req->rq_pack_bulk)
2255                 return 0;
2256
2257         ctx = req->rq_svc_ctx;
2258         if (ctx->sc_policy->sp_sops->prep_bulk)
2259                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2260
2261         return 0;
2262 }
2263 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2264
2265 /****************************************
2266  * user descriptor helpers              *
2267  ****************************************/
2268
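/*
 * Size in bytes of a user descriptor for the current process, with the
 * supplementary group count capped at LUSTRE_MAX_GROUPS (and 0 groups in
 * userspace builds).
 */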
2269 int sptlrpc_current_user_desc_size(void)
2270 {
2271         int ngroups;
2272
2273 #ifdef __KERNEL__
2274         ngroups = current_ngroups;
2275
2276         if (ngroups > LUSTRE_MAX_GROUPS)
2277                 ngroups = LUSTRE_MAX_GROUPS;
2278 #else
2279         ngroups = 0;
2280 #endif
2281         return sptlrpc_user_desc_size(ngroups);
2282 }
2283 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2284
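/*
 * Fill the user descriptor at @offset of @msg with the current process's
 * uid/gid/fsuid/fsgid, capabilities and (in kernel builds) supplementary
 * groups, limited by the space available in that buffer segment.
 */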
2285 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2286 {
2287         struct ptlrpc_user_desc *pud;
2288
2289         pud = lustre_msg_buf(msg, offset, 0);
2290
2291         pud->pud_uid = cfs_current()->uid;
2292         pud->pud_gid = cfs_current()->gid;
2293         pud->pud_fsuid = cfs_current()->fsuid;
2294         pud->pud_fsgid = cfs_current()->fsgid;
2295         pud->pud_cap = cfs_curproc_cap_pack();
2296         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2297
2298 #ifdef __KERNEL__
2299         task_lock(current);
2300         if (pud->pud_ngroups > current_ngroups)
2301                 pud->pud_ngroups = current_ngroups;
2302         memcpy(pud->pud_groups, cfs_current()->group_info->blocks[0],
2303                pud->pud_ngroups * sizeof(__u32));
2304         task_unlock(current);
2305 #endif
2306
2307         return 0;
2308 }
2309 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2310
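/*
 * Validate (and byte-swap if @swabbed) the user descriptor at @offset of
 * @msg; descriptors whose group count exceeds LUSTRE_MAX_GROUPS or
 * overflows the buffer segment are rejected with -EINVAL.
 */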
2311 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2312 {
2313         struct ptlrpc_user_desc *pud;
2314         int                      i;
2315
2316         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2317         if (!pud)
2318                 return -EINVAL;
2319
2320         if (swabbed) {
2321                 __swab32s(&pud->pud_uid);
2322                 __swab32s(&pud->pud_gid);
2323                 __swab32s(&pud->pud_fsuid);
2324                 __swab32s(&pud->pud_fsgid);
2325                 __swab32s(&pud->pud_cap);
2326                 __swab32s(&pud->pud_ngroups);
2327         }
2328
2329         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2330                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2331                 return -EINVAL;
2332         }
2333
2334         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2335             msg->lm_buflens[offset]) {
2336                 CERROR("%u groups are claimed but bufsize only %u\n",
2337                        pud->pud_ngroups, msg->lm_buflens[offset]);
2338                 return -EINVAL;
2339         }
2340
2341         if (swabbed) {
2342                 for (i = 0; i < pud->pud_ngroups; i++)
2343                         __swab32s(&pud->pud_groups[i]);
2344         }
2345
2346         return 0;
2347 }
2348 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2349
2350 /****************************************
2351  * misc helpers                         *
2352  ****************************************/
2353
2354 const char * sec2target_str(struct ptlrpc_sec *sec)
2355 {
2356         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2357                 return "*";
2358         if (sec_is_reverse(sec))
2359                 return "c";
2360         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2361 }
2362 EXPORT_SYMBOL(sec2target_str);
2363
2364 /*
2365  * return true if the bulk data is protected
2366  */
2367 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2368 {
2369         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2370         case SPTLRPC_BULK_SVC_INTG:
2371         case SPTLRPC_BULK_SVC_PRIV:
2372                 return 1;
2373         default:
2374                 return 0;
2375         }
2376 }
2377 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2378
2379 /****************************************
2380  * crypto API helper/alloc blkcipher    *
2381  ****************************************/
2382
2383 /****************************************
2384  * initialize/finalize                  *
2385  ****************************************/
2386
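/*
 * Module init: bring up the sptlrpc pieces in dependency order (gc, conf,
 * encrypt page pool, null and plain policies, lprocfs); on failure the
 * already-initialized pieces are torn down in reverse order.
 */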
2387 int __init sptlrpc_init(void)
2388 {
2389         int rc;
2390
2391         rwlock_init(&policy_lock);
2392
2393         rc = sptlrpc_gc_init();
2394         if (rc)
2395                 goto out;
2396
2397         rc = sptlrpc_conf_init();
2398         if (rc)
2399                 goto out_gc;
2400
2401         rc = sptlrpc_enc_pool_init();
2402         if (rc)
2403                 goto out_conf;
2404
2405         rc = sptlrpc_null_init();
2406         if (rc)
2407                 goto out_pool;
2408
2409         rc = sptlrpc_plain_init();
2410         if (rc)
2411                 goto out_null;
2412
2413         rc = sptlrpc_lproc_init();
2414         if (rc)
2415                 goto out_plain;
2416
2417         return 0;
2418
2419 out_plain:
2420         sptlrpc_plain_fini();
2421 out_null:
2422         sptlrpc_null_fini();
2423 out_pool:
2424         sptlrpc_enc_pool_fini();
2425 out_conf:
2426         sptlrpc_conf_fini();
2427 out_gc:
2428         sptlrpc_gc_fini();
2429 out:
2430         return rc;
2431 }
2432
2433 void __exit sptlrpc_fini(void)
2434 {
2435         sptlrpc_lproc_fini();
2436         sptlrpc_plain_fini();
2437         sptlrpc_null_fini();
2438         sptlrpc_enc_pool_fini();
2439         sptlrpc_conf_fini();
2440         sptlrpc_gc_fini();
2441 }