1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2008 Sun Microsystems, Inc. All rights reserved
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #ifndef EXPORT_SYMTAB
42 #define EXPORT_SYMTAB
43 #endif
44 #define DEBUG_SUBSYSTEM S_SEC
45
46 #include <libcfs/libcfs.h>
47 #ifndef __KERNEL__
48 #include <liblustre.h>
49 #include <libcfs/list.h>
50 #else
51 #include <linux/crypto.h>
52 #include <linux/key.h>
53 #endif
54
55 #include <obd.h>
56 #include <obd_class.h>
57 #include <obd_support.h>
58 #include <lustre_net.h>
59 #include <lustre_import.h>
60 #include <lustre_dlm.h>
61 #include <lustre_sec.h>
62
63 #include "ptlrpc_internal.h"
64
65 /***********************************************
66  * policy registers                            *
67  ***********************************************/
68
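/*
 * Registry of security policies, indexed by SPTLRPC_POLICY_* number and
 * protected by policy_lock. Policies add and remove themselves here via
 * sptlrpc_register_policy()/sptlrpc_unregister_policy() below.
 */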
69 static cfs_rwlock_t policy_lock;
70 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
71         NULL,
72 };
73
74 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
75 {
76         __u16 number = policy->sp_policy;
77
78         LASSERT(policy->sp_name);
79         LASSERT(policy->sp_cops);
80         LASSERT(policy->sp_sops);
81
82         if (number >= SPTLRPC_POLICY_MAX)
83                 return -EINVAL;
84
85         cfs_write_lock(&policy_lock);
86         if (unlikely(policies[number])) {
87                 cfs_write_unlock(&policy_lock);
88                 return -EALREADY;
89         }
90         policies[number] = policy;
91         cfs_write_unlock(&policy_lock);
92
93         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
94         return 0;
95 }
96 EXPORT_SYMBOL(sptlrpc_register_policy);
97
98 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
99 {
100         __u16 number = policy->sp_policy;
101
102         LASSERT(number < SPTLRPC_POLICY_MAX);
103
104         cfs_write_lock(&policy_lock);
105         if (unlikely(policies[number] == NULL)) {
106                 cfs_write_unlock(&policy_lock);
107                 CERROR("%s: already unregistered\n", policy->sp_name);
108                 return -EINVAL;
109         }
110
111         LASSERT(policies[number] == policy);
112         policies[number] = NULL;
113         cfs_write_unlock(&policy_lock);
114
115         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
116         return 0;
117 }
118 EXPORT_SYMBOL(sptlrpc_unregister_policy);
119
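/*
 * Map a wire flavor to its registered policy, taking a module reference on
 * the policy. For GSS flavors whose policy is not yet registered, try to
 * load the ptlrpc_gss module on demand, but only once.
 */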
120 static
121 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
122 {
123         static CFS_DECLARE_MUTEX(load_mutex);
124         static cfs_atomic_t       loaded = CFS_ATOMIC_INIT(0);
125         struct ptlrpc_sec_policy *policy;
126         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
127         __u16                     flag = 0;
128
129         if (number >= SPTLRPC_POLICY_MAX)
130                 return NULL;
131
132         while (1) {
133                 cfs_read_lock(&policy_lock);
134                 policy = policies[number];
135                 if (policy && !cfs_try_module_get(policy->sp_owner))
136                         policy = NULL;
137                 if (policy == NULL)
138                         flag = cfs_atomic_read(&loaded);
139                 cfs_read_unlock(&policy_lock);
140
141                 if (policy != NULL || flag != 0 ||
142                     number != SPTLRPC_POLICY_GSS)
143                         break;
144
145                 /* try to load gss module, once */
146                 cfs_mutex_down(&load_mutex);
147                 if (cfs_atomic_read(&loaded) == 0) {
148                         if (cfs_request_module("ptlrpc_gss") == 0)
149                                 CWARN("module ptlrpc_gss loaded on demand\n");
150                         else
151                                 CERROR("Unable to load module ptlrpc_gss\n");
152
153                         cfs_atomic_set(&loaded, 1);
154                 }
155                 cfs_mutex_up(&load_mutex);
156         }
157
158         return policy;
159 }
160
161 __u32 sptlrpc_name2flavor_base(const char *name)
162 {
163         if (!strcmp(name, "null"))
164                 return SPTLRPC_FLVR_NULL;
165         if (!strcmp(name, "plain"))
166                 return SPTLRPC_FLVR_PLAIN;
167         if (!strcmp(name, "krb5n"))
168                 return SPTLRPC_FLVR_KRB5N;
169         if (!strcmp(name, "krb5a"))
170                 return SPTLRPC_FLVR_KRB5A;
171         if (!strcmp(name, "krb5i"))
172                 return SPTLRPC_FLVR_KRB5I;
173         if (!strcmp(name, "krb5p"))
174                 return SPTLRPC_FLVR_KRB5P;
175
176         return SPTLRPC_FLVR_INVALID;
177 }
178 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
179
180 const char *sptlrpc_flavor2name_base(__u32 flvr)
181 {
182         __u32   base = SPTLRPC_FLVR_BASE(flvr);
183
184         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
185                 return "null";
186         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
187                 return "plain";
188         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
189                 return "krb5n";
190         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
191                 return "krb5a";
192         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
193                 return "krb5i";
194         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
195                 return "krb5p";
196
197         CERROR("invalid wire flavor 0x%x\n", flvr);
198         return "invalid";
199 }
200 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
201
202 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
203                                char *buf, int bufsize)
204 {
205         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
206                 snprintf(buf, bufsize, "hash:%s",
207                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
208         else
209                 snprintf(buf, bufsize, "%s",
210                          sptlrpc_flavor2name_base(sf->sf_rpc));
211
212         buf[bufsize - 1] = '\0';
213         return buf;
214 }
215 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
216
217 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
218 {
219         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
220
221         /*
222          * currently we don't support customized bulk specification for
223          * flavors other than plain
224          */
225         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
226                 char bspec[16];
227
228                 bspec[0] = '-';
229                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
230                 strncat(buf, bspec, bufsize);
231         }
232
233         buf[bufsize - 1] = '\0';
234         return buf;
235 }
236 EXPORT_SYMBOL(sptlrpc_flavor2name);
237
238 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
239 {
240         buf[0] = '\0';
241
242         if (flags & PTLRPC_SEC_FL_REVERSE)
243                 strncat(buf, "reverse,", bufsize);
244         if (flags & PTLRPC_SEC_FL_ROOTONLY)
245                 strncat(buf, "rootonly,", bufsize);
246         if (flags & PTLRPC_SEC_FL_UDESC)
247                 strncat(buf, "udesc,", bufsize);
248         if (flags & PTLRPC_SEC_FL_BULK)
249                 strncat(buf, "bulk,", bufsize);
250         if (buf[0] == '\0')
251                 strncat(buf, "-,", bufsize);
252
253         buf[bufsize - 1] = '\0';
254         return buf;
255 }
256 EXPORT_SYMBOL(sptlrpc_secflags2str);
257
258 /**************************************************
259  * client context APIs                            *
260  **************************************************/
261
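/*
 * Pick the credentials used to look up a client context for this sec: root
 * credentials for reverse or root-only secs (a reverse sec additionally
 * disables context creation and dead-context removal), otherwise the current
 * process's uid/gid.
 */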
262 static
263 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
264 {
265         struct vfs_cred vcred;
266         int create = 1, remove_dead = 1;
267
268         LASSERT(sec);
269         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
270
271         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
272                                      PTLRPC_SEC_FL_ROOTONLY)) {
273                 vcred.vc_uid = 0;
274                 vcred.vc_gid = 0;
275                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
276                         create = 0;
277                         remove_dead = 0;
278                 }
279         } else {
280                 vcred.vc_uid = cfs_curproc_uid();
281                 vcred.vc_gid = cfs_curproc_gid();
282         }
283
284         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
285                                                    create, remove_dead);
286 }
287
288 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
289 {
290         LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
291         cfs_atomic_inc(&ctx->cc_refcount);
292         return ctx;
293 }
294 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
295
296 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
297 {
298         struct ptlrpc_sec *sec = ctx->cc_sec;
299
300         LASSERT(sec);
301         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
302
303         if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
304                 return;
305
306         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
307 }
308 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
309
310 /*
311  * expire the context immediately.
312  * the caller must hold at least 1 ref on the ctx.
313  */
314 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
315 {
316         LASSERT(ctx->cc_ops->die);
317         ctx->cc_ops->die(ctx, 0);
318 }
319 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
320
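/*
 * Wake up every request waiting on this context, removing each one from the
 * context's cc_req_list first.
 */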
321 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
322 {
323         struct ptlrpc_request *req, *next;
324
325         cfs_spin_lock(&ctx->cc_lock);
326         cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
327                                      rq_ctx_chain) {
328                 cfs_list_del_init(&req->rq_ctx_chain);
329                 ptlrpc_client_wake_req(req);
330         }
331         cfs_spin_unlock(&ctx->cc_lock);
332 }
333 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
334
335 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
336 {
337         LASSERT(ctx->cc_ops);
338
339         if (ctx->cc_ops->display == NULL)
340                 return 0;
341
342         return ctx->cc_ops->display(ctx, buf, bufsize);
343 }
344
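/*
 * If a delayed sec adapt was scheduled (imp_sec_expire) and its deadline has
 * passed, clear it and perform the adapt now.
 */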
345 static int import_sec_check_expire(struct obd_import *imp)
346 {
347         int     adapt = 0;
348
349         cfs_spin_lock(&imp->imp_lock);
350         if (imp->imp_sec_expire &&
351             imp->imp_sec_expire < cfs_time_current_sec()) {
352                 adapt = 1;
353                 imp->imp_sec_expire = 0;
354         }
355         cfs_spin_unlock(&imp->imp_lock);
356
357         if (!adapt)
358                 return 0;
359
360         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
361         return sptlrpc_import_sec_adapt(imp, NULL, 0);
362 }
363
364 static int import_sec_validate_get(struct obd_import *imp,
365                                    struct ptlrpc_sec **sec)
366 {
367         int     rc;
368
369         if (unlikely(imp->imp_sec_expire)) {
370                 rc = import_sec_check_expire(imp);
371                 if (rc)
372                         return rc;
373         }
374
375         *sec = sptlrpc_import_sec_ref(imp);
376         if (*sec == NULL) {
377                 CERROR("import %p (%s) with no sec\n",
378                        imp, ptlrpc_import_state_name(imp->imp_state));
379                 return -EACCES;
380         }
381
382         if (unlikely((*sec)->ps_dying)) {
383                 CERROR("attempt to use dying sec %p\n", *sec);
384                 sptlrpc_sec_put(*sec);
385                 return -EACCES;
386         }
387
388         return 0;
389 }
390
391 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
392 {
393         struct obd_import *imp = req->rq_import;
394         struct ptlrpc_sec *sec;
395         int                rc;
396         ENTRY;
397
398         LASSERT(!req->rq_cli_ctx);
399         LASSERT(imp);
400
401         rc = import_sec_validate_get(imp, &sec);
402         if (rc)
403                 RETURN(rc);
404
405         req->rq_cli_ctx = get_my_ctx(sec);
406
407         sptlrpc_sec_put(sec);
408
409         if (!req->rq_cli_ctx) {
410                 CERROR("req %p: fail to get context\n", req);
411                 RETURN(-ENOMEM);
412         }
413
414         RETURN(0);
415 }
416
417 /*
418  * if @sync == 0, this function should return quickly without sleeping;
419  * otherwise it might trigger a context-destroying RPC to the server.
420  */
421 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
422 {
423         ENTRY;
424
425         LASSERT(req);
426         LASSERT(req->rq_cli_ctx);
427
428         /* the request might be asked to release its context early, while
429          * it is still on the context waiting list.
430          */
431         if (!cfs_list_empty(&req->rq_ctx_chain)) {
432                 cfs_spin_lock(&req->rq_cli_ctx->cc_lock);
433                 cfs_list_del_init(&req->rq_ctx_chain);
434                 cfs_spin_unlock(&req->rq_cli_ctx->cc_lock);
435         }
436
437         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
438         req->rq_cli_ctx = NULL;
439         EXIT;
440 }
441
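/*
 * Switch a request from @oldctx to @newctx: save the request message, free
 * the old request/reply buffers under the old context, recompute the flavor,
 * then allocate a fresh request buffer under the new context and copy the
 * message back. On failure the old flavor is restored.
 */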
442 static
443 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
444                            struct ptlrpc_cli_ctx *oldctx,
445                            struct ptlrpc_cli_ctx *newctx)
446 {
447         struct sptlrpc_flavor   old_flvr;
448         char                   *reqmsg;
449         int                     reqmsg_size;
450         int                     rc;
451
452         LASSERT(req->rq_reqmsg);
453         LASSERT(req->rq_reqlen);
454         LASSERT(req->rq_replen);
455
456         CWARN("req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
457               "switch sec %p(%s) -> %p(%s)\n", req,
458               oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
459               newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
460               oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
461               newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
462
463         /* save flavor */
464         old_flvr = req->rq_flvr;
465
466         /* save request message */
467         reqmsg_size = req->rq_reqlen;
468         OBD_ALLOC(reqmsg, reqmsg_size);
469         if (reqmsg == NULL)
470                 return -ENOMEM;
471         memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
472
473         /* release old req/rep buf */
474         req->rq_cli_ctx = oldctx;
475         sptlrpc_cli_free_reqbuf(req);
476         sptlrpc_cli_free_repbuf(req);
477         req->rq_cli_ctx = newctx;
478
479         /* recalculate the flavor */
480         sptlrpc_req_set_flavor(req, 0);
481
482         /* alloc a new request buffer;
483          * we don't need to alloc the reply buffer here, leave it to the
484          * rest of the ptlrpc machinery
485          */
486         rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
487         if (!rc) {
488                 LASSERT(req->rq_reqmsg);
489                 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
490         } else {
491                 CWARN("failed to alloc reqbuf: %d\n", rc);
492                 req->rq_flvr = old_flvr;
493         }
494
495         OBD_FREE(reqmsg, reqmsg_size);
496         return rc;
497 }
498
499 /**
500  * if the current context has died, or if we resend after the flavor has
501  * been switched, call this function to switch context. if no switch is
502  * needed, the request will end up with the same context.
503  *
504  * the request must already have a context. in any case of failure the
505  * old one is restored - a request must always have a context.
506  */
507 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
508 {
509         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
510         struct ptlrpc_cli_ctx *newctx;
511         int                    rc;
512         ENTRY;
513
514         LASSERT(oldctx);
515
516         sptlrpc_cli_ctx_get(oldctx);
517         sptlrpc_req_put_ctx(req, 0);
518
519         rc = sptlrpc_req_get_ctx(req);
520         if (unlikely(rc)) {
521                 LASSERT(!req->rq_cli_ctx);
522
523                 /* restore old ctx */
524                 req->rq_cli_ctx = oldctx;
525                 RETURN(rc);
526         }
527
528         newctx = req->rq_cli_ctx;
529         LASSERT(newctx);
530
531         if (unlikely(newctx == oldctx && 
532                      cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
533                 /*
534                  * still got the old dead ctx; usually means the system is too busy
535                  */
536                 CWARN("ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
537                       newctx, newctx->cc_flags);
538
539                 cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
540                                                    CFS_HZ);
541         } else {
542                 /*
543                  * it's possible newctx == oldctx if we're switching
544                  * subflavor with the same sec.
545                  */
546                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
547                 if (rc) {
548                         /* restore old ctx */
549                         sptlrpc_req_put_ctx(req, 0);
550                         req->rq_cli_ctx = oldctx;
551                         RETURN(rc);
552                 }
553
554                 LASSERT(req->rq_cli_ctx == newctx);
555         }
556
557         sptlrpc_cli_ctx_put(oldctx, 1);
558         RETURN(0);
559 }
560 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
561
562 static
563 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
564 {
565         if (cli_ctx_is_refreshed(ctx))
566                 return 1;
567         return 0;
568 }
569
570 static
571 int ctx_refresh_timeout(void *data)
572 {
573         struct ptlrpc_request *req = data;
574         int rc;
575
576         /* conn_cnt is needed in expire_one_request */
577         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
578
579         rc = ptlrpc_expire_one_request(req, 1);
580         /* if we started recovery, we should mark this ctx dead; otherwise,
581          * if lgssd died, nobody would retire this ctx and subsequent connect
582          * attempts would keep finding the same ctx, causing a deadlock.
583          * this assumes the expiry time of the request is later than the
584          * context refresh expiry time.
585          */
586         if (rc == 0)
587                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
588         return rc;
589 }
590
591 static
592 void ctx_refresh_interrupt(void *data)
593 {
594         struct ptlrpc_request *req = data;
595
596         cfs_spin_lock(&req->rq_lock);
597         req->rq_intr = 1;
598         cfs_spin_unlock(&req->rq_lock);
599 }
600
601 static
602 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
603 {
604         cfs_spin_lock(&ctx->cc_lock);
605         if (!cfs_list_empty(&req->rq_ctx_chain))
606                 cfs_list_del_init(&req->rq_ctx_chain);
607         cfs_spin_unlock(&ctx->cc_lock);
608 }
609
610 /*
611  * the status of the context may be changed by other threads at any time;
612  * we allow this race. but once we return 0, the caller will assume it is
613  * uptodate and keep using it until the owning rpc is done.
614  *
615  * @timeout:
616  *    < 0  - don't wait
617  *    = 0  - wait until success or a fatal error occurs
618  *    > 0  - timeout value (in seconds)
619  *
620  * return 0 only if the context is uptodate.
621  */
622 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
623 {
624         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
625         struct ptlrpc_sec      *sec;
626         struct l_wait_info      lwi;
627         int                     rc;
628         ENTRY;
629
630         LASSERT(ctx);
631
632         if (req->rq_ctx_init || req->rq_ctx_fini)
633                 RETURN(0);
634
635         /*
636          * during the process a request's context might even change type
637          * (e.g. from a gss ctx to a plain ctx), so on each loop we need to
638          * re-check everything
639          */
640 again:
641         rc = import_sec_validate_get(req->rq_import, &sec);
642         if (rc)
643                 RETURN(rc);
644
645         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
646                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
647                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
648                 req_off_ctx_list(req, ctx);
649                 sptlrpc_req_replace_dead_ctx(req);
650                 ctx = req->rq_cli_ctx;
651         }
652         sptlrpc_sec_put(sec);
653
654         if (cli_ctx_is_eternal(ctx))
655                 RETURN(0);
656
657         if (unlikely(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
658                 LASSERT(ctx->cc_ops->refresh);
659                 ctx->cc_ops->refresh(ctx);
660         }
661         LASSERT(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
662
663         LASSERT(ctx->cc_ops->validate);
664         if (ctx->cc_ops->validate(ctx) == 0) {
665                 req_off_ctx_list(req, ctx);
666                 RETURN(0);
667         }
668
669         if (unlikely(cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
670                 cfs_spin_lock(&req->rq_lock);
671                 req->rq_err = 1;
672                 cfs_spin_unlock(&req->rq_lock);
673                 req_off_ctx_list(req, ctx);
674                 RETURN(-EPERM);
675         }
676
677         /* This is subtle. For a resent message we have to keep the original
678          * context to survive the following situation:
679          *  1. the request is sent to the server
680          *  2. recovery is kicked off
681          *  3. recovery finishes, the request is marked as resent
682          *  4. the request is resent
683          *  5. the old reply from the server is received (because the xid is the same)
684          *  6. the reply is verified (this has to succeed)
685          *  7. the new reply from the server is received, and lnet drops it
686          *
687          * Note we can't simply change the xid of a resent request because
688          * the server relies on it for reply reconstruction.
689          *
690          * Commonly the original context should still be uptodate because we
691          * have a nice long expiry time; and the server will keep its half of
692          * the context because we hold at least one ref on the old context,
693          * which prevents the context-destroy RPC from being sent. So the server
694          * can still accept the request and finish the RPC. Two cases:
695          *  1. If the server-side context has been trimmed, NO_CONTEXT will
696          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
697          *     new context by force.
698          *  2. If the current context was never refreshed, we are fine: we
699          *     never actually sent a request with the old context before.
700          */
701         if (cfs_test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
702             unlikely(req->rq_reqmsg) &&
703             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
704                 req_off_ctx_list(req, ctx);
705                 RETURN(0);
706         }
707
708         if (unlikely(cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
709                 req_off_ctx_list(req, ctx);
710                 /*
711                  * don't switch ctx if import was deactivated
712                  */
713                 if (req->rq_import->imp_deactive) {
714                         cfs_spin_lock(&req->rq_lock);
715                         req->rq_err = 1;
716                         cfs_spin_unlock(&req->rq_lock);
717                         RETURN(-EINTR);
718                 }
719
720                 rc = sptlrpc_req_replace_dead_ctx(req);
721                 if (rc) {
722                         LASSERT(ctx == req->rq_cli_ctx);
723                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
724                                 req, ctx, rc);
725                         cfs_spin_lock(&req->rq_lock);
726                         req->rq_err = 1;
727                         cfs_spin_unlock(&req->rq_lock);
728                         RETURN(rc);
729                 }
730
731                 ctx = req->rq_cli_ctx;
732                 goto again;
733         }
734
735         /* Now we're sure this context is in the middle of an upcall;
736          * add ourselves to its waiting list
737          */
738         cfs_spin_lock(&ctx->cc_lock);
739         if (cfs_list_empty(&req->rq_ctx_chain))
740                 cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
741         cfs_spin_unlock(&ctx->cc_lock);
742
743         if (timeout < 0)
744                 RETURN(-EWOULDBLOCK);
745
746         /* Clear any flags that may be present from previous sends */
747         LASSERT(req->rq_receiving_reply == 0);
748         cfs_spin_lock(&req->rq_lock);
749         req->rq_err = 0;
750         req->rq_timedout = 0;
751         req->rq_resend = 0;
752         req->rq_restart = 0;
753         cfs_spin_unlock(&req->rq_lock);
754
755         lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
756                                ctx_refresh_interrupt, req);
757         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
758
759         /* we can be here in the following cases:
760          * - successfully refreshed;
761          * - interrupted;
762          * - timed out, and we don't want to recover from the failure;
763          * - timed out, and woken up when recovery finished;
764          * - someone else marked this ctx dead by force;
765          * - someone invalidated the req and called ptlrpc_client_wake_req(),
766          *   e.g. ptlrpc_abort_inflight();
767          */
768         if (!cli_ctx_is_refreshed(ctx)) {
769                 /* timed out or interrupted */
770                 req_off_ctx_list(req, ctx);
771
772                 LASSERT(rc != 0);
773                 RETURN(rc);
774         }
775
776         goto again;
777 }
778
779 /*
780  * Note this could be called in two situations:
781  * - new request from ptlrpc_pre_req(), with proper @opcode
782  * - old request which changed ctx in the middle, with @opcode == 0
783  */
784 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
785 {
786         struct ptlrpc_sec *sec;
787
788         LASSERT(req->rq_import);
789         LASSERT(req->rq_cli_ctx);
790         LASSERT(req->rq_cli_ctx->cc_sec);
791         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
792
793         /* special security flags according to opcode */
794         switch (opcode) {
795         case OST_READ:
796         case MDS_READPAGE:
797                 req->rq_bulk_read = 1;
798                 break;
799         case OST_WRITE:
800         case MDS_WRITEPAGE:
801                 req->rq_bulk_write = 1;
802                 break;
803         case SEC_CTX_INIT:
804                 req->rq_ctx_init = 1;
805                 break;
806         case SEC_CTX_FINI:
807                 req->rq_ctx_fini = 1;
808                 break;
809         case 0:
810                 /* init/fini rpcs won't be resent, so they can't be here */
811                 LASSERT(req->rq_ctx_init == 0);
812                 LASSERT(req->rq_ctx_fini == 0);
813
814                 /* cleanup flags, which should be recalculated */
815                 req->rq_pack_udesc = 0;
816                 req->rq_pack_bulk = 0;
817                 break;
818         }
819
820         sec = req->rq_cli_ctx->cc_sec;
821
822         cfs_spin_lock(&sec->ps_lock);
823         req->rq_flvr = sec->ps_flvr;
824         cfs_spin_unlock(&sec->ps_lock);
825
826         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
827          * destruction rpc */
828         if (unlikely(req->rq_ctx_init))
829                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
830         else if (unlikely(req->rq_ctx_fini))
831                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
832
833         /* user descriptor flag, null security can't do it anyway */
834         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
835             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
836                 req->rq_pack_udesc = 1;
837
838         /* bulk security flag */
839         if ((req->rq_bulk_read || req->rq_bulk_write) &&
840             sptlrpc_flavor_has_bulk(&req->rq_flvr))
841                 req->rq_pack_bulk = 1;
842 }
843
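/*
 * For privacy (sealed) requests the clear buffer holds the request message,
 * so once the request is out on the wire the separately allocated cipher
 * buffer rq_reqbuf can be freed here, unless it came from a pool.
 */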
844 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
845 {
846         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
847                 return;
848
849         LASSERT(req->rq_clrbuf);
850         if (req->rq_pool || !req->rq_reqbuf)
851                 return;
852
853         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
854         req->rq_reqbuf = NULL;
855         req->rq_reqbuf_len = 0;
856 }
857
858 /*
859  * check whether the current user has a valid context for an import or not.
860  * may retry repeatedly in case of non-fatal errors.
861  * return 0 on success, < 0 on failure
862  */
863 int sptlrpc_import_check_ctx(struct obd_import *imp)
864 {
865         struct ptlrpc_sec     *sec;
866         struct ptlrpc_cli_ctx *ctx;
867         struct ptlrpc_request *req = NULL;
868         int rc;
869         ENTRY;
870
871         cfs_might_sleep();
872
873         sec = sptlrpc_import_sec_ref(imp);
874         ctx = get_my_ctx(sec);
875         sptlrpc_sec_put(sec);
876
877         if (!ctx)
878                 RETURN(-ENOMEM);
879
880         if (cli_ctx_is_eternal(ctx) ||
881             ctx->cc_ops->validate(ctx) == 0) {
882                 sptlrpc_cli_ctx_put(ctx, 1);
883                 RETURN(0);
884         }
885
886         if (cli_ctx_is_error(ctx)) {
887                 sptlrpc_cli_ctx_put(ctx, 1);
888                 RETURN(-EACCES);
889         }
890
891         OBD_ALLOC_PTR(req);
892         if (!req)
893                 RETURN(-ENOMEM);
894
895         cfs_spin_lock_init(&req->rq_lock);
896         cfs_atomic_set(&req->rq_refcount, 10000);
897         CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
898         cfs_waitq_init(&req->rq_reply_waitq);
899         req->rq_import = imp;
900         req->rq_flvr = sec->ps_flvr;
901         req->rq_cli_ctx = ctx;
902
903         rc = sptlrpc_req_refresh_ctx(req, 0);
904         LASSERT(cfs_list_empty(&req->rq_ctx_chain));
905         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
906         OBD_FREE_PTR(req);
907
908         RETURN(rc);
909 }
910
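/*
 * Wrap an outgoing request according to the service part of its flavor:
 * sign for NULL/AUTH/INTG, seal for PRIV. Any bulk descriptor is wrapped
 * first, since at this point the context is known to be uptodate.
 */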
911 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
912 {
913         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
914         int rc = 0;
915         ENTRY;
916
917         LASSERT(ctx);
918         LASSERT(ctx->cc_sec);
919         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
920
921         /* we wrap the bulk request here because now we can be sure
922          * the context is uptodate.
923          */
924         if (req->rq_bulk) {
925                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
926                 if (rc)
927                         RETURN(rc);
928         }
929
930         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
931         case SPTLRPC_SVC_NULL:
932         case SPTLRPC_SVC_AUTH:
933         case SPTLRPC_SVC_INTG:
934                 LASSERT(ctx->cc_ops->sign);
935                 rc = ctx->cc_ops->sign(ctx, req);
936                 break;
937         case SPTLRPC_SVC_PRIV:
938                 LASSERT(ctx->cc_ops->seal);
939                 rc = ctx->cc_ops->seal(ctx, req);
940                 break;
941         default:
942                 LBUG();
943         }
944
945         if (rc == 0) {
946                 LASSERT(req->rq_reqdata_len);
947                 LASSERT(req->rq_reqdata_len % 8 == 0);
948                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
949         }
950
951         RETURN(rc);
952 }
953
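/*
 * Common reply unwrapping: unpack the raw reply in rq_repdata, check that
 * its policy matches the request's flavor, then verify (NULL/AUTH/INTG) or
 * unseal (PRIV) it; on success rq_repmsg points at the plain reply message.
 */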
954 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
955 {
956         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
957         int                    rc;
958         ENTRY;
959
960         LASSERT(ctx);
961         LASSERT(ctx->cc_sec);
962         LASSERT(req->rq_repbuf);
963         LASSERT(req->rq_repdata);
964         LASSERT(req->rq_repmsg == NULL);
965
966         req->rq_rep_swab_mask = 0;
967
968         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
969         switch (rc) {
970         case 1:
971                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
972         case 0:
973                 break;
974         default:
975                 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
976                 RETURN(-EPROTO);
977         }
978
979         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
980                 CERROR("replied data length %d too small\n",
981                        req->rq_repdata_len);
982                 RETURN(-EPROTO);
983         }
984
985         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
986             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
987                 CERROR("reply policy %u doesn't match request policy %u\n",
988                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
989                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
990                 RETURN(-EPROTO);
991         }
992
993         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
994         case SPTLRPC_SVC_NULL:
995         case SPTLRPC_SVC_AUTH:
996         case SPTLRPC_SVC_INTG:
997                 LASSERT(ctx->cc_ops->verify);
998                 rc = ctx->cc_ops->verify(ctx, req);
999                 break;
1000         case SPTLRPC_SVC_PRIV:
1001                 LASSERT(ctx->cc_ops->unseal);
1002                 rc = ctx->cc_ops->unseal(ctx, req);
1003                 break;
1004         default:
1005                 LBUG();
1006         }
1007         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1008
1009         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1010             !req->rq_ctx_init)
1011                 req->rq_rep_swab_mask = 0;
1012         RETURN(rc);
1013 }
1014
1015 /*
1016  * by the time this is called, the reply buffer should have been un-posted,
1017  * so nothing is going to change.
1018  */
1019 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1020 {
1021         LASSERT(req->rq_repbuf);
1022         LASSERT(req->rq_repdata == NULL);
1023         LASSERT(req->rq_repmsg == NULL);
1024         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1025
1026         if (req->rq_reply_off == 0 &&
1027             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1028                 CERROR("real reply with offset 0\n");
1029                 return -EPROTO;
1030         }
1031
1032         if (req->rq_reply_off % 8 != 0) {
1033                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1034                 return -EPROTO;
1035         }
1036
1037         req->rq_repdata = (struct lustre_msg *)
1038                                 (req->rq_repbuf + req->rq_reply_off);
1039         req->rq_repdata_len = req->rq_nob_received;
1040
1041         return do_cli_unwrap_reply(req);
1042 }
1043
1044 /**
1045  * When this is called the receive buffer might still be posted, so the
1046  * reply data may change at any time, whether or not we hold rq_lock. we
1047  * expect rq_reply_off to be 0 and rq_nob_received to be the early reply size.
1048  *
1049  * we allocate a separate ptlrpc_request and reply buffer for early reply
1050  * processing; on success we return 0 and \a req_ret is a duplicated
1051  * ptlrpc_request. the caller must call sptlrpc_cli_finish_early_reply() on
1052  * the returned request to release it. if anything goes wrong \a req_ret will not be set.
1053  */
1054 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1055                                    struct ptlrpc_request **req_ret)
1056 {
1057         struct ptlrpc_request  *early_req;
1058         char                   *early_buf;
1059         int                     early_bufsz, early_size;
1060         int                     rc;
1061         ENTRY;
1062
1063         OBD_ALLOC_PTR(early_req);
1064         if (early_req == NULL)
1065                 RETURN(-ENOMEM);
1066
1067         early_size = req->rq_nob_received;
1068         early_bufsz = size_roundup_power2(early_size);
1069         OBD_ALLOC(early_buf, early_bufsz);
1070         if (early_buf == NULL)
1071                 GOTO(err_req, rc = -ENOMEM);
1072
1073         /* sanity checks and copying data out, all done inside the spinlock */
1074         cfs_spin_lock(&req->rq_lock);
1075
1076         if (req->rq_replied) {
1077                 cfs_spin_unlock(&req->rq_lock);
1078                 GOTO(err_buf, rc = -EALREADY);
1079         }
1080
1081         LASSERT(req->rq_repbuf);
1082         LASSERT(req->rq_repdata == NULL);
1083         LASSERT(req->rq_repmsg == NULL);
1084
1085         if (req->rq_reply_off != 0) {
1086                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1087                 cfs_spin_unlock(&req->rq_lock);
1088                 GOTO(err_buf, rc = -EPROTO);
1089         }
1090
1091         if (req->rq_nob_received != early_size) {
1092                 /* even if another early reply arrived, the size should be the same */
1093                 CERROR("data size has changed from %u to %u\n",
1094                        early_size, req->rq_nob_received);
1095                 cfs_spin_unlock(&req->rq_lock);
1096                 GOTO(err_buf, rc = -EINVAL);
1097         }
1098
1099         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1100                 CERROR("early reply length %d too small\n",
1101                        req->rq_nob_received);
1102                 cfs_spin_unlock(&req->rq_lock);
1103                 GOTO(err_buf, rc = -EALREADY);
1104         }
1105
1106         memcpy(early_buf, req->rq_repbuf, early_size);
1107         cfs_spin_unlock(&req->rq_lock);
1108
1109         cfs_spin_lock_init(&early_req->rq_lock);
1110         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1111         early_req->rq_flvr = req->rq_flvr;
1112         early_req->rq_repbuf = early_buf;
1113         early_req->rq_repbuf_len = early_bufsz;
1114         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1115         early_req->rq_repdata_len = early_size;
1116         early_req->rq_early = 1;
1117
1118         rc = do_cli_unwrap_reply(early_req);
1119         if (rc) {
1120                 DEBUG_REQ(D_ADAPTTO, early_req,
1121                           "error %d unwrap early reply", rc);
1122                 GOTO(err_ctx, rc);
1123         }
1124
1125         LASSERT(early_req->rq_repmsg);
1126         *req_ret = early_req;
1127         RETURN(0);
1128
1129 err_ctx:
1130         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1131 err_buf:
1132         OBD_FREE(early_buf, early_bufsz);
1133 err_req:
1134         OBD_FREE_PTR(early_req);
1135         RETURN(rc);
1136 }
1137
1138 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1139 {
1140         LASSERT(early_req->rq_repbuf);
1141         LASSERT(early_req->rq_repdata);
1142         LASSERT(early_req->rq_repmsg);
1143
1144         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1145         OBD_FREE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1146         OBD_FREE_PTR(early_req);
1147 }
1148
1149 /**************************************************
1150  * sec ID                                         *
1151  **************************************************/
1152
1153 /*
1154  * "fixed" secs (e.g. null) use sec_id < 0
1155  */
1156 static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
1157
1158 int sptlrpc_get_next_secid(void)
1159 {
1160         return cfs_atomic_inc_return(&sptlrpc_sec_id);
1161 }
1162 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1163
1164 /**************************************************
1165  * client side high-level security APIs           *
1166  **************************************************/
1167
1168 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1169                                    int grace, int force)
1170 {
1171         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1172
1173         LASSERT(policy->sp_cops);
1174         LASSERT(policy->sp_cops->flush_ctx_cache);
1175
1176         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1177 }
1178
1179 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1180 {
1181         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1182
1183         LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
1184         LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
1185         LASSERT(policy->sp_cops->destroy_sec);
1186
1187         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1188
1189         policy->sp_cops->destroy_sec(sec);
1190         sptlrpc_policy_put(policy);
1191 }
1192
1193 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1194 {
1195         sec_cop_destroy_sec(sec);
1196 }
1197 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1198
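/*
 * Kill a sec: invoke the policy's kill_sec hook, if any, and force-flush
 * all contexts cached in it.
 */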
1199 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1200 {
1201         LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1202
1203         if (sec->ps_policy->sp_cops->kill_sec) {
1204                 sec->ps_policy->sp_cops->kill_sec(sec);
1205
1206                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1207         }
1208 }
1209
1210 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1211 {
1212         if (sec) {
1213                 LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1214                 cfs_atomic_inc(&sec->ps_refcount);
1215         }
1216
1217         return sec;
1218 }
1219 EXPORT_SYMBOL(sptlrpc_sec_get);
1220
1221 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1222 {
1223         if (sec) {
1224                 LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1225
1226                 if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
1227                         LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
1228
1229                         sptlrpc_gc_del_sec(sec);
1230                         sec_cop_destroy_sec(sec);
1231                 }
1232         }
1233 }
1234 EXPORT_SYMBOL(sptlrpc_sec_put);
1235
1236 /*
1237  * the policy module is responsible for taking a reference on the import
1238  */
1239 static
1240 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1241                                        struct ptlrpc_svc_ctx *svc_ctx,
1242                                        struct sptlrpc_flavor *sf,
1243                                        enum lustre_sec_part sp)
1244 {
1245         struct ptlrpc_sec_policy *policy;
1246         struct ptlrpc_sec        *sec;
1247         char                      str[32];
1248         ENTRY;
1249
1250         if (svc_ctx) {
1251                 LASSERT(imp->imp_dlm_fake == 1);
1252
1253                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1254                        imp->imp_obd->obd_type->typ_name,
1255                        imp->imp_obd->obd_name,
1256                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1257
1258                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1259                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1260         } else {
1261                 LASSERT(imp->imp_dlm_fake == 0);
1262
1263                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1264                        imp->imp_obd->obd_type->typ_name,
1265                        imp->imp_obd->obd_name,
1266                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1267
1268                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1269                 if (!policy) {
1270                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1271                         RETURN(NULL);
1272                 }
1273         }
1274
1275         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1276         if (sec) {
1277                 cfs_atomic_inc(&sec->ps_refcount);
1278
1279                 sec->ps_part = sp;
1280
1281                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1282                         sptlrpc_gc_add_sec(sec);
1283         } else {
1284                 sptlrpc_policy_put(policy);
1285         }
1286
1287         RETURN(sec);
1288 }
1289
1290 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1291 {
1292         struct ptlrpc_sec *sec;
1293
1294         cfs_spin_lock(&imp->imp_lock);
1295         sec = sptlrpc_sec_get(imp->imp_sec);
1296         cfs_spin_unlock(&imp->imp_lock);
1297
1298         return sec;
1299 }
1300 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1301
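/*
 * Swap in a new sec for the import under imp_lock. The previous sec, if
 * any, is killed and the reference the import held on it is dropped.
 */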
1302 static void sptlrpc_import_sec_install(struct obd_import *imp,
1303                                        struct ptlrpc_sec *sec)
1304 {
1305         struct ptlrpc_sec *old_sec;
1306
1307         LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1308
1309         cfs_spin_lock(&imp->imp_lock);
1310         old_sec = imp->imp_sec;
1311         imp->imp_sec = sec;
1312         cfs_spin_unlock(&imp->imp_lock);
1313
1314         if (old_sec) {
1315                 sptlrpc_sec_kill(old_sec);
1316
1317                 /* balance the ref taken by this import */
1318                 sptlrpc_sec_put(old_sec);
1319         }
1320 }
1321
1322 static inline
1323 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1324 {
1325         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1326 }
1327
1328 static inline
1329 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1330 {
1331         *dst = *src;
1332 }
1333
1334 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1335                                              struct ptlrpc_sec *sec,
1336                                              struct sptlrpc_flavor *sf)
1337 {
1338         char    str1[32], str2[32];
1339
1340         if (sec->ps_flvr.sf_flags != sf->sf_flags)
1341                 CWARN("changing sec flags: %s -> %s\n",
1342                       sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1343                                            str1, sizeof(str1)),
1344                       sptlrpc_secflags2str(sf->sf_flags,
1345                                            str2, sizeof(str2)));
1346
1347         cfs_spin_lock(&sec->ps_lock);
1348         flavor_copy(&sec->ps_flvr, sf);
1349         cfs_spin_unlock(&sec->ps_lock);
1350 }
1351
1352 /*
1353  * for a normal import, @svc_ctx should be NULL and @flvr is ignored;
1354  * for a reverse import, @svc_ctx and @flvr come from the incoming request.
1355  */
1356 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1357                              struct ptlrpc_svc_ctx *svc_ctx,
1358                              struct sptlrpc_flavor *flvr)
1359 {
1360         struct ptlrpc_connection   *conn;
1361         struct sptlrpc_flavor       sf;
1362         struct ptlrpc_sec          *sec, *newsec;
1363         enum lustre_sec_part        sp;
1364         char                        str[24];
1365         int                         rc = 0;
1366         ENTRY;
1367
1368         cfs_might_sleep();
1369
1370         if (imp == NULL)
1371                 RETURN(0);
1372
1373         conn = imp->imp_connection;
1374
1375         if (svc_ctx == NULL) {
1376                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1377                 /*
1378                  * normal import: determine the flavor from the rule set,
1379                  * except for the mgc, whose flavor is predetermined.
1380                  */
1381                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1382                         sf = cliobd->cl_flvr_mgc;
1383                 else 
1384                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1385                                                    cliobd->cl_sp_to,
1386                                                    &cliobd->cl_target_uuid,
1387                                                    conn->c_self, &sf);
1388
1389                 sp = imp->imp_obd->u.cli.cl_sp_me;
1390         } else {
1391                 /* reverse import: determine the flavor from the incoming request */
1392                 sf = *flvr;
1393
1394                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1395                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1396                                       PTLRPC_SEC_FL_ROOTONLY;
1397
1398                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1399         }
1400
1401         sec = sptlrpc_import_sec_ref(imp);
1402         if (sec) {
1403                 char    str2[24];
1404
1405                 if (flavor_equal(&sf, &sec->ps_flvr))
1406                         GOTO(out, rc);
1407
1408                 CWARN("import %s->%s: changing flavor %s -> %s\n",
1409                       imp->imp_obd->obd_name,
1410                       obd_uuid2str(&conn->c_remote_uuid),
1411                       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1412                       sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1413
1414                 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1415                     SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1416                     SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1417                     SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1418                         sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1419                         GOTO(out, rc);
1420                 }
1421         } else {
1422                 CWARN("import %s->%s netid %x: select flavor %s\n",
1423                       imp->imp_obd->obd_name,
1424                       obd_uuid2str(&conn->c_remote_uuid),
1425                       LNET_NIDNET(conn->c_self),
1426                       sptlrpc_flavor2name(&sf, str, sizeof(str)));
1427         }
1428
1429         cfs_mutex_down(&imp->imp_sec_mutex);
1430
1431         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1432         if (newsec) {
1433                 sptlrpc_import_sec_install(imp, newsec);
1434         } else {
1435                 CERROR("import %s->%s: failed to create new sec\n",
1436                        imp->imp_obd->obd_name,
1437                        obd_uuid2str(&conn->c_remote_uuid));
1438                 rc = -EPERM;
1439         }
1440
1441         cfs_mutex_up(&imp->imp_sec_mutex);
1442 out:
1443         sptlrpc_sec_put(sec);
1444         RETURN(rc);
1445 }
1446
1447 void sptlrpc_import_sec_put(struct obd_import *imp)
1448 {
1449         if (imp->imp_sec) {
1450                 sptlrpc_sec_kill(imp->imp_sec);
1451
1452                 sptlrpc_sec_put(imp->imp_sec);
1453                 imp->imp_sec = NULL;
1454         }
1455 }
1456
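/*
 * Flush cached contexts matching @uid from the import's sec; a uid of -1
 * flushes every user's contexts, as used by sptlrpc_import_flush_all_ctx()
 * below.
 */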
1457 static void import_flush_ctx_common(struct obd_import *imp,
1458                                     uid_t uid, int grace, int force)
1459 {
1460         struct ptlrpc_sec *sec;
1461
1462         if (imp == NULL)
1463                 return;
1464
1465         sec = sptlrpc_import_sec_ref(imp);
1466         if (sec == NULL)
1467                 return;
1468
1469         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1470         sptlrpc_sec_put(sec);
1471 }
1472
1473 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1474 {
1475         /* it's important to use grace mode, see the explanation in
1476          * sptlrpc_req_refresh_ctx() */
1477         import_flush_ctx_common(imp, 0, 1, 1);
1478 }
1479
1480 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1481 {
1482         import_flush_ctx_common(imp, cfs_curproc_uid(), 1, 1);
1483 }
1484 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1485
1486 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1487 {
1488         import_flush_ctx_common(imp, -1, 1, 1);
1489 }
1490 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1491
1492 /*
1493  * when this completes successfully, req->rq_reqmsg should point to the
1494  * right place.
1495  */
1496 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1497 {
1498         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1499         struct ptlrpc_sec_policy *policy;
1500         int rc;
1501
1502         LASSERT(ctx);
1503         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1504         LASSERT(ctx->cc_sec);
1505         LASSERT(ctx->cc_sec->ps_policy);
1506         LASSERT(req->rq_reqmsg == NULL);
1507
1508         policy = ctx->cc_sec->ps_policy;
1509         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1510         if (!rc) {
1511                 LASSERT(req->rq_reqmsg);
1512                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1513
1514                 /* zeroing preallocated buffer */
1515                 if (req->rq_pool)
1516                         memset(req->rq_reqmsg, 0, msgsize);
1517         }
1518
1519         return rc;
1520 }
1521
1522 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1523 {
1524         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1525         struct ptlrpc_sec_policy *policy;
1526
1527         LASSERT(ctx);
1528         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1529         LASSERT(ctx->cc_sec);
1530         LASSERT(ctx->cc_sec->ps_policy);
1531
1532         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1533                 return;
1534
1535         policy = ctx->cc_sec->ps_policy;
1536         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1537 }
1538
1539 /*
1540  * NOTE caller must guarantee the buffer size is enough for the enlargement
1541  */
1542 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1543                                   int segment, int newsize)
1544 {
1545         void   *src, *dst;
1546         int     oldsize, oldmsg_size, movesize;
1547
1548         LASSERT(segment < msg->lm_bufcount);
1549         LASSERT(msg->lm_buflens[segment] <= newsize);
1550
1551         if (msg->lm_buflens[segment] == newsize)
1552                 return;
1553
1554         /* nothing to do if we are enlarging the last segment */
1555         if (segment == msg->lm_bufcount - 1) {
1556                 msg->lm_buflens[segment] = newsize;
1557                 return;
1558         }
1559
1560         oldsize = msg->lm_buflens[segment];
1561
1562         src = lustre_msg_buf(msg, segment + 1, 0);
1563         msg->lm_buflens[segment] = newsize;
1564         dst = lustre_msg_buf(msg, segment + 1, 0);
1565         msg->lm_buflens[segment] = oldsize;
1566
1567         /* move from segment + 1 to end segment */
1568         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1569         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1570         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1571         LASSERT(movesize >= 0);
1572
1573         if (movesize)
1574                 memmove(dst, src, movesize);
1575
1576         /* note we don't clear the areas where the old data lived; it's not secret */
1577
1578         /* finally set new segment size */
1579         msg->lm_buflens[segment] = newsize;
1580 }
1581 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1582
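/*
 * Worked example (editorial, assuming the usual 8-byte segment rounding of
 * LUSTRE_MSG_MAGIC_V2 buffers): with lm_buflens = {32, 8, 16} and a call
 * enlarging segment 1 to 24, the start of segment 2 moves 16 bytes further
 * into the buffer, everything from the old start of segment 2 to the end of
 * the message is memmove()d as one block, and only then is lm_buflens[1]
 * set to 24.  The bytes already stored in segment 1 never move.
 */
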
1583 /*
1584  * enlarge @segment of the upper message req->rq_reqmsg to @newsize; all data
1585  * will be preserved after the enlargement. this must be called only after
1586  * rq_reqmsg has been initialized.
1587  *
1588  * caller's attention: upon return, rq_reqmsg and rq_reqlen might have
1589  * been changed.
1590  */
1591 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1592                                int segment, int newsize)
1593 {
1594         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1595         struct ptlrpc_sec_cops   *cops;
1596         struct lustre_msg        *msg = req->rq_reqmsg;
1597
1598         LASSERT(ctx);
1599         LASSERT(msg);
1600         LASSERT(msg->lm_bufcount > segment);
1601         LASSERT(msg->lm_buflens[segment] <= newsize);
1602
1603         if (msg->lm_buflens[segment] == newsize)
1604                 return 0;
1605
1606         cops = ctx->cc_sec->ps_policy->sp_cops;
1607         LASSERT(cops->enlarge_reqbuf);
1608         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1609 }
1610 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
1611
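/*
 * Usage sketch (editorial): a caller that needs one request segment to grow
 * after the request has already been packed would do something like
 *
 *      rc = sptlrpc_cli_enlarge_reqbuf(req, segment, newsize);
 *      if (rc)
 *              return rc;
 *      buf = lustre_msg_buf(req->rq_reqmsg, segment, newsize);
 *
 * re-reading req->rq_reqmsg after the call reflects the warning above that
 * it may have moved; the variable names here are illustrative only.
 */
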
1612 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1613 {
1614         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1615         struct ptlrpc_sec_policy *policy;
1616         ENTRY;
1617
1618         LASSERT(ctx);
1619         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1620         LASSERT(ctx->cc_sec);
1621         LASSERT(ctx->cc_sec->ps_policy);
1622
1623         if (req->rq_repbuf)
1624                 RETURN(0);
1625
1626         policy = ctx->cc_sec->ps_policy;
1627         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1628 }
1629
1630 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1631 {
1632         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1633         struct ptlrpc_sec_policy *policy;
1634         ENTRY;
1635
1636         LASSERT(ctx);
1637         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1638         LASSERT(ctx->cc_sec);
1639         LASSERT(ctx->cc_sec->ps_policy);
1640
1641         if (req->rq_repbuf == NULL)
1642                 return;
1643         LASSERT(req->rq_repbuf_len);
1644
1645         policy = ctx->cc_sec->ps_policy;
1646         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1647         EXIT;
1648 }
1649
1650 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1651                                 struct ptlrpc_cli_ctx *ctx)
1652 {
1653         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1654
1655         if (!policy->sp_cops->install_rctx)
1656                 return 0;
1657         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1658 }
1659
1660 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1661                                 struct ptlrpc_svc_ctx *ctx)
1662 {
1663         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1664
1665         if (!policy->sp_sops->install_rctx)
1666                 return 0;
1667         return policy->sp_sops->install_rctx(imp, ctx);
1668 }
1669
1670 /****************************************
1671  * server side security                 *
1672  ****************************************/
1673
1674 static int flavor_allowed(struct sptlrpc_flavor *exp,
1675                           struct ptlrpc_request *req)
1676 {
1677         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1678
1679         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1680                 return 1;
1681
1682         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1683             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1684             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1685             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1686                 return 1;
1687
1688         return 0;
1689 }
1690
1691 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1692
1693 int sptlrpc_target_export_check(struct obd_export *exp,
1694                                 struct ptlrpc_request *req)
1695 {
1696         struct sptlrpc_flavor   flavor;
1697
1698         if (exp == NULL)
1699                 return 0;
1700
1701         /* client side export has no imp_reverse, skip
1702          * FIXME maybe we should check the flavor here as well??? */
1703         if (exp->exp_imp_reverse == NULL)
1704                 return 0;
1705
1706         /* don't care about ctx fini rpc */
1707         if (req->rq_ctx_fini)
1708                 return 0;
1709
1710         cfs_spin_lock(&exp->exp_lock);
1711
1712         /* if the flavor has just changed (exp->exp_flvr_changed != 0), we wait
1713          * for the first req with the new flavor, then treat it as the current
1714          * flavor and adapt the reverse sec according to it.
1715          * note the first rpc with the new flavor might not carry a root ctx, in
1716          * which case delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1717         if (unlikely(exp->exp_flvr_changed) &&
1718             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1719                 /* make the new flavor the "current" one, and the old ones
1720                  * about-to-expire */
1721                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1722                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1723                 flavor = exp->exp_flvr_old[1];
1724                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1725                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1726                 exp->exp_flvr_old[0] = exp->exp_flvr;
1727                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1728                                           EXP_FLVR_UPDATE_EXPIRE;
1729                 exp->exp_flvr = flavor;
1730
1731                 /* flavor change finished */
1732                 exp->exp_flvr_changed = 0;
1733                 LASSERT(exp->exp_flvr_adapt == 1);
1734
1735                 /* if it's gss, we are only interested in root ctx init */
1736                 if (req->rq_auth_gss &&
1737                     !(req->rq_ctx_init && (req->rq_auth_usr_root ||
1738                                            req->rq_auth_usr_mdt))) {
1739                         cfs_spin_unlock(&exp->exp_lock);
1740                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d)\n",
1741                                req->rq_auth_gss, req->rq_ctx_init,
1742                                req->rq_auth_usr_root, req->rq_auth_usr_mdt);
1743                         return 0;
1744                 }
1745
1746                 exp->exp_flvr_adapt = 0;
1747                 cfs_spin_unlock(&exp->exp_lock);
1748
1749                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1750                                                 req->rq_svc_ctx, &flavor);
1751         }
1752
1753         /* if it equals the current flavor we accept it, but still need to
1754          * deal with the reverse sec/ctx */
1755         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1756                 /* most cases should return here; we are only interested in
1757                  * gss root ctx init */
1758                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1759                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt)) {
1760                         cfs_spin_unlock(&exp->exp_lock);
1761                         return 0;
1762                 }
1763
1764                 /* if the flavor has just changed, we should not proceed; just
1765                  * leave it, the current flavor will be discovered and replaced
1766                  * shortly, and let _this_ rpc pass through */
1767                 if (exp->exp_flvr_changed) {
1768                         LASSERT(exp->exp_flvr_adapt);
1769                         cfs_spin_unlock(&exp->exp_lock);
1770                         return 0;
1771                 }
1772
1773                 if (exp->exp_flvr_adapt) {
1774                         exp->exp_flvr_adapt = 0;
1775                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1776                                exp, exp->exp_flvr.sf_rpc,
1777                                exp->exp_flvr_old[0].sf_rpc,
1778                                exp->exp_flvr_old[1].sf_rpc);
1779                         flavor = exp->exp_flvr;
1780                         cfs_spin_unlock(&exp->exp_lock);
1781
1782                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1783                                                         req->rq_svc_ctx,
1784                                                         &flavor);
1785                 } else {
1786                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1787                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1788                                exp->exp_flvr_old[0].sf_rpc,
1789                                exp->exp_flvr_old[1].sf_rpc);
1790                         cfs_spin_unlock(&exp->exp_lock);
1791
1792                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1793                                                            req->rq_svc_ctx);
1794                 }
1795         }
1796
1797         if (exp->exp_flvr_expire[0]) {
1798                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1799                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1800                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1801                                        "middle one ("CFS_DURATION_T")\n", exp,
1802                                        exp->exp_flvr.sf_rpc,
1803                                        exp->exp_flvr_old[0].sf_rpc,
1804                                        exp->exp_flvr_old[1].sf_rpc,
1805                                        exp->exp_flvr_expire[0] -
1806                                                 cfs_time_current_sec());
1807                                 cfs_spin_unlock(&exp->exp_lock);
1808                                 return 0;
1809                         }
1810                 } else {
1811                         CDEBUG(D_SEC, "mark middle expired\n");
1812                         exp->exp_flvr_expire[0] = 0;
1813                 }
1814                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1815                        exp->exp_flvr.sf_rpc,
1816                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1817                        req->rq_flvr.sf_rpc);
1818         }
1819
1820         /* now it doesn't match the current flavor; the only chance to accept
1821          * it is to match one of the old flavors which has not yet expired. */
1822         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1823                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1824                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1825                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1826                                        "oldest one ("CFS_DURATION_T")\n", exp,
1827                                        exp->exp_flvr.sf_rpc,
1828                                        exp->exp_flvr_old[0].sf_rpc,
1829                                        exp->exp_flvr_old[1].sf_rpc,
1830                                        exp->exp_flvr_expire[1] -
1831                                                 cfs_time_current_sec());
1832                                 cfs_spin_unlock(&exp->exp_lock);
1833                                 return 0;
1834                         }
1835                 } else {
1836                         CDEBUG(D_SEC, "mark oldest expired\n");
1837                         exp->exp_flvr_expire[1] = 0;
1838                 }
1839                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1840                        exp, exp->exp_flvr.sf_rpc,
1841                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1842                        req->rq_flvr.sf_rpc);
1843         } else {
1844                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1845                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1846                        exp->exp_flvr_old[1].sf_rpc);
1847         }
1848
1849         cfs_spin_unlock(&exp->exp_lock);
1850
1851         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u) with "
1852               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1853               exp, exp->exp_obd->obd_name,
1854               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1855               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_flvr.sf_rpc,
1856               exp->exp_flvr.sf_rpc,
1857               exp->exp_flvr_old[0].sf_rpc,
1858               exp->exp_flvr_expire[0] ?
1859               (unsigned long) (exp->exp_flvr_expire[0] -
1860                                cfs_time_current_sec()) : 0,
1861               exp->exp_flvr_old[1].sf_rpc,
1862               exp->exp_flvr_expire[1] ?
1863               (unsigned long) (exp->exp_flvr_expire[1] -
1864                                cfs_time_current_sec()) : 0);
1865         return -EACCES;
1866 }
1867 EXPORT_SYMBOL(sptlrpc_target_export_check);
1868
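/*
 * Summary sketch (editorial restatement of the logic above): each export
 * keeps up to three flavors,
 *
 *      exp_flvr          the current flavor
 *      exp_flvr_old[0]   the previous flavor, valid until exp_flvr_expire[0]
 *      exp_flvr_old[1]   the oldest flavor, valid until exp_flvr_expire[1];
 *                        while a change is pending it instead holds the
 *                        not-yet-promoted new flavor
 *
 * and an incoming request is accepted if it matches a slot that has not
 * expired.  The first request matching a pending new flavor promotes it to
 * "current", pushing the older flavors down one slot and starting their
 * expiry timers.
 */
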
1869 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1870                                       struct sptlrpc_rule_set *rset)
1871 {
1872         struct obd_export       *exp;
1873         struct sptlrpc_flavor    new_flvr;
1874
1875         LASSERT(obd);
1876
1877         cfs_spin_lock(&obd->obd_dev_lock);
1878
1879         cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1880                 if (exp->exp_connection == NULL)
1881                         continue;
1882
1883                 /* note if this export's flavor had just been updated
1884                  * (exp_flvr_changed == 1), this will override the
1885                  * previous update. */
1886                 cfs_spin_lock(&exp->exp_lock);
1887                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1888                                              exp->exp_connection->c_peer.nid,
1889                                              &new_flvr);
1890                 if (exp->exp_flvr_changed ||
1891                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1892                         exp->exp_flvr_old[1] = new_flvr;
1893                         exp->exp_flvr_expire[1] = 0;
1894                         exp->exp_flvr_changed = 1;
1895                         exp->exp_flvr_adapt = 1;
1896
1897                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1898                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1899                                exp->exp_flvr.sf_rpc,
1900                                exp->exp_flvr_old[1].sf_rpc);
1901                 }
1902                 cfs_spin_unlock(&exp->exp_lock);
1903         }
1904
1905         cfs_spin_unlock(&obd->obd_dev_lock);
1906 }
1907 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1908
1909 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1910 {
1911         if (svc_rc == SECSVC_DROP)
1912                 return SECSVC_DROP;
1913
1914         switch (req->rq_sp_from) {
1915         case LUSTRE_SP_CLI:
1916         case LUSTRE_SP_MDT:
1917         case LUSTRE_SP_OST:
1918         case LUSTRE_SP_MGC:
1919         case LUSTRE_SP_MGS:
1920         case LUSTRE_SP_ANY:
1921                 break;
1922         default:
1923                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
1924                 return SECSVC_DROP;
1925         }
1926
1927         if (!req->rq_auth_gss)
1928                 return svc_rc;
1929
1930         if (unlikely(req->rq_sp_from == LUSTRE_SP_ANY)) {
1931                 CERROR("request source part not specified\n");
1932                 return SECSVC_DROP;
1933         }
1934
1935         /* from MDT, must be authenticated as MDT */
1936         if (unlikely(req->rq_sp_from == LUSTRE_SP_MDT &&
1937                      !req->rq_auth_usr_mdt)) {
1938                 DEBUG_REQ(D_ERROR, req, "fake source MDT");
1939                 return SECSVC_DROP;
1940         }
1941
1942         /* from OST, must be a callback to MDT or a client; the reverse sec
1943          * was set up from the mdt/root keytab, so it should be MDT or root FIXME */
1944         if (unlikely(req->rq_sp_from == LUSTRE_SP_OST &&
1945                      !req->rq_auth_usr_mdt && !req->rq_auth_usr_root)) {
1946                 DEBUG_REQ(D_ERROR, req, "fake source OST");
1947                 return SECSVC_DROP;
1948         }
1949
1950         return svc_rc;
1951 }
1952
1953 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
1954 {
1955         struct ptlrpc_sec_policy *policy;
1956         struct lustre_msg        *msg = req->rq_reqbuf;
1957         int                       rc;
1958         ENTRY;
1959
1960         LASSERT(msg);
1961         LASSERT(req->rq_reqmsg == NULL);
1962         LASSERT(req->rq_repmsg == NULL);
1963         LASSERT(req->rq_svc_ctx == NULL);
1964
1965         req->rq_req_swab_mask = 0;
1966
1967         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
1968         switch (rc) {
1969         case 1:
1970                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1971         case 0:
1972                 break;
1973         default:
1974                 CERROR("error unpacking request from %s x"LPU64"\n",
1975                        libcfs_id2str(req->rq_peer), req->rq_xid);
1976                 RETURN(SECSVC_DROP);
1977         }
1978
1979         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
1980         req->rq_sp_from = LUSTRE_SP_ANY;
1981         req->rq_auth_uid = INVALID_UID;
1982         req->rq_auth_mapped_uid = INVALID_UID;
1983
1984         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
1985         if (!policy) {
1986                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
1987                 RETURN(SECSVC_DROP);
1988         }
1989
1990         LASSERT(policy->sp_sops->accept);
1991         rc = policy->sp_sops->accept(req);
1992         sptlrpc_policy_put(policy);
1993         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
1994         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
1995
1996         /*
1997          * if it's not the null flavor (which means the real msg is embedded
1998          * inside), reset the swab mask for the coming inner msg unpacking.
1999          */
2000         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2001                 req->rq_req_swab_mask = 0;
2002
2003         /* sanity check for the request source */
2004         rc = sptlrpc_svc_check_from(req, rc);
2005         RETURN(rc);
2006 }
2007
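/*
 * Flow sketch (editorial, inferred from the helpers in this section): the
 * server side is expected to drive these in roughly this order per request:
 *
 *      sptlrpc_svc_unwrap_request(req);         verify/unseal the request
 *      sptlrpc_svc_alloc_rs(req, msglen);       reserve the reply state
 *      ... service handler fills the reply ...
 *      sptlrpc_svc_wrap_reply(req);             sign/seal the reply
 *      sptlrpc_svc_free_rs(rs);                 once the reply is sent
 *
 * the exact call sites are in the ptlrpc service code, not shown here.
 */
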
2008 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req,
2009                          int msglen)
2010 {
2011         struct ptlrpc_sec_policy *policy;
2012         struct ptlrpc_reply_state *rs;
2013         int rc;
2014         ENTRY;
2015
2016         LASSERT(req->rq_svc_ctx);
2017         LASSERT(req->rq_svc_ctx->sc_policy);
2018
2019         policy = req->rq_svc_ctx->sc_policy;
2020         LASSERT(policy->sp_sops->alloc_rs);
2021
2022         rc = policy->sp_sops->alloc_rs(req, msglen);
2023         if (unlikely(rc == -ENOMEM)) {
2024                 /* failed alloc, try emergency pool */
2025                 rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_service);
2026                 if (rs == NULL)
2027                         RETURN(-ENOMEM);
2028
2029                 req->rq_reply_state = rs;
2030                 rc = policy->sp_sops->alloc_rs(req, msglen);
2031                 if (rc) {
2032                         lustre_put_emerg_rs(rs);
2033                         req->rq_reply_state = NULL;
2034                 }
2035         }
2036
2037         LASSERT(rc != 0 ||
2038                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2039
2040         RETURN(rc);
2041 }
2042
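/*
 * Note (editorial): the -ENOMEM fallback above relies on the policy's
 * alloc_rs hook honouring a pre-attached req->rq_reply_state (the emergency
 * rs) and filling it in instead of allocating, which is why the emergency rs
 * is installed before the retry and dropped again if the retry still fails.
 */
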
2043 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2044 {
2045         struct ptlrpc_sec_policy *policy;
2046         int rc;
2047         ENTRY;
2048
2049         LASSERT(req->rq_svc_ctx);
2050         LASSERT(req->rq_svc_ctx->sc_policy);
2051
2052         policy = req->rq_svc_ctx->sc_policy;
2053         LASSERT(policy->sp_sops->authorize);
2054
2055         rc = policy->sp_sops->authorize(req);
2056         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2057
2058         RETURN(rc);
2059 }
2060
2061 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2062 {
2063         struct ptlrpc_sec_policy *policy;
2064         unsigned int prealloc;
2065         ENTRY;
2066
2067         LASSERT(rs->rs_svc_ctx);
2068         LASSERT(rs->rs_svc_ctx->sc_policy);
2069
2070         policy = rs->rs_svc_ctx->sc_policy;
2071         LASSERT(policy->sp_sops->free_rs);
2072
2073         prealloc = rs->rs_prealloc;
2074         policy->sp_sops->free_rs(rs);
2075
2076         if (prealloc)
2077                 lustre_put_emerg_rs(rs);
2078         EXIT;
2079 }
2080
2081 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2082 {
2083         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2084
2085         if (ctx == NULL)
2086                 return;
2087
2088         LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
2089         cfs_atomic_inc(&ctx->sc_refcount);
2090 }
2091
2092 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2093 {
2094         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2095
2096         if (ctx == NULL)
2097                 return;
2098
2099         LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
2100         if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
2101                 if (ctx->sc_policy->sp_sops->free_ctx)
2102                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2103         }
2104         req->rq_svc_ctx = NULL;
2105 }
2106
2107 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2108 {
2109         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2110
2111         if (ctx == NULL)
2112                 return;
2113
2114         LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
2115         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2116                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2117 }
2118 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2119
2120 /****************************************
2121  * bulk security                        *
2122  ****************************************/
2123
2124 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2125                           struct ptlrpc_bulk_desc *desc)
2126 {
2127         struct ptlrpc_cli_ctx *ctx;
2128
2129         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2130
2131         if (!req->rq_pack_bulk)
2132                 return 0;
2133
2134         ctx = req->rq_cli_ctx;
2135         if (ctx->cc_ops->wrap_bulk)
2136                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2137         return 0;
2138 }
2139 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2140
2141 /*
2142  * return the actual plain text size received, in bytes, or an error code.
2143  */
2144 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2145                                  struct ptlrpc_bulk_desc *desc,
2146                                  int nob)
2147 {
2148         struct ptlrpc_cli_ctx  *ctx;
2149         int                     rc;
2150
2151         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2152
2153         if (!req->rq_pack_bulk)
2154                 return desc->bd_nob_transferred;
2155
2156         ctx = req->rq_cli_ctx;
2157         if (ctx->cc_ops->unwrap_bulk) {
2158                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2159                 if (rc < 0)
2160                         return rc;
2161         }
2162         return desc->bd_nob_transferred;
2163 }
2164 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
2165
2166 /*
2167  * return 0 on success or an error code.
2168  */
2169 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2170                                   struct ptlrpc_bulk_desc *desc)
2171 {
2172         struct ptlrpc_cli_ctx  *ctx;
2173         int                     rc;
2174
2175         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2176
2177         if (!req->rq_pack_bulk)
2178                 return 0;
2179
2180         ctx = req->rq_cli_ctx;
2181         if (ctx->cc_ops->unwrap_bulk) {
2182                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2183                 if (rc < 0)
2184                         return rc;
2185         }
2186
2187         /*
2188          * if everything went right, nob should equal nob_transferred.
2189          * in privacy mode, nob_transferred needs to be adjusted.
2190          */
2191         if (desc->bd_nob != desc->bd_nob_transferred) {
2192                 CERROR("nob %d doesn't match transferred nob %d\n",
2193                        desc->bd_nob, desc->bd_nob_transferred);
2194                 return -EPROTO;
2195         }
2196
2197         return 0;
2198 }
2199 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
2200
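/*
 * Flow sketch (editorial): on the client, bulk security brackets the actual
 * transfer, roughly:
 *
 *      sptlrpc_cli_wrap_bulk(req, desc);                   before the transfer
 *      ... bulk I/O happens ...
 *      rc = sptlrpc_cli_unwrap_bulk_read(req, desc, nob);  after a bulk read
 *      rc = sptlrpc_cli_unwrap_bulk_write(req, desc);      after a bulk write
 *
 * only one of the two unwrap calls applies to a given request, matching the
 * rq_bulk_read/rq_bulk_write assertions above.
 */
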
2201 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2202                           struct ptlrpc_bulk_desc *desc)
2203 {
2204         struct ptlrpc_svc_ctx *ctx;
2205
2206         LASSERT(req->rq_bulk_read);
2207
2208         if (!req->rq_pack_bulk)
2209                 return 0;
2210
2211         ctx = req->rq_svc_ctx;
2212         if (ctx->sc_policy->sp_sops->wrap_bulk)
2213                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2214
2215         return 0;
2216 }
2217 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2218
2219 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2220                             struct ptlrpc_bulk_desc *desc)
2221 {
2222         struct ptlrpc_svc_ctx *ctx;
2223         int                    rc;
2224
2225         LASSERT(req->rq_bulk_write);
2226
2227         /*
2228          * in privacy mode, transferred should be >= expected; otherwise
2229          * transferred should be == expected.
2230          */
2231         if (desc->bd_nob_transferred < desc->bd_nob ||
2232             (desc->bd_nob_transferred > desc->bd_nob &&
2233              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2234              SPTLRPC_BULK_SVC_PRIV)) {
2235                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2236                           desc->bd_nob_transferred, desc->bd_nob);
2237                 return -ETIMEDOUT;
2238         }
2239
2240         if (!req->rq_pack_bulk)
2241                 return 0;
2242
2243         ctx = req->rq_svc_ctx;
2244         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2245                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2246                 if (rc)
2247                         CERROR("error unwrap bulk: %d\n", rc);
2248         }
2249
2250         /* return 0 to allow the reply to be sent */
2251         return 0;
2252 }
2253 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2254
2255 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2256                           struct ptlrpc_bulk_desc *desc)
2257 {
2258         struct ptlrpc_svc_ctx *ctx;
2259
2260         LASSERT(req->rq_bulk_write);
2261
2262         if (!req->rq_pack_bulk)
2263                 return 0;
2264
2265         ctx = req->rq_svc_ctx;
2266         if (ctx->sc_policy->sp_sops->prep_bulk)
2267                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2268
2269         return 0;
2270 }
2271 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
2272
2273 /****************************************
2274  * user descriptor helpers              *
2275  ****************************************/
2276
2277 int sptlrpc_current_user_desc_size(void)
2278 {
2279         int ngroups;
2280
2281 #ifdef __KERNEL__
2282         ngroups = current_ngroups;
2283
2284         if (ngroups > LUSTRE_MAX_GROUPS)
2285                 ngroups = LUSTRE_MAX_GROUPS;
2286 #else
2287         ngroups = 0;
2288 #endif
2289         return sptlrpc_user_desc_size(ngroups);
2290 }
2291 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2292
2293 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2294 {
2295         struct ptlrpc_user_desc *pud;
2296
2297         pud = lustre_msg_buf(msg, offset, 0);
2298
2299         pud->pud_uid = cfs_curproc_uid();
2300         pud->pud_gid = cfs_curproc_gid();
2301         pud->pud_fsuid = cfs_curproc_fsuid();
2302         pud->pud_fsgid = cfs_curproc_fsgid();
2303         pud->pud_cap = cfs_curproc_cap_pack();
2304         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2305
2306 #ifdef __KERNEL__
2307         task_lock(current);
2308         if (pud->pud_ngroups > current_ngroups)
2309                 pud->pud_ngroups = current_ngroups;
2310         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2311                pud->pud_ngroups * sizeof(__u32));
2312         task_unlock(current);
2313 #endif
2314
2315         return 0;
2316 }
2317 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2318
2319 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2320 {
2321         struct ptlrpc_user_desc *pud;
2322         int                      i;
2323
2324         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2325         if (!pud)
2326                 return -EINVAL;
2327
2328         if (swabbed) {
2329                 __swab32s(&pud->pud_uid);
2330                 __swab32s(&pud->pud_gid);
2331                 __swab32s(&pud->pud_fsuid);
2332                 __swab32s(&pud->pud_fsgid);
2333                 __swab32s(&pud->pud_cap);
2334                 __swab32s(&pud->pud_ngroups);
2335         }
2336
2337         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2338                 CERROR("too many groups: %u\n", pud->pud_ngroups);
2339                 return -EINVAL;
2340         }
2341
2342         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2343             msg->lm_buflens[offset]) {
2344                 CERROR("%u groups are claimed but bufsize only %u\n",
2345                        pud->pud_ngroups, msg->lm_buflens[offset]);
2346                 return -EINVAL;
2347         }
2348
2349         if (swabbed) {
2350                 for (i = 0; i < pud->pud_ngroups; i++)
2351                         __swab32s(&pud->pud_groups[i]);
2352         }
2353
2354         return 0;
2355 }
2356 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
2357
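/*
 * Layout sketch (editorial): the user descriptor segment is a fixed
 * struct ptlrpc_user_desc immediately followed by pud_ngroups __u32 group
 * ids, so the space a caller must reserve is presumably
 *
 *      sizeof(struct ptlrpc_user_desc) + ngroups * sizeof(__u32)
 *
 * which is what sptlrpc_user_desc_size() is assumed to return; typical usage
 * is sptlrpc_current_user_desc_size() when sizing the request buffer, then
 * sptlrpc_pack_user_desc() on the sender and sptlrpc_unpack_user_desc() on
 * the receiver.
 */
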
2358 /****************************************
2359  * misc helpers                         *
2360  ****************************************/
2361
2362 const char * sec2target_str(struct ptlrpc_sec *sec)
2363 {
2364         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2365                 return "*";
2366         if (sec_is_reverse(sec))
2367                 return "c";
2368         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2369 }
2370 EXPORT_SYMBOL(sec2target_str);
2371
2372 /*
2373  * return true if the bulk data is protected
2374  */
2375 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2376 {
2377         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2378         case SPTLRPC_BULK_SVC_INTG:
2379         case SPTLRPC_BULK_SVC_PRIV:
2380                 return 1;
2381         default:
2382                 return 0;
2383         }
2384 }
2385 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2386
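/*
 * Usage sketch (editorial): bulk setup code can use this to decide whether
 * per-page integrity/privacy material needs to be prepared, e.g.
 *
 *      if (sptlrpc_flavor_has_bulk(&req->rq_flvr))
 *              ... reserve room for the bulk security descriptor ...
 *
 * the exact callers are outside this file.
 */
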
2387 /****************************************
2388  * crypto API helper/alloc blkcipher    *
2389  ****************************************/
2390
2391 /****************************************
2392  * initialize/finalize                  *
2393  ****************************************/
2394
2395 int __init sptlrpc_init(void)
2396 {
2397         int rc;
2398
2399         cfs_rwlock_init(&policy_lock);
2400
2401         rc = sptlrpc_gc_init();
2402         if (rc)
2403                 goto out;
2404
2405         rc = sptlrpc_conf_init();
2406         if (rc)
2407                 goto out_gc;
2408
2409         rc = sptlrpc_enc_pool_init();
2410         if (rc)
2411                 goto out_conf;
2412
2413         rc = sptlrpc_null_init();
2414         if (rc)
2415                 goto out_pool;
2416
2417         rc = sptlrpc_plain_init();
2418         if (rc)
2419                 goto out_null;
2420
2421         rc = sptlrpc_lproc_init();
2422         if (rc)
2423                 goto out_plain;
2424
2425         return 0;
2426
2427 out_plain:
2428         sptlrpc_plain_fini();
2429 out_null:
2430         sptlrpc_null_fini();
2431 out_pool:
2432         sptlrpc_enc_pool_fini();
2433 out_conf:
2434         sptlrpc_conf_fini();
2435 out_gc:
2436         sptlrpc_gc_fini();
2437 out:
2438         return rc;
2439 }
2440
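/*
 * Note (editorial): the goto-based unwinding above is the mirror image of
 * sptlrpc_fini() below; subsystems are torn down in the reverse order of
 * their initialization, so a new sptlrpc_*_init() call needs both a new
 * error label here and a matching *_fini() call added at the top of
 * sptlrpc_fini().
 */
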
2441 void __exit sptlrpc_fini(void)
2442 {
2443         sptlrpc_lproc_fini();
2444         sptlrpc_plain_fini();
2445         sptlrpc_null_fini();
2446         sptlrpc_enc_pool_fini();
2447         sptlrpc_conf_fini();
2448         sptlrpc_gc_fini();
2449 }