1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2  * vim:expandtab:shiftwidth=8:tabstop=8:
3  *
4  * GPL HEADER START
5  *
6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License version 2 only,
10  * as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License version 2 for more details (a copy is included
16  * in the LICENSE file that accompanied this code).
17  *
18  * You should have received a copy of the GNU General Public License
19  * version 2 along with this program; If not, see
20  * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
21  *
22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23  * CA 95054 USA or visit www.sun.com if you need additional information or
24  * have any questions.
25  *
26  * GPL HEADER END
27  */
28 /*
29  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
30  * Use is subject to license terms.
31  */
32 /*
33  * This file is part of Lustre, http://www.lustre.org/
34  * Lustre is a trademark of Sun Microsystems, Inc.
35  *
36  * lustre/ptlrpc/sec.c
37  *
38  * Author: Eric Mei <ericm@clusterfs.com>
39  */
40
41 #ifndef EXPORT_SYMTAB
42 #define EXPORT_SYMTAB
43 #endif
44 #define DEBUG_SUBSYSTEM S_SEC
45
46 #include <libcfs/libcfs.h>
47 #ifndef __KERNEL__
48 #include <liblustre.h>
49 #include <libcfs/list.h>
50 #else
51 #include <linux/crypto.h>
52 #include <linux/key.h>
53 #endif
54
55 #include <obd.h>
56 #include <obd_class.h>
57 #include <obd_support.h>
58 #include <lustre_net.h>
59 #include <lustre_import.h>
60 #include <lustre_dlm.h>
61 #include <lustre_sec.h>
62
63 #include "ptlrpc_internal.h"
64
65 /***********************************************
66  * policy registers                            *
67  ***********************************************/
68
69 static cfs_rwlock_t policy_lock;
70 static struct ptlrpc_sec_policy *policies[SPTLRPC_POLICY_MAX] = {
71         NULL,
72 };
73
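/**
 * Register \a policy in the global policy table, indexed by its sp_policy
 * number. Fails with -EINVAL if the number is out of range, or with
 * -EALREADY if another policy already occupies that slot.
 */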
74 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy)
75 {
76         __u16 number = policy->sp_policy;
77
78         LASSERT(policy->sp_name);
79         LASSERT(policy->sp_cops);
80         LASSERT(policy->sp_sops);
81
82         if (number >= SPTLRPC_POLICY_MAX)
83                 return -EINVAL;
84
85         cfs_write_lock(&policy_lock);
86         if (unlikely(policies[number])) {
87                 cfs_write_unlock(&policy_lock);
88                 return -EALREADY;
89         }
90         policies[number] = policy;
91         cfs_write_unlock(&policy_lock);
92
93         CDEBUG(D_SEC, "%s: registered\n", policy->sp_name);
94         return 0;
95 }
96 EXPORT_SYMBOL(sptlrpc_register_policy);
97
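/**
 * Remove \a policy from the global policy table. Returns -EINVAL if it was
 * not registered.
 */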
98 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy)
99 {
100         __u16 number = policy->sp_policy;
101
102         LASSERT(number < SPTLRPC_POLICY_MAX);
103
104         cfs_write_lock(&policy_lock);
105         if (unlikely(policies[number] == NULL)) {
106                 cfs_write_unlock(&policy_lock);
107                 CERROR("%s: already unregistered\n", policy->sp_name);
108                 return -EINVAL;
109         }
110
111         LASSERT(policies[number] == policy);
112         policies[number] = NULL;
113         cfs_write_unlock(&policy_lock);
114
115         CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name);
116         return 0;
117 }
118 EXPORT_SYMBOL(sptlrpc_unregister_policy);
119
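/*
 * Map a wire flavor to its registered policy, taking a module reference on
 * it. For GSS flavors, try once to load the ptlrpc_gss module on demand if
 * the policy is not yet registered.
 */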
120 static
121 struct ptlrpc_sec_policy * sptlrpc_wireflavor2policy(__u32 flavor)
122 {
123         static CFS_DECLARE_MUTEX(load_mutex);
124         static cfs_atomic_t       loaded = CFS_ATOMIC_INIT(0);
125         struct ptlrpc_sec_policy *policy;
126         __u16                     number = SPTLRPC_FLVR_POLICY(flavor);
127         __u16                     flag = 0;
128
129         if (number >= SPTLRPC_POLICY_MAX)
130                 return NULL;
131
132         while (1) {
133                 cfs_read_lock(&policy_lock);
134                 policy = policies[number];
135                 if (policy && !cfs_try_module_get(policy->sp_owner))
136                         policy = NULL;
137                 if (policy == NULL)
138                         flag = cfs_atomic_read(&loaded);
139                 cfs_read_unlock(&policy_lock);
140
141                 if (policy != NULL || flag != 0 ||
142                     number != SPTLRPC_POLICY_GSS)
143                         break;
144
145                 /* try to load gss module, once */
146                 cfs_mutex_down(&load_mutex);
147                 if (cfs_atomic_read(&loaded) == 0) {
148                         if (cfs_request_module("ptlrpc_gss") == 0)
149                                 CWARN("module ptlrpc_gss loaded on demand\n");
150                         else
151                                 CERROR("Unable to load module ptlrpc_gss\n");
152
153                         cfs_atomic_set(&loaded, 1);
154                 }
155                 cfs_mutex_up(&load_mutex);
156         }
157
158         return policy;
159 }
160
161 __u32 sptlrpc_name2flavor_base(const char *name)
162 {
163         if (!strcmp(name, "null"))
164                 return SPTLRPC_FLVR_NULL;
165         if (!strcmp(name, "plain"))
166                 return SPTLRPC_FLVR_PLAIN;
167         if (!strcmp(name, "krb5n"))
168                 return SPTLRPC_FLVR_KRB5N;
169         if (!strcmp(name, "krb5a"))
170                 return SPTLRPC_FLVR_KRB5A;
171         if (!strcmp(name, "krb5i"))
172                 return SPTLRPC_FLVR_KRB5I;
173         if (!strcmp(name, "krb5p"))
174                 return SPTLRPC_FLVR_KRB5P;
175
176         return SPTLRPC_FLVR_INVALID;
177 }
178 EXPORT_SYMBOL(sptlrpc_name2flavor_base);
179
180 const char *sptlrpc_flavor2name_base(__u32 flvr)
181 {
182         __u32   base = SPTLRPC_FLVR_BASE(flvr);
183
184         if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_NULL))
185                 return "null";
186         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_PLAIN))
187                 return "plain";
188         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5N))
189                 return "krb5n";
190         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5A))
191                 return "krb5a";
192         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5I))
193                 return "krb5i";
194         else if (base == SPTLRPC_FLVR_BASE(SPTLRPC_FLVR_KRB5P))
195                 return "krb5p";
196
197         CERROR("invalid wire flavor 0x%x\n", flvr);
198         return "invalid";
199 }
200 EXPORT_SYMBOL(sptlrpc_flavor2name_base);
201
202 char *sptlrpc_flavor2name_bulk(struct sptlrpc_flavor *sf,
203                                char *buf, int bufsize)
204 {
205         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN)
206                 snprintf(buf, bufsize, "hash:%s",
207                          sptlrpc_get_hash_name(sf->u_bulk.hash.hash_alg));
208         else
209                 snprintf(buf, bufsize, "%s",
210                          sptlrpc_flavor2name_base(sf->sf_rpc));
211
212         buf[bufsize - 1] = '\0';
213         return buf;
214 }
215 EXPORT_SYMBOL(sptlrpc_flavor2name_bulk);
216
217 char *sptlrpc_flavor2name(struct sptlrpc_flavor *sf, char *buf, int bufsize)
218 {
219         snprintf(buf, bufsize, "%s", sptlrpc_flavor2name_base(sf->sf_rpc));
220
221         /*
222          * currently we don't support customized bulk specification for
223          * flavors other than plain
224          */
225         if (SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN) {
226                 char bspec[16];
227
228                 bspec[0] = '-';
229                 sptlrpc_flavor2name_bulk(sf, &bspec[1], sizeof(bspec) - 1);
230                 strncat(buf, bspec, bufsize);
231         }
232
233         buf[bufsize - 1] = '\0';
234         return buf;
235 }
236 EXPORT_SYMBOL(sptlrpc_flavor2name);
237
238 char *sptlrpc_secflags2str(__u32 flags, char *buf, int bufsize)
239 {
240         buf[0] = '\0';
241
242         if (flags & PTLRPC_SEC_FL_REVERSE)
243                 strncat(buf, "reverse,", bufsize);
244         if (flags & PTLRPC_SEC_FL_ROOTONLY)
245                 strncat(buf, "rootonly,", bufsize);
246         if (flags & PTLRPC_SEC_FL_UDESC)
247                 strncat(buf, "udesc,", bufsize);
248         if (flags & PTLRPC_SEC_FL_BULK)
249                 strncat(buf, "bulk,", bufsize);
250         if (buf[0] == '\0')
251                 strncat(buf, "-,", bufsize);
252
253         buf[bufsize - 1] = '\0';
254         return buf;
255 }
256 EXPORT_SYMBOL(sptlrpc_secflags2str);
257
258 /**************************************************
259  * client context APIs                            *
260  **************************************************/
261
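/*
 * Look up (and possibly create) the client context matching the current
 * credentials. Reverse and root-only secs always use root credentials;
 * reverse secs additionally neither create new contexts nor reap dead ones.
 */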
262 static
263 struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
264 {
265         struct vfs_cred vcred;
266         int create = 1, remove_dead = 1;
267
268         LASSERT(sec);
269         LASSERT(sec->ps_policy->sp_cops->lookup_ctx);
270
271         if (sec->ps_flvr.sf_flags & (PTLRPC_SEC_FL_REVERSE |
272                                      PTLRPC_SEC_FL_ROOTONLY)) {
273                 vcred.vc_uid = 0;
274                 vcred.vc_gid = 0;
275                 if (sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_REVERSE) {
276                         create = 0;
277                         remove_dead = 0;
278                 }
279         } else {
280                 vcred.vc_uid = cfs_curproc_uid();
281                 vcred.vc_gid = cfs_curproc_gid();
282         }
283
284         return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
285                                                    create, remove_dead);
286 }
287
288 struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
289 {
290         LASSERT(cfs_atomic_read(&ctx->cc_refcount) > 0);
291         cfs_atomic_inc(&ctx->cc_refcount);
292         return ctx;
293 }
294 EXPORT_SYMBOL(sptlrpc_cli_ctx_get);
295
296 void sptlrpc_cli_ctx_put(struct ptlrpc_cli_ctx *ctx, int sync)
297 {
298         struct ptlrpc_sec *sec = ctx->cc_sec;
299
300         LASSERT(sec);
301         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
302
303         if (!cfs_atomic_dec_and_test(&ctx->cc_refcount))
304                 return;
305
306         sec->ps_policy->sp_cops->release_ctx(sec, ctx, sync);
307 }
308 EXPORT_SYMBOL(sptlrpc_cli_ctx_put);
309
310 /**
311  * Expire the client context immediately.
312  *
313  * \pre Caller must hold at least 1 reference on the \a ctx.
314  */
315 void sptlrpc_cli_ctx_expire(struct ptlrpc_cli_ctx *ctx)
316 {
317         LASSERT(ctx->cc_ops->die);
318         ctx->cc_ops->die(ctx, 0);
319 }
320 EXPORT_SYMBOL(sptlrpc_cli_ctx_expire);
321
322 /**
323  * Wake up the threads that are waiting for this client context. Called
324  * after some status change has happened on \a ctx.
325  */
326 void sptlrpc_cli_ctx_wakeup(struct ptlrpc_cli_ctx *ctx)
327 {
328         struct ptlrpc_request *req, *next;
329
330         cfs_spin_lock(&ctx->cc_lock);
331         cfs_list_for_each_entry_safe(req, next, &ctx->cc_req_list,
332                                      rq_ctx_chain) {
333                 cfs_list_del_init(&req->rq_ctx_chain);
334                 ptlrpc_client_wake_req(req);
335         }
336         cfs_spin_unlock(&ctx->cc_lock);
337 }
338 EXPORT_SYMBOL(sptlrpc_cli_ctx_wakeup);
339
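/*
 * Print a human-readable description of \a ctx into \a buf, if the policy
 * implements a display hook; otherwise do nothing and return 0.
 */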
340 int sptlrpc_cli_ctx_display(struct ptlrpc_cli_ctx *ctx, char *buf, int bufsize)
341 {
342         LASSERT(ctx->cc_ops);
343
344         if (ctx->cc_ops->display == NULL)
345                 return 0;
346
347         return ctx->cc_ops->display(ctx, buf, bufsize);
348 }
349
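/*
 * If a delayed sec adapt on \a imp has expired, clear the expiry time and
 * perform the adapt now.
 */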
350 static int import_sec_check_expire(struct obd_import *imp)
351 {
352         int     adapt = 0;
353
354         cfs_spin_lock(&imp->imp_lock);
355         if (imp->imp_sec_expire &&
356             imp->imp_sec_expire < cfs_time_current_sec()) {
357                 adapt = 1;
358                 imp->imp_sec_expire = 0;
359         }
360         cfs_spin_unlock(&imp->imp_lock);
361
362         if (!adapt)
363                 return 0;
364
365         CDEBUG(D_SEC, "found delayed sec adapt expired, do it now\n");
366         return sptlrpc_import_sec_adapt(imp, NULL, 0);
367 }
368
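/*
 * Take a reference on the import's sec in \a *sec, performing any pending
 * delayed sec adapt first. Fails with -EACCES if the import has no sec or
 * the sec is dying.
 */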
369 static int import_sec_validate_get(struct obd_import *imp,
370                                    struct ptlrpc_sec **sec)
371 {
372         int     rc;
373
374         if (unlikely(imp->imp_sec_expire)) {
375                 rc = import_sec_check_expire(imp);
376                 if (rc)
377                         return rc;
378         }
379
380         *sec = sptlrpc_import_sec_ref(imp);
381         if (*sec == NULL) {
382                 CERROR("import %p (%s) with no sec\n",
383                        imp, ptlrpc_import_state_name(imp->imp_state));
384                 return -EACCES;
385         }
386
387         if (unlikely((*sec)->ps_dying)) {
388                 CERROR("attempt to use dying sec %p\n", *sec);
389                 sptlrpc_sec_put(*sec);
390                 return -EACCES;
391         }
392
393         return 0;
394 }
395
396 /**
397  * Given a \a req, find or allocate an appropriate context for it.
398  * \pre req->rq_cli_ctx == NULL.
399  *
400  * \retval 0 succeed, and req->rq_cli_ctx is set.
401  * \retval -ev error number, and req->rq_cli_ctx == NULL.
402  */
403 int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
404 {
405         struct obd_import *imp = req->rq_import;
406         struct ptlrpc_sec *sec;
407         int                rc;
408         ENTRY;
409
410         LASSERT(!req->rq_cli_ctx);
411         LASSERT(imp);
412
413         rc = import_sec_validate_get(imp, &sec);
414         if (rc)
415                 RETURN(rc);
416
417         req->rq_cli_ctx = get_my_ctx(sec);
418
419         sptlrpc_sec_put(sec);
420
421         if (!req->rq_cli_ctx) {
422                 CERROR("req %p: fail to get context\n", req);
423                 RETURN(-ENOMEM);
424         }
425
426         RETURN(0);
427 }
428
429 /**
430  * Drop the context for \a req.
431  * \pre req->rq_cli_ctx != NULL.
432  * \post req->rq_cli_ctx == NULL.
433  *
434  * If \a sync == 0, this function should return quickly without sleep;
435  * otherwise it might trigger and wait for the whole process of sending
436  * a context-destroying rpc to the server.
437  */
438 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
439 {
440         ENTRY;
441
442         LASSERT(req);
443         LASSERT(req->rq_cli_ctx);
444
445         /* the request might be asked to release its context early while
446          * it is still on the context waiting list.
447          */
448         if (!cfs_list_empty(&req->rq_ctx_chain)) {
449                 cfs_spin_lock(&req->rq_cli_ctx->cc_lock);
450                 cfs_list_del_init(&req->rq_ctx_chain);
451                 cfs_spin_unlock(&req->rq_cli_ctx->cc_lock);
452         }
453
454         sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
455         req->rq_cli_ctx = NULL;
456         EXIT;
457 }
458
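/*
 * Switch \a req from \a oldctx to \a newctx: save the request message, free
 * the old request/reply buffers under the old context, recompute the flavor,
 * then re-allocate the request buffer under the new context and copy the
 * message back. On allocation failure the old flavor is restored.
 */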
459 static
460 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req,
461                            struct ptlrpc_cli_ctx *oldctx,
462                            struct ptlrpc_cli_ctx *newctx)
463 {
464         struct sptlrpc_flavor   old_flvr;
465         char                   *reqmsg = NULL; /* to workaround old gcc */
466         int                     reqmsg_size;
467         int                     rc = 0;
468
469         LASSERT(req->rq_reqmsg);
470         LASSERT(req->rq_reqlen);
471         LASSERT(req->rq_replen);
472
473         CWARN("req %p: switch ctx %p(%u->%s) -> %p(%u->%s), "
474               "switch sec %p(%s) -> %p(%s)\n", req,
475               oldctx, oldctx->cc_vcred.vc_uid, sec2target_str(oldctx->cc_sec),
476               newctx, newctx->cc_vcred.vc_uid, sec2target_str(newctx->cc_sec),
477               oldctx->cc_sec, oldctx->cc_sec->ps_policy->sp_name,
478               newctx->cc_sec, newctx->cc_sec->ps_policy->sp_name);
479
480         /* save flavor */
481         old_flvr = req->rq_flvr;
482
483         /* save request message */
484         reqmsg_size = req->rq_reqlen;
485         if (reqmsg_size != 0) {
486                 OBD_ALLOC(reqmsg, reqmsg_size);
487                 if (reqmsg == NULL)
488                         return -ENOMEM;
489                 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size);
490         }
491
492         /* release old req/rep buf */
493         req->rq_cli_ctx = oldctx;
494         sptlrpc_cli_free_reqbuf(req);
495         sptlrpc_cli_free_repbuf(req);
496         req->rq_cli_ctx = newctx;
497
498         /* recalculate the flavor */
499         sptlrpc_req_set_flavor(req, 0);
500
501         /* alloc new request buffer.
502          * we don't need to alloc the reply buffer here; leave that to the
503          * rest of the ptlrpc processing */
504         if (reqmsg_size != 0) {
505                 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
506                 if (!rc) {
507                         LASSERT(req->rq_reqmsg);
508                         memcpy(req->rq_reqmsg, reqmsg, reqmsg_size);
509                 } else {
510                         CWARN("failed to alloc reqbuf: %d\n", rc);
511                         req->rq_flvr = old_flvr;
512                 }
513
514                 OBD_FREE(reqmsg, reqmsg_size);
515         }
516         return rc;
517 }
518
519 /**
520  * If the current context of \a req is dead somehow, e.g. we just switched
521  * flavor and thus marked the original contexts dead, we'll find a new context
522  * for it. If no switch is needed, \a req will end up with the same context.
523  *
524  * \note a request must have a context, to keep other parts of code happy.
525  * In any case of failure during the switching, we must restore the old one.
526  */
527 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
528 {
529         struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
530         struct ptlrpc_cli_ctx *newctx;
531         int                    rc;
532         ENTRY;
533
534         LASSERT(oldctx);
535
536         sptlrpc_cli_ctx_get(oldctx);
537         sptlrpc_req_put_ctx(req, 0);
538
539         rc = sptlrpc_req_get_ctx(req);
540         if (unlikely(rc)) {
541                 LASSERT(!req->rq_cli_ctx);
542
543                 /* restore old ctx */
544                 req->rq_cli_ctx = oldctx;
545                 RETURN(rc);
546         }
547
548         newctx = req->rq_cli_ctx;
549         LASSERT(newctx);
550
551         if (unlikely(newctx == oldctx && 
552                      cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &oldctx->cc_flags))) {
553                 /*
554                  * we still got the old dead ctx, which usually means the system is too busy
555                  */
556                 CWARN("ctx (%p, fl %lx) doesn't switch, relax a little bit\n",
557                       newctx, newctx->cc_flags);
558
559                 cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
560                                                    CFS_HZ);
561         } else {
562                 /*
563                  * it's possible newctx == oldctx if we're switching
564                  * subflavor with the same sec.
565                  */
566                 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx);
567                 if (rc) {
568                         /* restore old ctx */
569                         sptlrpc_req_put_ctx(req, 0);
570                         req->rq_cli_ctx = oldctx;
571                         RETURN(rc);
572                 }
573
574                 LASSERT(req->rq_cli_ctx == newctx);
575         }
576
577         sptlrpc_cli_ctx_put(oldctx, 1);
578         RETURN(0);
579 }
580 EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
581
582 static
583 int ctx_check_refresh(struct ptlrpc_cli_ctx *ctx)
584 {
585         if (cli_ctx_is_refreshed(ctx))
586                 return 1;
587         return 0;
588 }
589
590 static
591 int ctx_refresh_timeout(void *data)
592 {
593         struct ptlrpc_request *req = data;
594         int rc;
595
596         /* conn_cnt is needed in expire_one_request */
597         lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt);
598
599         rc = ptlrpc_expire_one_request(req, 1);
600         /* if we started recovery, we should mark this ctx dead; otherwise,
601          * in case lgssd died, nobody would retire this ctx and subsequent
602          * connect attempts would keep finding the same ctx, causing deadlock.
603          * there's an assumption that the expire time of the request should
604          * be later than the context refresh expire time.
605          */
606         if (rc == 0)
607                 req->rq_cli_ctx->cc_ops->die(req->rq_cli_ctx, 0);
608         return rc;
609 }
610
611 static
612 void ctx_refresh_interrupt(void *data)
613 {
614         struct ptlrpc_request *req = data;
615
616         cfs_spin_lock(&req->rq_lock);
617         req->rq_intr = 1;
618         cfs_spin_unlock(&req->rq_lock);
619 }
620
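/* remove \a req from the context's waiting list, if it is queued there */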
621 static
622 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx)
623 {
624         cfs_spin_lock(&ctx->cc_lock);
625         if (!cfs_list_empty(&req->rq_ctx_chain))
626                 cfs_list_del_init(&req->rq_ctx_chain);
627         cfs_spin_unlock(&ctx->cc_lock);
628 }
629
630 /**
631  * Refresh the context of \a req, if it's not up-to-date.
632  * \param timeout
633  * - < 0: don't wait
634  * - = 0: wait until success or fatal error occur
635  * - > 0: timeout value (in seconds)
636  *
637  * The status of the context could be changed by other threads at any time.
638  * We allow this race, but once we return with 0, the caller will assume
639  * it's up to date and keep using it until the owning rpc is done.
640  *
641  * \retval 0 only if the context is up to date.
642  * \retval -ev error number.
643  */
644 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
645 {
646         struct ptlrpc_cli_ctx  *ctx = req->rq_cli_ctx;
647         struct ptlrpc_sec      *sec;
648         struct l_wait_info      lwi;
649         int                     rc;
650         ENTRY;
651
652         LASSERT(ctx);
653
654         if (req->rq_ctx_init || req->rq_ctx_fini)
655                 RETURN(0);
656
657         /*
658          * during the process a request's context might even change type
659          * (e.g. from gss ctx to null ctx), so on each loop we need to
660          * re-check everything
661          */
662 again:
663         rc = import_sec_validate_get(req->rq_import, &sec);
664         if (rc)
665                 RETURN(rc);
666
667         if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
668                 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
669                       req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc);
670                 req_off_ctx_list(req, ctx);
671                 sptlrpc_req_replace_dead_ctx(req);
672                 ctx = req->rq_cli_ctx;
673         }
674         sptlrpc_sec_put(sec);
675
676         if (cli_ctx_is_eternal(ctx))
677                 RETURN(0);
678
679         if (unlikely(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
680                 LASSERT(ctx->cc_ops->refresh);
681                 ctx->cc_ops->refresh(ctx);
682         }
683         LASSERT(cfs_test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags) == 0);
684
685         LASSERT(ctx->cc_ops->validate);
686         if (ctx->cc_ops->validate(ctx) == 0) {
687                 req_off_ctx_list(req, ctx);
688                 RETURN(0);
689         }
690
691         if (unlikely(cfs_test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
692                 cfs_spin_lock(&req->rq_lock);
693                 req->rq_err = 1;
694                 cfs_spin_unlock(&req->rq_lock);
695                 req_off_ctx_list(req, ctx);
696                 RETURN(-EPERM);
697         }
698
699         /*
700          * There's a subtle issue for resending RPCs, suppose the following
701          * situation:
702          *  1. the request was sent to the server.
703          *  2. recovery was kicked off; after it finished, the request was
704          *     marked as resent.
705          *  3. resend the request.
706          *  4. old reply from server received, we accept and verify the reply.
707          *     this has to succeed, otherwise the error would become visible
708          *     to the application.
709          *  5. new reply from server received, dropped by LNet.
710          *
711          * Note the xid of the old & new request is the same. We can't simply
712          * change the xid for the resent request because the server relies on
713          * it for reply reconstruction.
714          *
715          * Commonly the original context should still be up to date because we
716          * have a nice expiry margin; the server will keep its context because
717          * we at least hold a ref on the old context, which prevents the
718          * context-destroying RPC from being sent. So the server can still
719          * accept the request and finish the RPC. But if that's not the case:
720          *  1. If the server side context has been trimmed, a NO_CONTEXT will
721          *     be returned, and gss_cli_ctx_verify/unseal will switch to the
722          *     new context by force.
723          *  2. If the current context has never been refreshed, then we are
724          *     fine: we never actually sent a request with the old context.
725          */
726         if (cfs_test_bit(PTLRPC_CTX_UPTODATE_BIT, &ctx->cc_flags) &&
727             unlikely(req->rq_reqmsg) &&
728             lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
729                 req_off_ctx_list(req, ctx);
730                 RETURN(0);
731         }
732
733         if (unlikely(cfs_test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
734                 req_off_ctx_list(req, ctx);
735                 /*
736                  * don't switch ctx if import was deactivated
737                  */
738                 if (req->rq_import->imp_deactive) {
739                         cfs_spin_lock(&req->rq_lock);
740                         req->rq_err = 1;
741                         cfs_spin_unlock(&req->rq_lock);
742                         RETURN(-EINTR);
743                 }
744
745                 rc = sptlrpc_req_replace_dead_ctx(req);
746                 if (rc) {
747                         LASSERT(ctx == req->rq_cli_ctx);
748                         CERROR("req %p: failed to replace dead ctx %p: %d\n",
749                                 req, ctx, rc);
750                         cfs_spin_lock(&req->rq_lock);
751                         req->rq_err = 1;
752                         cfs_spin_unlock(&req->rq_lock);
753                         RETURN(rc);
754                 }
755
756                 ctx = req->rq_cli_ctx;
757                 goto again;
758         }
759
760         /*
761          * Now we're sure this context is in the middle of an upcall; add
762          * ourselves to the waiting list
763          */
764         cfs_spin_lock(&ctx->cc_lock);
765         if (cfs_list_empty(&req->rq_ctx_chain))
766                 cfs_list_add(&req->rq_ctx_chain, &ctx->cc_req_list);
767         cfs_spin_unlock(&ctx->cc_lock);
768
769         if (timeout < 0)
770                 RETURN(-EWOULDBLOCK);
771
772         /* Clear any flags that may be present from previous sends */
773         LASSERT(req->rq_receiving_reply == 0);
774         cfs_spin_lock(&req->rq_lock);
775         req->rq_err = 0;
776         req->rq_timedout = 0;
777         req->rq_resend = 0;
778         req->rq_restart = 0;
779         cfs_spin_unlock(&req->rq_lock);
780
781         lwi = LWI_TIMEOUT_INTR(timeout * CFS_HZ, ctx_refresh_timeout,
782                                ctx_refresh_interrupt, req);
783         rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi);
784
785         /*
786          * following cases could lead us here:
787          * - successfully refreshed;
788          * - interrupted;
789          * - timed out, and we don't want to recover from the failure;
790          * - timed out, and woken up when recovery finished;
791          * - someone else marked this ctx dead by force;
792          * - someone invalidated the req and called ptlrpc_client_wake_req(),
793          *   e.g. ptlrpc_abort_inflight();
794          */
795         if (!cli_ctx_is_refreshed(ctx)) {
796                 /* timed out or interrupted */
797                 req_off_ctx_list(req, ctx);
798
799                 LASSERT(rc != 0);
800                 RETURN(rc);
801         }
802
803         goto again;
804 }
805
806 /**
807  * Initialize flavor settings for \a req, according to \a opcode.
808  *
809  * \note this could be called in two situations:
810  * - new request from ptlrpc_pre_req(), with proper @opcode
811  * - old request which changed ctx in the middle, with @opcode == 0
812  */
813 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode)
814 {
815         struct ptlrpc_sec *sec;
816
817         LASSERT(req->rq_import);
818         LASSERT(req->rq_cli_ctx);
819         LASSERT(req->rq_cli_ctx->cc_sec);
820         LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0);
821
822         /* special security flags according to opcode */
823         switch (opcode) {
824         case OST_READ:
825         case MDS_READPAGE:
826                 req->rq_bulk_read = 1;
827                 break;
828         case OST_WRITE:
829         case MDS_WRITEPAGE:
830                 req->rq_bulk_write = 1;
831                 break;
832         case SEC_CTX_INIT:
833                 req->rq_ctx_init = 1;
834                 break;
835         case SEC_CTX_FINI:
836                 req->rq_ctx_fini = 1;
837                 break;
838         case 0:
839                 /* init/fini rpcs won't be resent, so they can't get here */
840                 LASSERT(req->rq_ctx_init == 0);
841                 LASSERT(req->rq_ctx_fini == 0);
842
843                 /* cleanup flags, which should be recalculated */
844                 req->rq_pack_udesc = 0;
845                 req->rq_pack_bulk = 0;
846                 break;
847         }
848
849         sec = req->rq_cli_ctx->cc_sec;
850
851         cfs_spin_lock(&sec->ps_lock);
852         req->rq_flvr = sec->ps_flvr;
853         cfs_spin_unlock(&sec->ps_lock);
854
855         /* force SVC_NULL for context initiation rpc, SVC_INTG for context
856          * destruction rpc */
857         if (unlikely(req->rq_ctx_init))
858                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL);
859         else if (unlikely(req->rq_ctx_fini))
860                 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG);
861
862         /* user descriptor flag, null security can't do it anyway */
863         if ((sec->ps_flvr.sf_flags & PTLRPC_SEC_FL_UDESC) &&
864             (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL))
865                 req->rq_pack_udesc = 1;
866
867         /* bulk security flag */
868         if ((req->rq_bulk_read || req->rq_bulk_write) &&
869             sptlrpc_flavor_has_bulk(&req->rq_flvr))
870                 req->rq_pack_bulk = 1;
871 }
872
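/*
 * Called once the request has gone out on the wire. For privacy-mode flavors
 * the clear text lives in rq_clrbuf, so the encrypted wire buffer rq_reqbuf
 * is no longer needed and is freed here, unless it came from a pool.
 */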
873 void sptlrpc_request_out_callback(struct ptlrpc_request *req)
874 {
875         if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV)
876                 return;
877
878         LASSERT(req->rq_clrbuf);
879         if (req->rq_pool || !req->rq_reqbuf)
880                 return;
881
882         OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len);
883         req->rq_reqbuf = NULL;
884         req->rq_reqbuf_len = 0;
885 }
886
887 /**
888  * Given an import \a imp, check whether the current user has a valid context
889  * or not. We may create a new context and try to refresh it, retrying
890  * repeatedly in case of non-fatal errors. Returns 0 on success.
891  */
892 int sptlrpc_import_check_ctx(struct obd_import *imp)
893 {
894         struct ptlrpc_sec     *sec;
895         struct ptlrpc_cli_ctx *ctx;
896         struct ptlrpc_request *req = NULL;
897         int rc;
898         ENTRY;
899
900         cfs_might_sleep();
901
902         sec = sptlrpc_import_sec_ref(imp);
903         ctx = get_my_ctx(sec);
904         sptlrpc_sec_put(sec);
905
906         if (!ctx)
907                 RETURN(-ENOMEM);
908
909         if (cli_ctx_is_eternal(ctx) ||
910             ctx->cc_ops->validate(ctx) == 0) {
911                 sptlrpc_cli_ctx_put(ctx, 1);
912                 RETURN(0);
913         }
914
915         if (cli_ctx_is_error(ctx)) {
916                 sptlrpc_cli_ctx_put(ctx, 1);
917                 RETURN(-EACCES);
918         }
919
920         OBD_ALLOC_PTR(req);
921         if (!req)
922                 RETURN(-ENOMEM);
923
924         cfs_spin_lock_init(&req->rq_lock);
925         cfs_atomic_set(&req->rq_refcount, 10000);
926         CFS_INIT_LIST_HEAD(&req->rq_ctx_chain);
927         cfs_waitq_init(&req->rq_reply_waitq);
928         cfs_waitq_init(&req->rq_set_waitq);
929         req->rq_import = imp;
930         req->rq_flvr = sec->ps_flvr;
931         req->rq_cli_ctx = ctx;
932
933         rc = sptlrpc_req_refresh_ctx(req, 0);
934         LASSERT(cfs_list_empty(&req->rq_ctx_chain));
935         sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
936         OBD_FREE_PTR(req);
937
938         RETURN(rc);
939 }
940
941 /**
942  * Used by ptlrpc client, to perform the pre-defined security transformation
943  * upon the request message of \a req. After this function is called,
944  * req->rq_reqmsg is still accessible as clear text.
945  */
946 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
947 {
948         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
949         int rc = 0;
950         ENTRY;
951
952         LASSERT(ctx);
953         LASSERT(ctx->cc_sec);
954         LASSERT(req->rq_reqbuf || req->rq_clrbuf);
955
956         /* we wrap the bulk request here because now we can be sure
957          * the context is up to date.
958          */
959         if (req->rq_bulk) {
960                 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
961                 if (rc)
962                         RETURN(rc);
963         }
964
965         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
966         case SPTLRPC_SVC_NULL:
967         case SPTLRPC_SVC_AUTH:
968         case SPTLRPC_SVC_INTG:
969                 LASSERT(ctx->cc_ops->sign);
970                 rc = ctx->cc_ops->sign(ctx, req);
971                 break;
972         case SPTLRPC_SVC_PRIV:
973                 LASSERT(ctx->cc_ops->seal);
974                 rc = ctx->cc_ops->seal(ctx, req);
975                 break;
976         default:
977                 LBUG();
978         }
979
980         if (rc == 0) {
981                 LASSERT(req->rq_reqdata_len);
982                 LASSERT(req->rq_reqdata_len % 8 == 0);
983                 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
984         }
985
986         RETURN(rc);
987 }
988
989 static int do_cli_unwrap_reply(struct ptlrpc_request *req)
990 {
991         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
992         int                    rc;
993         ENTRY;
994
995         LASSERT(ctx);
996         LASSERT(ctx->cc_sec);
997         LASSERT(req->rq_repbuf);
998         LASSERT(req->rq_repdata);
999         LASSERT(req->rq_repmsg == NULL);
1000
1001         req->rq_rep_swab_mask = 0;
1002
1003         rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len);
1004         switch (rc) {
1005         case 1:
1006                 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF);
1007         case 0:
1008                 break;
1009         default:
1010                 CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
1011                 RETURN(-EPROTO);
1012         }
1013
1014         if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
1015                 CERROR("replied data length %d too small\n",
1016                        req->rq_repdata_len);
1017                 RETURN(-EPROTO);
1018         }
1019
1020         if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
1021             SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) {
1022                 CERROR("reply policy %u doesn't match request policy %u\n",
1023                        SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
1024                        SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
1025                 RETURN(-EPROTO);
1026         }
1027
1028         switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
1029         case SPTLRPC_SVC_NULL:
1030         case SPTLRPC_SVC_AUTH:
1031         case SPTLRPC_SVC_INTG:
1032                 LASSERT(ctx->cc_ops->verify);
1033                 rc = ctx->cc_ops->verify(ctx, req);
1034                 break;
1035         case SPTLRPC_SVC_PRIV:
1036                 LASSERT(ctx->cc_ops->unseal);
1037                 rc = ctx->cc_ops->unseal(ctx, req);
1038                 break;
1039         default:
1040                 LBUG();
1041         }
1042         LASSERT(rc || req->rq_repmsg || req->rq_resend);
1043
1044         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
1045             !req->rq_ctx_init)
1046                 req->rq_rep_swab_mask = 0;
1047         RETURN(rc);
1048 }
1049
1050 /**
1051  * Used by ptlrpc client, to perform security transformation upon the reply
1052  * message of \a req. After a successful return, req->rq_repmsg points to
1053  * the reply message in clear text.
1054  *
1055  * \pre the reply buffer should have been un-posted from LNet, so nothing is
1056  * going to change.
1057  */
1058 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req)
1059 {
1060         LASSERT(req->rq_repbuf);
1061         LASSERT(req->rq_repdata == NULL);
1062         LASSERT(req->rq_repmsg == NULL);
1063         LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len);
1064
1065         if (req->rq_reply_off == 0 &&
1066             (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) {
1067                 CERROR("real reply with offset 0\n");
1068                 return -EPROTO;
1069         }
1070
1071         if (req->rq_reply_off % 8 != 0) {
1072                 CERROR("reply at odd offset %u\n", req->rq_reply_off);
1073                 return -EPROTO;
1074         }
1075
1076         req->rq_repdata = (struct lustre_msg *)
1077                                 (req->rq_repbuf + req->rq_reply_off);
1078         req->rq_repdata_len = req->rq_nob_received;
1079
1080         return do_cli_unwrap_reply(req);
1081 }
1082
1083 /**
1084  * Used by ptlrpc client, to perform security transformation upon the early
1085  * reply message of \a req. We expect rq_reply_off to be 0 and
1086  * rq_nob_received to be the early reply size.
1087  * 
1088  * Because the receive buffer might still be posted, the reply data might
1089  * change at any time, whether we're holding rq_lock or not. For this reason
1090  * we allocate a separate ptlrpc_request and reply buffer for early reply
1091  * processing.
1092  * 
1093  * \retval 0 success, \a req_ret is filled with a duplicated ptlrpc_request.
1094  * Later the caller must call sptlrpc_cli_finish_early_reply() on the returned
1095  * \a *req_ret to release it.
1096  * \retval -ev error number, and \a req_ret will not be set.
1097  */
1098 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1099                                    struct ptlrpc_request **req_ret)
1100 {
1101         struct ptlrpc_request  *early_req;
1102         char                   *early_buf;
1103         int                     early_bufsz, early_size;
1104         int                     rc;
1105         ENTRY;
1106
1107         OBD_ALLOC_PTR(early_req);
1108         if (early_req == NULL)
1109                 RETURN(-ENOMEM);
1110
1111         early_size = req->rq_nob_received;
1112         early_bufsz = size_roundup_power2(early_size);
1113         OBD_ALLOC(early_buf, early_bufsz);
1114         if (early_buf == NULL)
1115                 GOTO(err_req, rc = -ENOMEM);
1116
1117         /* sanity checks and copy the data out; do it inside the spinlock */
1118         cfs_spin_lock(&req->rq_lock);
1119
1120         if (req->rq_replied) {
1121                 cfs_spin_unlock(&req->rq_lock);
1122                 GOTO(err_buf, rc = -EALREADY);
1123         }
1124
1125         LASSERT(req->rq_repbuf);
1126         LASSERT(req->rq_repdata == NULL);
1127         LASSERT(req->rq_repmsg == NULL);
1128
1129         if (req->rq_reply_off != 0) {
1130                 CERROR("early reply with offset %u\n", req->rq_reply_off);
1131                 cfs_spin_unlock(&req->rq_lock);
1132                 GOTO(err_buf, rc = -EPROTO);
1133         }
1134
1135         if (req->rq_nob_received != early_size) {
1136                 /* even if another early reply arrived, the size should be the same */
1137                 CERROR("data size has changed from %u to %u\n",
1138                        early_size, req->rq_nob_received);
1139                 cfs_spin_unlock(&req->rq_lock);
1140                 GOTO(err_buf, rc = -EINVAL);
1141         }
1142
1143         if (req->rq_nob_received < sizeof(struct lustre_msg)) {
1144                 CERROR("early reply length %d too small\n",
1145                        req->rq_nob_received);
1146                 cfs_spin_unlock(&req->rq_lock);
1147                 GOTO(err_buf, rc = -EALREADY);
1148         }
1149
1150         memcpy(early_buf, req->rq_repbuf, early_size);
1151         cfs_spin_unlock(&req->rq_lock);
1152
1153         cfs_spin_lock_init(&early_req->rq_lock);
1154         early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx);
1155         early_req->rq_flvr = req->rq_flvr;
1156         early_req->rq_repbuf = early_buf;
1157         early_req->rq_repbuf_len = early_bufsz;
1158         early_req->rq_repdata = (struct lustre_msg *) early_buf;
1159         early_req->rq_repdata_len = early_size;
1160         early_req->rq_early = 1;
1161         early_req->rq_reqmsg = req->rq_reqmsg;
1162
1163         rc = do_cli_unwrap_reply(early_req);
1164         if (rc) {
1165                 DEBUG_REQ(D_ADAPTTO, early_req,
1166                           "error %d unwrap early reply", rc);
1167                 GOTO(err_ctx, rc);
1168         }
1169
1170         LASSERT(early_req->rq_repmsg);
1171         *req_ret = early_req;
1172         RETURN(0);
1173
1174 err_ctx:
1175         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1176 err_buf:
1177         OBD_FREE(early_buf, early_bufsz);
1178 err_req:
1179         OBD_FREE_PTR(early_req);
1180         RETURN(rc);
1181 }
1182
1183 /**
1184  * Used by ptlrpc client, to release a processed early reply \a early_req.
1185  *
1186  * \pre \a early_req was obtained from calling sptlrpc_cli_unwrap_early_reply().
1187  */
1188 void sptlrpc_cli_finish_early_reply(struct ptlrpc_request *early_req)
1189 {
1190         LASSERT(early_req->rq_repbuf);
1191         LASSERT(early_req->rq_repdata);
1192         LASSERT(early_req->rq_repmsg);
1193
1194         sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
1195         OBD_FREE(early_req->rq_repbuf, early_req->rq_repbuf_len);
1196         OBD_FREE_PTR(early_req);
1197 }
1198
1199 /**************************************************
1200  * sec ID                                         *
1201  **************************************************/
1202
1203 /*
1204  * a "fixed" sec (e.g. null) uses sec_id < 0
1205  */
1206 static cfs_atomic_t sptlrpc_sec_id = CFS_ATOMIC_INIT(1);
1207
1208 int sptlrpc_get_next_secid(void)
1209 {
1210         return cfs_atomic_inc_return(&sptlrpc_sec_id);
1211 }
1212 EXPORT_SYMBOL(sptlrpc_get_next_secid);
1213
1214 /**************************************************
1215  * client side high-level security APIs           *
1216  **************************************************/
1217
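/*
 * Flush contexts belonging to \a uid from \a sec's context cache via the
 * policy's flush_ctx_cache hook.
 */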
1218 static int sec_cop_flush_ctx_cache(struct ptlrpc_sec *sec, uid_t uid,
1219                                    int grace, int force)
1220 {
1221         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1222
1223         LASSERT(policy->sp_cops);
1224         LASSERT(policy->sp_cops->flush_ctx_cache);
1225
1226         return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force);
1227 }
1228
1229 static void sec_cop_destroy_sec(struct ptlrpc_sec *sec)
1230 {
1231         struct ptlrpc_sec_policy *policy = sec->ps_policy;
1232
1233         LASSERT(cfs_atomic_read(&sec->ps_refcount) == 0);
1234         LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
1235         LASSERT(policy->sp_cops->destroy_sec);
1236
1237         CDEBUG(D_SEC, "%s@%p: being destroyed\n", sec->ps_policy->sp_name, sec);
1238
1239         policy->sp_cops->destroy_sec(sec);
1240         sptlrpc_policy_put(policy);
1241 }
1242
1243 void sptlrpc_sec_destroy(struct ptlrpc_sec *sec)
1244 {
1245         sec_cop_destroy_sec(sec);
1246 }
1247 EXPORT_SYMBOL(sptlrpc_sec_destroy);
1248
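/*
 * If the policy implements kill_sec, notify it that \a sec is being shut
 * down and force-flush all of its cached contexts.
 */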
1249 static void sptlrpc_sec_kill(struct ptlrpc_sec *sec)
1250 {
1251         LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1252
1253         if (sec->ps_policy->sp_cops->kill_sec) {
1254                 sec->ps_policy->sp_cops->kill_sec(sec);
1255
1256                 sec_cop_flush_ctx_cache(sec, -1, 1, 1);
1257         }
1258 }
1259
1260 struct ptlrpc_sec *sptlrpc_sec_get(struct ptlrpc_sec *sec)
1261 {
1262         if (sec) {
1263                 LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1264                 cfs_atomic_inc(&sec->ps_refcount);
1265         }
1266
1267         return sec;
1268 }
1269 EXPORT_SYMBOL(sptlrpc_sec_get);
1270
1271 void sptlrpc_sec_put(struct ptlrpc_sec *sec)
1272 {
1273         if (sec) {
1274                 LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1275
1276                 if (cfs_atomic_dec_and_test(&sec->ps_refcount)) {
1277                         LASSERT(cfs_atomic_read(&sec->ps_nctx) == 0);
1278
1279                         sptlrpc_gc_del_sec(sec);
1280                         sec_cop_destroy_sec(sec);
1281                 }
1282         }
1283 }
1284 EXPORT_SYMBOL(sptlrpc_sec_put);
1285
1286 /*
1287  * the policy module is responsible for taking a reference on the import
1288  */
1289 static
1290 struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
1291                                        struct ptlrpc_svc_ctx *svc_ctx,
1292                                        struct sptlrpc_flavor *sf,
1293                                        enum lustre_sec_part sp)
1294 {
1295         struct ptlrpc_sec_policy *policy;
1296         struct ptlrpc_sec        *sec;
1297         char                      str[32];
1298         ENTRY;
1299
1300         if (svc_ctx) {
1301                 LASSERT(imp->imp_dlm_fake == 1);
1302
1303                 CDEBUG(D_SEC, "%s %s: reverse sec using flavor %s\n",
1304                        imp->imp_obd->obd_type->typ_name,
1305                        imp->imp_obd->obd_name,
1306                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1307
1308                 policy = sptlrpc_policy_get(svc_ctx->sc_policy);
1309                 sf->sf_flags |= PTLRPC_SEC_FL_REVERSE | PTLRPC_SEC_FL_ROOTONLY;
1310         } else {
1311                 LASSERT(imp->imp_dlm_fake == 0);
1312
1313                 CDEBUG(D_SEC, "%s %s: select security flavor %s\n",
1314                        imp->imp_obd->obd_type->typ_name,
1315                        imp->imp_obd->obd_name,
1316                        sptlrpc_flavor2name(sf, str, sizeof(str)));
1317
1318                 policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
1319                 if (!policy) {
1320                         CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
1321                         RETURN(NULL);
1322                 }
1323         }
1324
1325         sec = policy->sp_cops->create_sec(imp, svc_ctx, sf);
1326         if (sec) {
1327                 cfs_atomic_inc(&sec->ps_refcount);
1328
1329                 sec->ps_part = sp;
1330
1331                 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx)
1332                         sptlrpc_gc_add_sec(sec);
1333         } else {
1334                 sptlrpc_policy_put(policy);
1335         }
1336
1337         RETURN(sec);
1338 }
1339
1340 struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
1341 {
1342         struct ptlrpc_sec *sec;
1343
1344         cfs_spin_lock(&imp->imp_lock);
1345         sec = sptlrpc_sec_get(imp->imp_sec);
1346         cfs_spin_unlock(&imp->imp_lock);
1347
1348         return sec;
1349 }
1350 EXPORT_SYMBOL(sptlrpc_import_sec_ref);
1351
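/*
 * Install \a sec as the import's security instance under imp_lock, then
 * kill and release the previously installed one, if any.
 */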
1352 static void sptlrpc_import_sec_install(struct obd_import *imp,
1353                                        struct ptlrpc_sec *sec)
1354 {
1355         struct ptlrpc_sec *old_sec;
1356
1357         LASSERT(cfs_atomic_read(&sec->ps_refcount) > 0);
1358
1359         cfs_spin_lock(&imp->imp_lock);
1360         old_sec = imp->imp_sec;
1361         imp->imp_sec = sec;
1362         cfs_spin_unlock(&imp->imp_lock);
1363
1364         if (old_sec) {
1365                 sptlrpc_sec_kill(old_sec);
1366
1367                 /* balance the ref taken by this import */
1368                 sptlrpc_sec_put(old_sec);
1369         }
1370 }
1371
1372 static inline
1373 int flavor_equal(struct sptlrpc_flavor *sf1, struct sptlrpc_flavor *sf2)
1374 {
1375         return (memcmp(sf1, sf2, sizeof(*sf1)) == 0);
1376 }
1377
1378 static inline
1379 void flavor_copy(struct sptlrpc_flavor *dst, struct sptlrpc_flavor *src)
1380 {
1381         *dst = *src;
1382 }
1383
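/*
 * Update the flavor of an existing \a sec in place (same policy and
 * mechanism), warning if the sec flags change.
 */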
1384 static void sptlrpc_import_sec_adapt_inplace(struct obd_import *imp,
1385                                              struct ptlrpc_sec *sec,
1386                                              struct sptlrpc_flavor *sf)
1387 {
1388         char    str1[32], str2[32];
1389
1390         if (sec->ps_flvr.sf_flags != sf->sf_flags)
1391                 CWARN("changing sec flags: %s -> %s\n",
1392                       sptlrpc_secflags2str(sec->ps_flvr.sf_flags,
1393                                            str1, sizeof(str1)),
1394                       sptlrpc_secflags2str(sf->sf_flags,
1395                                            str2, sizeof(str2)));
1396
1397         cfs_spin_lock(&sec->ps_lock);
1398         flavor_copy(&sec->ps_flvr, sf);
1399         cfs_spin_unlock(&sec->ps_lock);
1400 }
1401
1402 /**
1403  * To get an appropriate ptlrpc_sec for the \a imp, according to the current
1404  * configuration. When this is called, imp->imp_sec may or may not be NULL.
1405  *
1406  *  - regular import: \a svc_ctx should be NULL and \a flvr is ignored;
1407  *  - reverse import: \a svc_ctx and \a flvr are obtained from incoming request.
1408  */
1409 int sptlrpc_import_sec_adapt(struct obd_import *imp,
1410                              struct ptlrpc_svc_ctx *svc_ctx,
1411                              struct sptlrpc_flavor *flvr)
1412 {
1413         struct ptlrpc_connection   *conn;
1414         struct sptlrpc_flavor       sf;
1415         struct ptlrpc_sec          *sec, *newsec;
1416         enum lustre_sec_part        sp;
1417         char                        str[24];
1418         int                         rc = 0;
1419         ENTRY;
1420
1421         cfs_might_sleep();
1422
1423         if (imp == NULL)
1424                 RETURN(0);
1425
1426         conn = imp->imp_connection;
1427
1428         if (svc_ctx == NULL) {
1429                 struct client_obd *cliobd = &imp->imp_obd->u.cli;
1430                 /*
1431                  * normal import: determine the flavor from the rule set,
1432                  * except for the mgc, whose flavor is predetermined.
1433                  */
1434                 if (cliobd->cl_sp_me == LUSTRE_SP_MGC)
1435                         sf = cliobd->cl_flvr_mgc;
1436                 else 
1437                         sptlrpc_conf_choose_flavor(cliobd->cl_sp_me,
1438                                                    cliobd->cl_sp_to,
1439                                                    &cliobd->cl_target_uuid,
1440                                                    conn->c_self, &sf);
1441
1442                 sp = imp->imp_obd->u.cli.cl_sp_me;
1443         } else {
1444                 /* reverse import: determine flavor from the incoming request */
1445                 sf = *flvr;
1446
1447                 if (sf.sf_rpc != SPTLRPC_FLVR_NULL)
1448                         sf.sf_flags = PTLRPC_SEC_FL_REVERSE |
1449                                       PTLRPC_SEC_FL_ROOTONLY;
1450
1451                 sp = sptlrpc_target_sec_part(imp->imp_obd);
1452         }
1453
1454         sec = sptlrpc_import_sec_ref(imp);
1455         if (sec) {
1456                 char    str2[24];
1457
1458                 if (flavor_equal(&sf, &sec->ps_flvr))
1459                         GOTO(out, rc);
1460
1461                 CWARN("import %s->%s: changing flavor %s -> %s\n",
1462                       imp->imp_obd->obd_name,
1463                       obd_uuid2str(&conn->c_remote_uuid),
1464                       sptlrpc_flavor2name(&sec->ps_flvr, str, sizeof(str)),
1465                       sptlrpc_flavor2name(&sf, str2, sizeof(str2)));
1466
1467                 if (SPTLRPC_FLVR_POLICY(sf.sf_rpc) ==
1468                     SPTLRPC_FLVR_POLICY(sec->ps_flvr.sf_rpc) &&
1469                     SPTLRPC_FLVR_MECH(sf.sf_rpc) ==
1470                     SPTLRPC_FLVR_MECH(sec->ps_flvr.sf_rpc)) {
1471                         sptlrpc_import_sec_adapt_inplace(imp, sec, &sf);
1472                         GOTO(out, rc);
1473                 }
1474         } else {
1475                 CWARN("import %s->%s netid %x: select flavor %s\n",
1476                       imp->imp_obd->obd_name,
1477                       obd_uuid2str(&conn->c_remote_uuid),
1478                       LNET_NIDNET(conn->c_self),
1479                       sptlrpc_flavor2name(&sf, str, sizeof(str)));
1480         }
1481
1482         cfs_mutex_down(&imp->imp_sec_mutex);
1483
1484         newsec = sptlrpc_sec_create(imp, svc_ctx, &sf, sp);
1485         if (newsec) {
1486                 sptlrpc_import_sec_install(imp, newsec);
1487         } else {
1488                 CERROR("import %s->%s: failed to create new sec\n",
1489                        imp->imp_obd->obd_name,
1490                        obd_uuid2str(&conn->c_remote_uuid));
1491                 rc = -EPERM;
1492         }
1493
1494         cfs_mutex_up(&imp->imp_sec_mutex);
1495 out:
1496         sptlrpc_sec_put(sec);
1497         RETURN(rc);
1498 }
1499
1500 void sptlrpc_import_sec_put(struct obd_import *imp)
1501 {
1502         if (imp->imp_sec) {
1503                 sptlrpc_sec_kill(imp->imp_sec);
1504
1505                 sptlrpc_sec_put(imp->imp_sec);
1506                 imp->imp_sec = NULL;
1507         }
1508 }
1509
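/*
 * Flush contexts of \a uid from the import's sec; uid == -1 means flush
 * contexts of all users.
 */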
1510 static void import_flush_ctx_common(struct obd_import *imp,
1511                                     uid_t uid, int grace, int force)
1512 {
1513         struct ptlrpc_sec *sec;
1514
1515         if (imp == NULL)
1516                 return;
1517
1518         sec = sptlrpc_import_sec_ref(imp);
1519         if (sec == NULL)
1520                 return;
1521
1522         sec_cop_flush_ctx_cache(sec, uid, grace, force);
1523         sptlrpc_sec_put(sec);
1524 }
1525
1526 void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
1527 {
1528         /* it's important to use grace mode, see the explanation in
1529          * sptlrpc_req_refresh_ctx() */
1530         import_flush_ctx_common(imp, 0, 1, 1);
1531 }
1532
1533 void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
1534 {
1535         import_flush_ctx_common(imp, cfs_curproc_uid(), 1, 1);
1536 }
1537 EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
1538
1539 void sptlrpc_import_flush_all_ctx(struct obd_import *imp)
1540 {
1541         import_flush_ctx_common(imp, -1, 1, 1);
1542 }
1543 EXPORT_SYMBOL(sptlrpc_import_flush_all_ctx);
1544
1545 /**
1546  * Used by ptlrpc client to allocate the request buffer of \a req. Upon
1547  * successful return, req->rq_reqmsg points to a buffer of size \a msgsize.
1548  */
1549 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize)
1550 {
1551         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1552         struct ptlrpc_sec_policy *policy;
1553         int rc;
1554
1555         LASSERT(ctx);
1556         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1557         LASSERT(ctx->cc_sec);
1558         LASSERT(ctx->cc_sec->ps_policy);
1559         LASSERT(req->rq_reqmsg == NULL);
1560
1561         policy = ctx->cc_sec->ps_policy;
1562         rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize);
1563         if (!rc) {
1564                 LASSERT(req->rq_reqmsg);
1565                 LASSERT(req->rq_reqbuf || req->rq_clrbuf);
1566
1567                 /* zeroing preallocated buffer */
1568                 if (req->rq_pool)
1569                         memset(req->rq_reqmsg, 0, msgsize);
1570         }
1571
1572         return rc;
1573 }
1574
1575 /**
1576  * Used by ptlrpc client to free request buffer of \a req. After this
1577  * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1578  */
1579 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req)
1580 {
1581         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1582         struct ptlrpc_sec_policy *policy;
1583
1584         LASSERT(ctx);
1585         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1586         LASSERT(ctx->cc_sec);
1587         LASSERT(ctx->cc_sec->ps_policy);
1588
1589         if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL)
1590                 return;
1591
1592         policy = ctx->cc_sec->ps_policy;
1593         policy->sp_cops->free_reqbuf(ctx->cc_sec, req);
1594         req->rq_reqmsg = NULL;
1595 }
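
/*
 * Illustrative sketch, not part of the original code: how a client-side
 * caller might pair the two request-buffer helpers above.  Identifiers
 * prefixed with "example_" and "my_" are hypothetical, and the block is
 * guarded by #if 0 so it is never compiled.
 */
#if 0
static int example_reqbuf_cycle(struct ptlrpc_request *my_req, int my_msgsize)
{
        int rc;

        rc = sptlrpc_cli_alloc_reqbuf(my_req, my_msgsize);
        if (rc)
                return rc;

        /* ... pack fields into my_req->rq_reqmsg (my_msgsize bytes) ... */

        sptlrpc_cli_free_reqbuf(my_req);        /* rq_reqmsg becomes NULL */
        return 0;
}
#endif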
1596
1597 /*
1598  * NOTE caller must guarantee the buffer size is enough for the enlargement
1599  */
1600 void _sptlrpc_enlarge_msg_inplace(struct lustre_msg *msg,
1601                                   int segment, int newsize)
1602 {
1603         void   *src, *dst;
1604         int     oldsize, oldmsg_size, movesize;
1605
1606         LASSERT(segment < msg->lm_bufcount);
1607         LASSERT(msg->lm_buflens[segment] <= newsize);
1608
1609         if (msg->lm_buflens[segment] == newsize)
1610                 return;
1611
1612         /* nothing to move if we are enlarging the last segment */
1613         if (segment == msg->lm_bufcount - 1) {
1614                 msg->lm_buflens[segment] = newsize;
1615                 return;
1616         }
1617
1618         oldsize = msg->lm_buflens[segment];
1619
1620         src = lustre_msg_buf(msg, segment + 1, 0);
1621         msg->lm_buflens[segment] = newsize;
1622         dst = lustre_msg_buf(msg, segment + 1, 0);
1623         msg->lm_buflens[segment] = oldsize;
1624
1625         /* move everything from segment + 1 through the last segment */
1626         LASSERT(msg->lm_magic == LUSTRE_MSG_MAGIC_V2);
1627         oldmsg_size = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
1628         movesize = oldmsg_size - ((unsigned long) src - (unsigned long) msg);
1629         LASSERT(movesize >= 0);
1630
1631         if (movesize)
1632                 memmove(dst, src, movesize);
1633
1634         /* note we don't clear the areas where the old data lived; it's not secret */
1635
1636         /* finally set new segment size */
1637         msg->lm_buflens[segment] = newsize;
1638 }
1639 EXPORT_SYMBOL(_sptlrpc_enlarge_msg_inplace);
1640
1641 /**
1642  * Used by ptlrpc client to enlarge the \a segment of the request message
1643  * pointed to by req->rq_reqmsg to size \a newsize; all previously filled-in
1644  * data is preserved across the enlargement. This must be called after the
1645  * original request buffer has been allocated.
1646  *
1647  * \note After this is called, rq_reqmsg and rq_reqlen might have changed,
1648  * so the caller should refresh its local pointers if needed.
1649  */
1650 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1651                                int segment, int newsize)
1652 {
1653         struct ptlrpc_cli_ctx    *ctx = req->rq_cli_ctx;
1654         struct ptlrpc_sec_cops   *cops;
1655         struct lustre_msg        *msg = req->rq_reqmsg;
1656
1657         LASSERT(ctx);
1658         LASSERT(msg);
1659         LASSERT(msg->lm_bufcount > segment);
1660         LASSERT(msg->lm_buflens[segment] <= newsize);
1661
1662         if (msg->lm_buflens[segment] == newsize)
1663                 return 0;
1664
1665         cops = ctx->cc_sec->ps_policy->sp_cops;
1666         LASSERT(cops->enlarge_reqbuf);
1667         return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize);
1668 }
1669 EXPORT_SYMBOL(sptlrpc_cli_enlarge_reqbuf);
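
/*
 * Illustrative sketch, not part of the original code: per the note above,
 * rq_reqmsg may be reallocated by the enlargement, so a cached segment
 * pointer must be re-fetched afterwards.  Names prefixed with "example_"
 * and "my_" are hypothetical; the block is under #if 0 and never compiled.
 */
#if 0
static void *example_enlarge_and_refetch(struct ptlrpc_request *my_req,
                                         int my_segment, int my_newsize)
{
        if (sptlrpc_cli_enlarge_reqbuf(my_req, my_segment, my_newsize) != 0)
                return NULL;

        /* rq_reqmsg might have moved: look the segment up again */
        return lustre_msg_buf(my_req->rq_reqmsg, my_segment, my_newsize);
}
#endif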
1670
1671 /**
1672  * Used by ptlrpc client to allocate reply buffer of \a req.
1673  *
1674  * \note After this, req->rq_repmsg is still not accessible.
1675  */
1676 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
1677 {
1678         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1679         struct ptlrpc_sec_policy *policy;
1680         ENTRY;
1681
1682         LASSERT(ctx);
1683         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1684         LASSERT(ctx->cc_sec);
1685         LASSERT(ctx->cc_sec->ps_policy);
1686
1687         if (req->rq_repbuf)
1688                 RETURN(0);
1689
1690         policy = ctx->cc_sec->ps_policy;
1691         RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
1692 }
1693
1694 /**
1695  * Used by ptlrpc client to free reply buffer of \a req. After this
1696  * req->rq_repmsg is set to NULL and should not be accessed anymore.
1697  */
1698 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
1699 {
1700         struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
1701         struct ptlrpc_sec_policy *policy;
1702         ENTRY;
1703
1704         LASSERT(ctx);
1705         LASSERT(cfs_atomic_read(&ctx->cc_refcount));
1706         LASSERT(ctx->cc_sec);
1707         LASSERT(ctx->cc_sec->ps_policy);
1708
1709         if (req->rq_repbuf == NULL)
1710                 return;
1711         LASSERT(req->rq_repbuf_len);
1712
1713         policy = ctx->cc_sec->ps_policy;
1714         policy->sp_cops->free_repbuf(ctx->cc_sec, req);
1715         req->rq_repmsg = NULL;
1716         EXIT;
1717 }
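
/*
 * Illustrative sketch, not part of the original code: the reply-buffer
 * helpers above are typically used around sending a request.  Names with
 * "example_"/"my_" prefixes are hypothetical; the block is under #if 0
 * and never compiled.
 */
#if 0
static int example_repbuf_cycle(struct ptlrpc_request *my_req, int my_msgsize)
{
        int rc;

        rc = sptlrpc_cli_alloc_repbuf(my_req, my_msgsize);
        if (rc)
                return rc;

        /* ... send the request; rq_repmsg becomes valid only after the
         * reply has been received and unwrapped ... */

        sptlrpc_cli_free_repbuf(my_req);        /* rq_repmsg becomes NULL */
        return 0;
}
#endif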
1718
1719 int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
1720                                 struct ptlrpc_cli_ctx *ctx)
1721 {
1722         struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy;
1723
1724         if (!policy->sp_cops->install_rctx)
1725                 return 0;
1726         return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx);
1727 }
1728
1729 int sptlrpc_svc_install_rvs_ctx(struct obd_import *imp,
1730                                 struct ptlrpc_svc_ctx *ctx)
1731 {
1732         struct ptlrpc_sec_policy *policy = ctx->sc_policy;
1733
1734         if (!policy->sp_sops->install_rctx)
1735                 return 0;
1736         return policy->sp_sops->install_rctx(imp, ctx);
1737 }
1738
1739 /****************************************
1740  * server side security                 *
1741  ****************************************/
1742
1743 static int flavor_allowed(struct sptlrpc_flavor *exp,
1744                           struct ptlrpc_request *req)
1745 {
1746         struct sptlrpc_flavor *flvr = &req->rq_flvr;
1747
1748         if (exp->sf_rpc == SPTLRPC_FLVR_ANY || exp->sf_rpc == flvr->sf_rpc)
1749                 return 1;
1750
1751         if ((req->rq_ctx_init || req->rq_ctx_fini) &&
1752             SPTLRPC_FLVR_POLICY(exp->sf_rpc) ==
1753             SPTLRPC_FLVR_POLICY(flvr->sf_rpc) &&
1754             SPTLRPC_FLVR_MECH(exp->sf_rpc) == SPTLRPC_FLVR_MECH(flvr->sf_rpc))
1755                 return 1;
1756
1757         return 0;
1758 }
1759
1760 #define EXP_FLVR_UPDATE_EXPIRE      (OBD_TIMEOUT_DEFAULT + 10)
1761
1762 /**
1763  * Given an export \a exp, check whether the flavor of incoming \a req
1764  * is allowed by the export \a exp. The main logic is about handling
1765  * changing configurations. A return value of 0 means success.
1766  */
1767 int sptlrpc_target_export_check(struct obd_export *exp,
1768                                 struct ptlrpc_request *req)
1769 {
1770         struct sptlrpc_flavor   flavor;
1771
1772         if (exp == NULL)
1773                 return 0;
1774
1775         /* a client-side export has no imp_reverse, so skip it.
1776          * FIXME maybe we should check the flavor here as well? */
1777         if (exp->exp_imp_reverse == NULL)
1778                 return 0;
1779
1780         /* don't care about ctx fini rpc */
1781         if (req->rq_ctx_fini)
1782                 return 0;
1783
1784         cfs_spin_lock(&exp->exp_lock);
1785
1786         /* if the flavor has just changed (exp->exp_flvr_changed != 0), we wait
1787          * for the first req with the new flavor, then treat it as the current
1788          * flavor and adapt the reverse sec accordingly.
1789          * note the first rpc with the new flavor might not carry a root ctx, in
1790          * which case we delay the sec_adapt by leaving exp_flvr_adapt == 1. */
1791         if (unlikely(exp->exp_flvr_changed) &&
1792             flavor_allowed(&exp->exp_flvr_old[1], req)) {
1793                 /* make the new flavor the "current" one, and the old ones
1794                  * about-to-expire */
1795                 CDEBUG(D_SEC, "exp %p: just changed: %x->%x\n", exp,
1796                        exp->exp_flvr.sf_rpc, exp->exp_flvr_old[1].sf_rpc);
1797                 flavor = exp->exp_flvr_old[1];
1798                 exp->exp_flvr_old[1] = exp->exp_flvr_old[0];
1799                 exp->exp_flvr_expire[1] = exp->exp_flvr_expire[0];
1800                 exp->exp_flvr_old[0] = exp->exp_flvr;
1801                 exp->exp_flvr_expire[0] = cfs_time_current_sec() +
1802                                           EXP_FLVR_UPDATE_EXPIRE;
1803                 exp->exp_flvr = flavor;
1804
1805                 /* flavor change finished */
1806                 exp->exp_flvr_changed = 0;
1807                 LASSERT(exp->exp_flvr_adapt == 1);
1808
1809                 /* if it's gss, we are only interested in root ctx init */
1810                 if (req->rq_auth_gss &&
1811                     !(req->rq_ctx_init &&
1812                       (req->rq_auth_usr_root || req->rq_auth_usr_mdt ||
1813                        req->rq_auth_usr_ost))) {
1814                         cfs_spin_unlock(&exp->exp_lock);
1815                         CDEBUG(D_SEC, "is good but not root(%d:%d:%d:%d:%d)\n",
1816                                req->rq_auth_gss, req->rq_ctx_init,
1817                                req->rq_auth_usr_root, req->rq_auth_usr_mdt,
1818                                req->rq_auth_usr_ost);
1819                         return 0;
1820                 }
1821
1822                 exp->exp_flvr_adapt = 0;
1823                 cfs_spin_unlock(&exp->exp_lock);
1824
1825                 return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1826                                                 req->rq_svc_ctx, &flavor);
1827         }
1828
1829         /* if it equals the current flavor, we accept it, but still need to
1830          * deal with the reverse sec/ctx */
1831         if (likely(flavor_allowed(&exp->exp_flvr, req))) {
1832                 /* most cases should return here; we are only interested in
1833                  * gss root ctx init */
1834                 if (!req->rq_auth_gss || !req->rq_ctx_init ||
1835                     (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
1836                      !req->rq_auth_usr_ost)) {
1837                         cfs_spin_unlock(&exp->exp_lock);
1838                         return 0;
1839                 }
1840
1841                 /* if the flavor has just changed, we should not proceed; the
1842                  * current flavor will be discovered and replaced shortly, so
1843                  * just let _this_ rpc pass through */
1844                 if (exp->exp_flvr_changed) {
1845                         LASSERT(exp->exp_flvr_adapt);
1846                         cfs_spin_unlock(&exp->exp_lock);
1847                         return 0;
1848                 }
1849
1850                 if (exp->exp_flvr_adapt) {
1851                         exp->exp_flvr_adapt = 0;
1852                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): do delayed adapt\n",
1853                                exp, exp->exp_flvr.sf_rpc,
1854                                exp->exp_flvr_old[0].sf_rpc,
1855                                exp->exp_flvr_old[1].sf_rpc);
1856                         flavor = exp->exp_flvr;
1857                         cfs_spin_unlock(&exp->exp_lock);
1858
1859                         return sptlrpc_import_sec_adapt(exp->exp_imp_reverse,
1860                                                         req->rq_svc_ctx,
1861                                                         &flavor);
1862                 } else {
1863                         CDEBUG(D_SEC, "exp %p (%x|%x|%x): is current flavor, "
1864                                "install rvs ctx\n", exp, exp->exp_flvr.sf_rpc,
1865                                exp->exp_flvr_old[0].sf_rpc,
1866                                exp->exp_flvr_old[1].sf_rpc);
1867                         cfs_spin_unlock(&exp->exp_lock);
1868
1869                         return sptlrpc_svc_install_rvs_ctx(exp->exp_imp_reverse,
1870                                                            req->rq_svc_ctx);
1871                 }
1872         }
1873
1874         if (exp->exp_flvr_expire[0]) {
1875                 if (exp->exp_flvr_expire[0] >= cfs_time_current_sec()) {
1876                         if (flavor_allowed(&exp->exp_flvr_old[0], req)) {
1877                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1878                                        "middle one ("CFS_DURATION_T")\n", exp,
1879                                        exp->exp_flvr.sf_rpc,
1880                                        exp->exp_flvr_old[0].sf_rpc,
1881                                        exp->exp_flvr_old[1].sf_rpc,
1882                                        exp->exp_flvr_expire[0] -
1883                                                 cfs_time_current_sec());
1884                                 cfs_spin_unlock(&exp->exp_lock);
1885                                 return 0;
1886                         }
1887                 } else {
1888                         CDEBUG(D_SEC, "mark middle expired\n");
1889                         exp->exp_flvr_expire[0] = 0;
1890                 }
1891                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match middle\n", exp,
1892                        exp->exp_flvr.sf_rpc,
1893                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1894                        req->rq_flvr.sf_rpc);
1895         }
1896
1897         /* now it doesn't match the current flavor; the only chance to accept
1898          * it is a match against an old flavor which has not yet expired. */
1899         if (exp->exp_flvr_changed == 0 && exp->exp_flvr_expire[1]) {
1900                 if (exp->exp_flvr_expire[1] >= cfs_time_current_sec()) {
1901                         if (flavor_allowed(&exp->exp_flvr_old[1], req)) {
1902                                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): match the "
1903                                        "oldest one ("CFS_DURATION_T")\n", exp,
1904                                        exp->exp_flvr.sf_rpc,
1905                                        exp->exp_flvr_old[0].sf_rpc,
1906                                        exp->exp_flvr_old[1].sf_rpc,
1907                                        exp->exp_flvr_expire[1] -
1908                                                 cfs_time_current_sec());
1909                                 cfs_spin_unlock(&exp->exp_lock);
1910                                 return 0;
1911                         }
1912                 } else {
1913                         CDEBUG(D_SEC, "mark oldest expired\n");
1914                         exp->exp_flvr_expire[1] = 0;
1915                 }
1916                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): %x not match found\n",
1917                        exp, exp->exp_flvr.sf_rpc,
1918                        exp->exp_flvr_old[0].sf_rpc, exp->exp_flvr_old[1].sf_rpc,
1919                        req->rq_flvr.sf_rpc);
1920         } else {
1921                 CDEBUG(D_SEC, "exp %p (%x|%x|%x): skip the last one\n",
1922                        exp, exp->exp_flvr.sf_rpc, exp->exp_flvr_old[0].sf_rpc,
1923                        exp->exp_flvr_old[1].sf_rpc);
1924         }
1925
1926         cfs_spin_unlock(&exp->exp_lock);
1927
1928         CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with "
1929               "unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n",
1930               exp, exp->exp_obd->obd_name,
1931               req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini,
1932               req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost,
1933               req->rq_flvr.sf_rpc,
1934               exp->exp_flvr.sf_rpc,
1935               exp->exp_flvr_old[0].sf_rpc,
1936               exp->exp_flvr_expire[0] ?
1937               (unsigned long) (exp->exp_flvr_expire[0] -
1938                                cfs_time_current_sec()) : 0,
1939               exp->exp_flvr_old[1].sf_rpc,
1940               exp->exp_flvr_expire[1] ?
1941               (unsigned long) (exp->exp_flvr_expire[1] -
1942                                cfs_time_current_sec()) : 0);
1943         return -EACCES;
1944 }
1945 EXPORT_SYMBOL(sptlrpc_target_export_check);
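
/*
 * Illustrative sketch, not part of the original code: a server-side request
 * handler would call the export check before doing any real work.  Names
 * prefixed with "example_"/"my_" are hypothetical; the block is under #if 0
 * and never compiled.
 */
#if 0
static int example_check_request_flavor(struct obd_export *my_exp,
                                        struct ptlrpc_request *my_req)
{
        int rc;

        rc = sptlrpc_target_export_check(my_exp, my_req);
        if (rc)
                return rc;      /* typically -EACCES: unauthorized flavor */

        /* ... continue normal request processing ... */
        return 0;
}
#endif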
1946
1947 void sptlrpc_target_update_exp_flavor(struct obd_device *obd,
1948                                       struct sptlrpc_rule_set *rset)
1949 {
1950         struct obd_export       *exp;
1951         struct sptlrpc_flavor    new_flvr;
1952
1953         LASSERT(obd);
1954
1955         cfs_spin_lock(&obd->obd_dev_lock);
1956
1957         cfs_list_for_each_entry(exp, &obd->obd_exports, exp_obd_chain) {
1958                 if (exp->exp_connection == NULL)
1959                         continue;
1960
1961                 /* note if this export's flavor has just been updated
1962                  * (exp_flvr_changed == 1), this will override the
1963                  * previous update. */
1964                 cfs_spin_lock(&exp->exp_lock);
1965                 sptlrpc_target_choose_flavor(rset, exp->exp_sp_peer,
1966                                              exp->exp_connection->c_peer.nid,
1967                                              &new_flvr);
1968                 if (exp->exp_flvr_changed ||
1969                     !flavor_equal(&new_flvr, &exp->exp_flvr)) {
1970                         exp->exp_flvr_old[1] = new_flvr;
1971                         exp->exp_flvr_expire[1] = 0;
1972                         exp->exp_flvr_changed = 1;
1973                         exp->exp_flvr_adapt = 1;
1974
1975                         CDEBUG(D_SEC, "exp %p (%s): updated flavor %x->%x\n",
1976                                exp, sptlrpc_part2name(exp->exp_sp_peer),
1977                                exp->exp_flvr.sf_rpc,
1978                                exp->exp_flvr_old[1].sf_rpc);
1979                 }
1980                 cfs_spin_unlock(&exp->exp_lock);
1981         }
1982
1983         cfs_spin_unlock(&obd->obd_dev_lock);
1984 }
1985 EXPORT_SYMBOL(sptlrpc_target_update_exp_flavor);
1986
1987 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc)
1988 {
1989         /* peer's claim is unreliable unless gss is being used */
1990         if (!req->rq_auth_gss || svc_rc == SECSVC_DROP)
1991                 return svc_rc;
1992
1993         switch (req->rq_sp_from) {
1994         case LUSTRE_SP_CLI:
1995                 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) {
1996                         DEBUG_REQ(D_ERROR, req, "faked source CLI");
1997                         svc_rc = SECSVC_DROP;
1998                 }
1999                 break;
2000         case LUSTRE_SP_MDT:
2001                 if (!req->rq_auth_usr_mdt) {
2002                         DEBUG_REQ(D_ERROR, req, "faked source MDT");
2003                         svc_rc = SECSVC_DROP;
2004                 }
2005                 break;
2006         case LUSTRE_SP_OST:
2007                 if (!req->rq_auth_usr_ost) {
2008                         DEBUG_REQ(D_ERROR, req, "faked source OST");
2009                         svc_rc = SECSVC_DROP;
2010                 }
2011                 break;
2012         case LUSTRE_SP_MGS:
2013         case LUSTRE_SP_MGC:
2014                 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt &&
2015                     !req->rq_auth_usr_ost) {
2016                         DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS");
2017                         svc_rc = SECSVC_DROP;
2018                 }
2019                 break;
2020         case LUSTRE_SP_ANY:
2021         default:
2022                 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from);
2023                 svc_rc = SECSVC_DROP;
2024         }
2025
2026         return svc_rc;
2027 }
2028
2029 /**
2030  * Used by ptlrpc server, to perform transformation upon the request message
2031  * of incoming \a req. This must be the first thing done with an incoming
2032  * request in the ptlrpc layer.
2033  *
2034  * \retval SECSVC_OK success; req->rq_reqmsg points to the request message in
2035  * clear text, whose size is req->rq_reqlen; req->rq_svc_ctx is also set.
2036  * \retval SECSVC_COMPLETE success, the request has been fully processed, and
2037  * reply message has been prepared.
2038  * \retval SECSVC_DROP failed, this request should be dropped.
2039  */
2040 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
2041 {
2042         struct ptlrpc_sec_policy *policy;
2043         struct lustre_msg        *msg = req->rq_reqbuf;
2044         int                       rc;
2045         ENTRY;
2046
2047         LASSERT(msg);
2048         LASSERT(req->rq_reqmsg == NULL);
2049         LASSERT(req->rq_repmsg == NULL);
2050         LASSERT(req->rq_svc_ctx == NULL);
2051
2052         req->rq_req_swab_mask = 0;
2053
2054         rc = __lustre_unpack_msg(msg, req->rq_reqdata_len);
2055         switch (rc) {
2056         case 1:
2057                 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF);
2058         case 0:
2059                 break;
2060         default:
2061                 CERROR("error unpacking request from %s x"LPU64"\n",
2062                        libcfs_id2str(req->rq_peer), req->rq_xid);
2063                 RETURN(SECSVC_DROP);
2064         }
2065
2066         req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
2067         req->rq_sp_from = LUSTRE_SP_ANY;
2068         req->rq_auth_uid = INVALID_UID;
2069         req->rq_auth_mapped_uid = INVALID_UID;
2070
2071         policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
2072         if (!policy) {
2073                 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
2074                 RETURN(SECSVC_DROP);
2075         }
2076
2077         LASSERT(policy->sp_sops->accept);
2078         rc = policy->sp_sops->accept(req);
2079         sptlrpc_policy_put(policy);
2080         LASSERT(req->rq_reqmsg || rc != SECSVC_OK);
2081         LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP);
2082
2083         /*
2084          * if it's not the null flavor (which means the clear-text msg is
2085          * embedded), reset the swab mask for the coming inner msg unpacking.
2086          */
2087         if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL)
2088                 req->rq_req_swab_mask = 0;
2089
2090         /* sanity check for the request source */
2091         rc = sptlrpc_svc_check_from(req, rc);
2092         RETURN(rc);
2093 }
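
/*
 * Illustrative sketch, not part of the original code: how a server might
 * dispatch on the three return values documented above.  The function name
 * and its 0/1/-1 conventions are hypothetical; the block is under #if 0
 * and never compiled.
 */
#if 0
static int example_handle_incoming(struct ptlrpc_request *my_req)
{
        switch (sptlrpc_svc_unwrap_request(my_req)) {
        case SECSVC_OK:
                /* my_req->rq_reqmsg now points at the clear-text request */
                return 0;
        case SECSVC_COMPLETE:
                /* the security layer already prepared the reply, just send it */
                return 1;
        case SECSVC_DROP:
        default:
                /* drop the request without replying */
                return -1;
        }
}
#endif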
2094
2095 /**
2096  * Used by ptlrpc server, to allocate the reply buffer for \a req. On success,
2097  * req->rq_reply_state is set, and req->rq_reply_state->rs_msg points to
2098  * a buffer of \a msglen size.
2099  */
2100 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
2101 {
2102         struct ptlrpc_sec_policy *policy;
2103         struct ptlrpc_reply_state *rs;
2104         int rc;
2105         ENTRY;
2106
2107         LASSERT(req->rq_svc_ctx);
2108         LASSERT(req->rq_svc_ctx->sc_policy);
2109
2110         policy = req->rq_svc_ctx->sc_policy;
2111         LASSERT(policy->sp_sops->alloc_rs);
2112
2113         rc = policy->sp_sops->alloc_rs(req, msglen);
2114         if (unlikely(rc == -ENOMEM)) {
2115                 /* failed alloc, try emergency pool */
2116                 rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_service);
2117                 if (rs == NULL)
2118                         RETURN(-ENOMEM);
2119
2120                 req->rq_reply_state = rs;
2121                 rc = policy->sp_sops->alloc_rs(req, msglen);
2122                 if (rc) {
2123                         lustre_put_emerg_rs(rs);
2124                         req->rq_reply_state = NULL;
2125                 }
2126         }
2127
2128         LASSERT(rc != 0 ||
2129                 (req->rq_reply_state && req->rq_reply_state->rs_msg));
2130
2131         RETURN(rc);
2132 }
2133
2134 /**
2135  * Used by ptlrpc server, to perform transformation upon reply message.
2136  *
2137  * \post req->rq_reply_off is set to the appropriate server-controlled reply offset.
2138  * \post req->rq_repmsg and req->rq_reply_state->rs_msg become inaccessible.
2139  */
2140 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
2141 {
2142         struct ptlrpc_sec_policy *policy;
2143         int rc;
2144         ENTRY;
2145
2146         LASSERT(req->rq_svc_ctx);
2147         LASSERT(req->rq_svc_ctx->sc_policy);
2148
2149         policy = req->rq_svc_ctx->sc_policy;
2150         LASSERT(policy->sp_sops->authorize);
2151
2152         rc = policy->sp_sops->authorize(req);
2153         LASSERT(rc || req->rq_reply_state->rs_repdata_len);
2154
2155         RETURN(rc);
2156 }
2157
2158 /**
2159  * Used by ptlrpc server, to free reply_state.
2160  */
2161 void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
2162 {
2163         struct ptlrpc_sec_policy *policy;
2164         unsigned int prealloc;
2165         ENTRY;
2166
2167         LASSERT(rs->rs_svc_ctx);
2168         LASSERT(rs->rs_svc_ctx->sc_policy);
2169
2170         policy = rs->rs_svc_ctx->sc_policy;
2171         LASSERT(policy->sp_sops->free_rs);
2172
2173         prealloc = rs->rs_prealloc;
2174         policy->sp_sops->free_rs(rs);
2175
2176         if (prealloc)
2177                 lustre_put_emerg_rs(rs);
2178         EXIT;
2179 }
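
/*
 * Illustrative sketch, not part of the original code: the server-side reply
 * path allocates a reply state, fills rs_msg, authorizes (wraps) it and, on
 * failure, releases it again.  Names prefixed with "example_"/"my_" are
 * hypothetical; the block is under #if 0 and never compiled.
 */
#if 0
static int example_reply_cycle(struct ptlrpc_request *my_req, int my_msglen)
{
        int rc;

        rc = sptlrpc_svc_alloc_rs(my_req, my_msglen);
        if (rc)
                return rc;

        /* ... fill my_req->rq_reply_state->rs_msg ... */

        rc = sptlrpc_svc_wrap_reply(my_req);
        if (rc)
                sptlrpc_svc_free_rs(my_req->rq_reply_state);
        return rc;
}
#endif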
2180
2181 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
2182 {
2183         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2184
2185         if (ctx == NULL)
2186                 return;
2187
2188         LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
2189         cfs_atomic_inc(&ctx->sc_refcount);
2190 }
2191
2192 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req)
2193 {
2194         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2195
2196         if (ctx == NULL)
2197                 return;
2198
2199         LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
2200         if (cfs_atomic_dec_and_test(&ctx->sc_refcount)) {
2201                 if (ctx->sc_policy->sp_sops->free_ctx)
2202                         ctx->sc_policy->sp_sops->free_ctx(ctx);
2203         }
2204         req->rq_svc_ctx = NULL;
2205 }
2206
2207 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req)
2208 {
2209         struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx;
2210
2211         if (ctx == NULL)
2212                 return;
2213
2214         LASSERT(cfs_atomic_read(&ctx->sc_refcount) > 0);
2215         if (ctx->sc_policy->sp_sops->invalidate_ctx)
2216                 ctx->sc_policy->sp_sops->invalidate_ctx(ctx);
2217 }
2218 EXPORT_SYMBOL(sptlrpc_svc_ctx_invalidate);
2219
2220 /****************************************
2221  * bulk security                        *
2222  ****************************************/
2223
2224 /**
2225  * Perform transformation upon the bulk data pointed to by \a desc. This is
2226  * called before transforming the request message.
2227  */
2228 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
2229                           struct ptlrpc_bulk_desc *desc)
2230 {
2231         struct ptlrpc_cli_ctx *ctx;
2232
2233         LASSERT(req->rq_bulk_read || req->rq_bulk_write);
2234
2235         if (!req->rq_pack_bulk)
2236                 return 0;
2237
2238         ctx = req->rq_cli_ctx;
2239         if (ctx->cc_ops->wrap_bulk)
2240                 return ctx->cc_ops->wrap_bulk(ctx, req, desc);
2241         return 0;
2242 }
2243 EXPORT_SYMBOL(sptlrpc_cli_wrap_bulk);
2244
2245 /**
2246  * This is called after the reply message has been unwrapped.
2247  * Return the number of bytes of plain text actually received, or an error code.
2248  */
2249 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
2250                                  struct ptlrpc_bulk_desc *desc,
2251                                  int nob)
2252 {
2253         struct ptlrpc_cli_ctx  *ctx;
2254         int                     rc;
2255
2256         LASSERT(req->rq_bulk_read && !req->rq_bulk_write);
2257
2258         if (!req->rq_pack_bulk)
2259                 return desc->bd_nob_transferred;
2260
2261         ctx = req->rq_cli_ctx;
2262         if (ctx->cc_ops->unwrap_bulk) {
2263                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2264                 if (rc < 0)
2265                         return rc;
2266         }
2267         return desc->bd_nob_transferred;
2268 }
2269 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_read);
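
/*
 * Illustrative sketch, not part of the original code: the client-side bulk
 * read path wraps the descriptor before the request goes out and verifies
 * the data once the reply has been unwrapped.  Names with "example_"/"my_"
 * prefixes are hypothetical; the block is under #if 0 and never compiled.
 */
#if 0
static int example_cli_bulk_read(struct ptlrpc_request *my_req,
                                 struct ptlrpc_bulk_desc *my_desc)
{
        int rc;

        rc = sptlrpc_cli_wrap_bulk(my_req, my_desc);
        if (rc)
                return rc;

        /* ... send the request, wait for and unwrap the reply ... */

        /* returns the plain-text size actually received, or an error */
        rc = sptlrpc_cli_unwrap_bulk_read(my_req, my_desc,
                                          my_desc->bd_nob_transferred);
        return rc < 0 ? rc : 0;
}
#endif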
2270
2271 /**
2272  * This is called after the reply message has been unwrapped.
2273  * Return 0 on success, or an error code.
2274  */
2275 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
2276                                   struct ptlrpc_bulk_desc *desc)
2277 {
2278         struct ptlrpc_cli_ctx  *ctx;
2279         int                     rc;
2280
2281         LASSERT(!req->rq_bulk_read && req->rq_bulk_write);
2282
2283         if (!req->rq_pack_bulk)
2284                 return 0;
2285
2286         ctx = req->rq_cli_ctx;
2287         if (ctx->cc_ops->unwrap_bulk) {
2288                 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc);
2289                 if (rc < 0)
2290                         return rc;
2291         }
2292
2293         /*
2294          * if everything went right, nob should equal nob_transferred.
2295          * in case of privacy mode, nob_transferred needs to be adjusted.
2296          */
2297         if (desc->bd_nob != desc->bd_nob_transferred) {
2298                 CERROR("nob %d doesn't match transferred nob %d\n",
2299                        desc->bd_nob, desc->bd_nob_transferred);
2300                 return -EPROTO;
2301         }
2302
2303         return 0;
2304 }
2305 EXPORT_SYMBOL(sptlrpc_cli_unwrap_bulk_write);
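
/*
 * Illustrative sketch, not part of the original code: the bulk write path is
 * symmetric, except that the unwrap step only reports success or failure.
 * Names prefixed with "example_"/"my_" are hypothetical; the block is under
 * #if 0 and never compiled.
 */
#if 0
static int example_cli_bulk_write(struct ptlrpc_request *my_req,
                                  struct ptlrpc_bulk_desc *my_desc)
{
        int rc;

        rc = sptlrpc_cli_wrap_bulk(my_req, my_desc);
        if (rc)
                return rc;

        /* ... send the request, wait for and unwrap the reply ... */

        return sptlrpc_cli_unwrap_bulk_write(my_req, my_desc);
}
#endif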
2306
2307 /**
2308  * Perform transformation upon outgoing bulk read.
2309  */
2310 int sptlrpc_svc_wrap_bulk(struct ptlrpc_request *req,
2311                           struct ptlrpc_bulk_desc *desc)
2312 {
2313         struct ptlrpc_svc_ctx *ctx;
2314
2315         LASSERT(req->rq_bulk_read);
2316
2317         if (!req->rq_pack_bulk)
2318                 return 0;
2319
2320         ctx = req->rq_svc_ctx;
2321         if (ctx->sc_policy->sp_sops->wrap_bulk)
2322                 return ctx->sc_policy->sp_sops->wrap_bulk(req, desc);
2323
2324         return 0;
2325 }
2326 EXPORT_SYMBOL(sptlrpc_svc_wrap_bulk);
2327
2328 /**
2329  * Perform transformation upon incoming bulk write.
2330  */
2331 int sptlrpc_svc_unwrap_bulk(struct ptlrpc_request *req,
2332                             struct ptlrpc_bulk_desc *desc)
2333 {
2334         struct ptlrpc_svc_ctx *ctx;
2335         int                    rc;
2336
2337         LASSERT(req->rq_bulk_write);
2338
2339         /*
2340          * in privacy mode, transferred should be >= expected; otherwise
2341          * transferred should be == expected.
2342          */
2343         if (desc->bd_nob_transferred < desc->bd_nob ||
2344             (desc->bd_nob_transferred > desc->bd_nob &&
2345              SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) !=
2346              SPTLRPC_BULK_SVC_PRIV)) {
2347                 DEBUG_REQ(D_ERROR, req, "truncated bulk GET %d(%d)",
2348                           desc->bd_nob_transferred, desc->bd_nob);
2349                 return -ETIMEDOUT;
2350         }
2351
2352         if (!req->rq_pack_bulk)
2353                 return 0;
2354
2355         ctx = req->rq_svc_ctx;
2356         if (ctx->sc_policy->sp_sops->unwrap_bulk) {
2357                 rc = ctx->sc_policy->sp_sops->unwrap_bulk(req, desc);
2358                 if (rc)
2359                         CERROR("error unwrap bulk: %d\n", rc);
2360         }
2361
2362         /* return 0 to allow the reply to be sent */
2363         return 0;
2364 }
2365 EXPORT_SYMBOL(sptlrpc_svc_unwrap_bulk);
2366
2367 /**
2368  * Prepare buffers for incoming bulk write.
2369  */
2370 int sptlrpc_svc_prep_bulk(struct ptlrpc_request *req,
2371                           struct ptlrpc_bulk_desc *desc)
2372 {
2373         struct ptlrpc_svc_ctx *ctx;
2374
2375         LASSERT(req->rq_bulk_write);
2376
2377         if (!req->rq_pack_bulk)
2378                 return 0;
2379
2380         ctx = req->rq_svc_ctx;
2381         if (ctx->sc_policy->sp_sops->prep_bulk)
2382                 return ctx->sc_policy->sp_sops->prep_bulk(req, desc);
2383
2384         return 0;
2385 }
2386 EXPORT_SYMBOL(sptlrpc_svc_prep_bulk);
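
/*
 * Illustrative sketch, not part of the original code: on the server, an
 * incoming bulk write is prepared before the GET is posted and verified
 * after the transfer completes.  Names prefixed with "example_"/"my_" are
 * hypothetical; the block is under #if 0 and never compiled.
 */
#if 0
static int example_svc_bulk_write(struct ptlrpc_request *my_req,
                                  struct ptlrpc_bulk_desc *my_desc)
{
        int rc;

        rc = sptlrpc_svc_prep_bulk(my_req, my_desc);
        if (rc)
                return rc;

        /* ... post the bulk GET and wait for the transfer ... */

        return sptlrpc_svc_unwrap_bulk(my_req, my_desc);
}
#endif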
2387
2388 /****************************************
2389  * user descriptor helpers              *
2390  ****************************************/
2391
2392 int sptlrpc_current_user_desc_size(void)
2393 {
2394         int ngroups;
2395
2396 #ifdef __KERNEL__
2397         ngroups = current_ngroups;
2398
2399         if (ngroups > LUSTRE_MAX_GROUPS)
2400                 ngroups = LUSTRE_MAX_GROUPS;
2401 #else
2402         ngroups = 0;
2403 #endif
2404         return sptlrpc_user_desc_size(ngroups);
2405 }
2406 EXPORT_SYMBOL(sptlrpc_current_user_desc_size);
2407
2408 int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
2409 {
2410         struct ptlrpc_user_desc *pud;
2411
2412         pud = lustre_msg_buf(msg, offset, 0);
2413
2414         pud->pud_uid = cfs_curproc_uid();
2415         pud->pud_gid = cfs_curproc_gid();
2416         pud->pud_fsuid = cfs_curproc_fsuid();
2417         pud->pud_fsgid = cfs_curproc_fsgid();
2418         pud->pud_cap = cfs_curproc_cap_pack();
2419         pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
2420
2421 #ifdef __KERNEL__
2422         task_lock(current);
2423         if (pud->pud_ngroups > current_ngroups)
2424                 pud->pud_ngroups = current_ngroups;
2425         memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
2426                pud->pud_ngroups * sizeof(__u32));
2427         task_unlock(current);
2428 #endif
2429
2430         return 0;
2431 }
2432 EXPORT_SYMBOL(sptlrpc_pack_user_desc);
2433
2434 int sptlrpc_unpack_user_desc(struct lustre_msg *msg, int offset, int swabbed)
2435 {
2436         struct ptlrpc_user_desc *pud;
2437         int                      i;
2438
2439         pud = lustre_msg_buf(msg, offset, sizeof(*pud));
2440         if (!pud)
2441                 return -EINVAL;
2442
2443         if (swabbed) {
2444                 __swab32s(&pud->pud_uid);
2445                 __swab32s(&pud->pud_gid);
2446                 __swab32s(&pud->pud_fsuid);
2447                 __swab32s(&pud->pud_fsgid);
2448                 __swab32s(&pud->pud_cap);
2449                 __swab32s(&pud->pud_ngroups);
2450         }
2451
2452         if (pud->pud_ngroups > LUSTRE_MAX_GROUPS) {
2453                 CERROR("%u groups is too large\n", pud->pud_ngroups);
2454                 return -EINVAL;
2455         }
2456
2457         if (sizeof(*pud) + pud->pud_ngroups * sizeof(__u32) >
2458             msg->lm_buflens[offset]) {
2459                 CERROR("%u groups are claimed but bufsize only %u\n",
2460                        pud->pud_ngroups, msg->lm_buflens[offset]);
2461                 return -EINVAL;
2462         }
2463
2464         if (swabbed) {
2465                 for (i = 0; i < pud->pud_ngroups; i++)
2466                         __swab32s(&pud->pud_groups[i]);
2467         }
2468
2469         return 0;
2470 }
2471 EXPORT_SYMBOL(sptlrpc_unpack_user_desc);
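
/*
 * Illustrative sketch, not part of the original code: the client sizes and
 * packs the user descriptor into a message segment, and the server validates
 * (and, if needed, byte-swaps) it on receipt.  Names prefixed with
 * "example_"/"my_" are hypothetical; the block is under #if 0 and never
 * compiled.
 */
#if 0
/* client side: the segment at my_offset must have been allocated with
 * at least sptlrpc_current_user_desc_size() bytes */
static int example_pack_ud(struct lustre_msg *my_msg, int my_offset)
{
        return sptlrpc_pack_user_desc(my_msg, my_offset);
}

/* server side: validate the received descriptor */
static int example_unpack_ud(struct lustre_msg *my_msg, int my_offset,
                             int my_swabbed)
{
        return sptlrpc_unpack_user_desc(my_msg, my_offset, my_swabbed);
}
#endif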
2472
2473 /****************************************
2474  * misc helpers                         *
2475  ****************************************/
2476
2477 const char * sec2target_str(struct ptlrpc_sec *sec)
2478 {
2479         if (!sec || !sec->ps_import || !sec->ps_import->imp_obd)
2480                 return "*";
2481         if (sec_is_reverse(sec))
2482                 return "c";
2483         return obd_uuid2str(&sec->ps_import->imp_obd->u.cli.cl_target_uuid);
2484 }
2485 EXPORT_SYMBOL(sec2target_str);
2486
2487 /*
2488  * return true if the bulk data is protected
2489  */
2490 int sptlrpc_flavor_has_bulk(struct sptlrpc_flavor *flvr)
2491 {
2492         switch (SPTLRPC_FLVR_BULK_SVC(flvr->sf_rpc)) {
2493         case SPTLRPC_BULK_SVC_INTG:
2494         case SPTLRPC_BULK_SVC_PRIV:
2495                 return 1;
2496         default:
2497                 return 0;
2498         }
2499 }
2500 EXPORT_SYMBOL(sptlrpc_flavor_has_bulk);
2501
2502 /****************************************
2503  * crypto API helper/alloc blkcipher    *
2504  ****************************************/
2505
2506 /****************************************
2507  * initialize/finalize                  *
2508  ****************************************/
2509
2510 int __init sptlrpc_init(void)
2511 {
2512         int rc;
2513
2514         cfs_rwlock_init(&policy_lock);
2515
2516         rc = sptlrpc_gc_init();
2517         if (rc)
2518                 goto out;
2519
2520         rc = sptlrpc_conf_init();
2521         if (rc)
2522                 goto out_gc;
2523
2524         rc = sptlrpc_enc_pool_init();
2525         if (rc)
2526                 goto out_conf;
2527
2528         rc = sptlrpc_null_init();
2529         if (rc)
2530                 goto out_pool;
2531
2532         rc = sptlrpc_plain_init();
2533         if (rc)
2534                 goto out_null;
2535
2536         rc = sptlrpc_lproc_init();
2537         if (rc)
2538                 goto out_plain;
2539
2540         return 0;
2541
2542 out_plain:
2543         sptlrpc_plain_fini();
2544 out_null:
2545         sptlrpc_null_fini();
2546 out_pool:
2547         sptlrpc_enc_pool_fini();
2548 out_conf:
2549         sptlrpc_conf_fini();
2550 out_gc:
2551         sptlrpc_gc_fini();
2552 out:
2553         return rc;
2554 }
2555
2556 void __exit sptlrpc_fini(void)
2557 {
2558         sptlrpc_lproc_fini();
2559         sptlrpc_plain_fini();
2560         sptlrpc_null_fini();
2561         sptlrpc_enc_pool_fini();
2562         sptlrpc_conf_fini();
2563         sptlrpc_gc_fini();
2564 }