1 --- linux-2.6.7/Documentation/filesystems/00-INDEX.lsec 2004-06-15 23:20:26.000000000 -0600
2 +++ linux-2.6.7/Documentation/filesystems/00-INDEX 2005-03-23 14:28:24.576313528 -0700
3 @@ -28,6 +28,8 @@ jfs.txt
4 - info and mount options for the JFS filesystem.
6 - info on Novell Netware(tm) filesystem using NCP protocol.
8 + - info and mount options for the nfs4 filesystem.
10 - info and mount options for the NTFS filesystem (Windows NT).
12 --- linux-2.6.7/Documentation/filesystems/nfs4.txt.lsec 2005-03-23 14:28:24.576313528 -0700
13 +++ linux-2.6.7/Documentation/filesystems/nfs4.txt 2005-03-23 14:28:24.576313528 -0700
18 +NFS version 4 is specified by RFC3530. Compared to earlier NFS versions,
19 +it provides enhanced security and better client caching, among other features.
21 +In addition to basic file operations, the NFS client supports locking, Kerberos
22 +(basic authentication and integrity), and reboot recovery.
24 +As of this writing (July 2004), patches to nfs-utils and util-linux are required
25 +for NFSv4 support; see http://www.citi.umich.edu/projects/nfsv4/linux/ for
26 +patches and instructions.
28 +The kernel treats NFS version 4 as a separate filesystem type, nfs4, so it is
29 +mounted using "mount -tnfs4 server:/path /mntpoint", not by mounting the nfs
30 +filesystem with -onfsver=4.
35 --- linux-2.6.7/fs/locks.c.lsec 2004-06-15 23:20:03.000000000 -0600
36 +++ linux-2.6.7/fs/locks.c 2005-03-23 14:28:22.425640480 -0700
37 @@ -317,7 +317,7 @@ static int flock_to_posix_lock(struct fi
39 fl->fl_end = OFFSET_MAX;
41 - fl->fl_owner = current->files;
43 fl->fl_pid = current->tgid;
45 fl->fl_flags = FL_POSIX;
46 @@ -357,7 +357,7 @@ static int flock64_to_posix_lock(struct
48 fl->fl_end = OFFSET_MAX;
50 - fl->fl_owner = current->files;
52 fl->fl_pid = current->tgid;
54 fl->fl_flags = FL_POSIX;
55 @@ -920,7 +920,7 @@ int posix_lock_file(struct file *filp, s
57 int locks_mandatory_locked(struct inode *inode)
59 - fl_owner_t owner = current->files;
60 + unsigned int pid = current->tgid;
64 @@ -930,7 +930,9 @@ int locks_mandatory_locked(struct inode
65 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
68 - if (fl->fl_owner != owner)
69 + if (fl->fl_owner != 0)
71 + if (fl->fl_pid != pid)
75 @@ -958,7 +960,7 @@ int locks_mandatory_area(int read_write,
79 - fl.fl_owner = current->files;
81 fl.fl_pid = current->tgid;
83 fl.fl_flags = FL_POSIX | FL_ACCESS;
84 @@ -1684,7 +1686,7 @@ void locks_remove_posix(struct file *fil
86 while (*before != NULL) {
87 struct file_lock *fl = *before;
88 - if (IS_POSIX(fl) && (fl->fl_owner == owner)) {
89 + if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) {
90 locks_delete_lock(before);
93 @@ -1982,18 +1984,6 @@ int lock_may_write(struct inode *inode,
95 EXPORT_SYMBOL(lock_may_write);
97 -static inline void __steal_locks(struct file *file, fl_owner_t from)
99 - struct inode *inode = file->f_dentry->d_inode;
100 - struct file_lock *fl = inode->i_flock;
103 - if (fl->fl_file == file && fl->fl_owner == from)
104 - fl->fl_owner = current->files;
109 /* When getting ready for executing a binary, we make sure that current
110 * has a files_struct on its own. Before dropping the old files_struct,
111 * we take over ownership of all locks for all file descriptors we own.
112 @@ -2002,31 +1992,6 @@ static inline void __steal_locks(struct
114 void steal_locks(fl_owner_t from)
116 - struct files_struct *files = current->files;
127 - if (i >= files->max_fdset || i >= files->max_fds)
129 - set = files->open_fds->fds_bits[j++];
132 - struct file *file = files->fd[i];
134 - __steal_locks(file, from);
142 EXPORT_SYMBOL(steal_locks);
144 --- linux-2.6.7/fs/hostfs/hostfs_kern.c.lsec 2005-03-23 14:25:58.982447160 -0700
145 +++ linux-2.6.7/fs/hostfs/hostfs_kern.c 2005-03-23 14:33:11.946626600 -0700
146 @@ -290,7 +290,6 @@ static void hostfs_delete_inode(struct i
148 if(HOSTFS_I(inode)->fd != -1) {
149 close_file(&HOSTFS_I(inode)->fd);
150 - printk("Closing host fd in .delete_inode\n");
151 HOSTFS_I(inode)->fd = -1;
154 @@ -303,7 +302,6 @@ static void hostfs_destroy_inode(struct
156 if(HOSTFS_I(inode)->fd != -1) {
157 close_file(&HOSTFS_I(inode)->fd);
158 - printk("Closing host fd in .destroy_inode\n");
161 kfree(HOSTFS_I(inode));
162 --- linux-2.6.7/fs/open.c.lsec 2005-03-23 14:26:01.774022776 -0700
163 +++ linux-2.6.7/fs/open.c 2005-03-23 14:28:23.226518728 -0700
164 @@ -1025,7 +1025,7 @@ int filp_close(struct file *filp, fl_own
167 dnotify_flush(filp, id);
168 - locks_remove_posix(filp, id);
169 + locks_remove_posix(filp, 0);
173 --- linux-2.6.7/fs/nfsd/export.c.lsec 2004-06-15 23:19:36.000000000 -0600
174 +++ linux-2.6.7/fs/nfsd/export.c 2005-03-23 14:28:24.686296808 -0700
175 @@ -255,7 +255,7 @@ static inline void svc_expkey_update(str
176 new->ek_export = item->ek_export;
179 -static DefineSimpleCacheLookup(svc_expkey,0) /* no inplace updates */
180 +static DefineSimpleCacheLookup(svc_expkey)
182 #define EXPORT_HASHBITS 8
183 #define EXPORT_HASHMAX (1<< EXPORT_HASHBITS)
184 @@ -487,8 +487,72 @@ static inline void svc_export_update(str
185 new->ex_fsid = item->ex_fsid;
188 -static DefineSimpleCacheLookup(svc_export,1) /* allow inplace updates */
190 +svc_export_lookup(struct svc_export *item, int set)
192 + struct svc_export *tmp, *new = NULL;
193 + struct cache_head **hp, **head;
195 + head = &svc_export_cache.hash_table[svc_export_hash(item)];
198 + write_lock(&svc_export_cache.hash_lock);
200 + read_lock(&svc_export_cache.hash_lock);
201 + for(hp=head; *hp != NULL; hp = &tmp->h.next) {
202 + tmp = container_of(*hp, struct svc_export, h);
203 + if (svc_export_match(item, tmp)) { /* found a match */
204 + cache_get(&tmp->h);
206 + if (test_bit(CACHE_NEGATIVE, &item->h.flags))
207 + set_bit(CACHE_NEGATIVE, &tmp->h.flags);
209 + clear_bit(CACHE_NEGATIVE, &tmp->h.flags);
210 + svc_export_update(tmp, item);
214 + write_unlock(&svc_export_cache.hash_lock);
216 + read_unlock(&svc_export_cache.hash_lock);
218 + cache_fresh(&svc_export_cache, &tmp->h,
219 + item->h.expiry_time);
221 + svc_export_put(&new->h, &svc_export_cache);
225 + /* Didn't find anything */
227 + svc_export_init(new, item);
228 + new->h.next = *head;
230 + set_bit(CACHE_HASHED, &new->h.flags);
231 + svc_export_cache.entries++;
234 + if (test_bit(CACHE_NEGATIVE, &item->h.flags))
235 + set_bit(CACHE_NEGATIVE, &tmp->h.flags);
237 + svc_export_update(tmp, item);
241 + write_unlock(&svc_export_cache.hash_lock);
243 + read_unlock(&svc_export_cache.hash_lock);
245 + cache_fresh(&svc_export_cache, &new->h, item->h.expiry_time);
248 + new = kmalloc(sizeof(*new), GFP_KERNEL);
250 + cache_init(&new->h);
257 exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
258 --- linux-2.6.7/fs/nfsd/nfs4callback.c.lsec 2005-03-23 14:28:24.578313224 -0700
259 +++ linux-2.6.7/fs/nfsd/nfs4callback.c 2005-03-23 14:28:24.578313224 -0700
262 + * linux/fs/nfsd/nfs4callback.c
264 + * Copyright (c) 2001 The Regents of the University of Michigan.
265 + * All rights reserved.
267 + * Kendrick Smith <kmsmith@umich.edu>
268 + * Andy Adamson <andros@umich.edu>
270 + * Redistribution and use in source and binary forms, with or without
271 + * modification, are permitted provided that the following conditions
274 + * 1. Redistributions of source code must retain the above copyright
275 + * notice, this list of conditions and the following disclaimer.
276 + * 2. Redistributions in binary form must reproduce the above copyright
277 + * notice, this list of conditions and the following disclaimer in the
278 + * documentation and/or other materials provided with the distribution.
279 + * 3. Neither the name of the University nor the names of its
280 + * contributors may be used to endorse or promote products derived
281 + * from this software without specific prior written permission.
283 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
284 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
285 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
286 + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
287 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
288 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
289 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
290 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
291 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
292 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
293 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
296 +#include <linux/config.h>
297 +#include <linux/module.h>
298 +#include <linux/list.h>
299 +#include <linux/inet.h>
300 +#include <linux/errno.h>
301 +#include <linux/sunrpc/xdr.h>
302 +#include <linux/sunrpc/svc.h>
303 +#include <linux/sunrpc/clnt.h>
304 +#include <linux/nfsd/nfsd.h>
305 +#include <linux/nfsd/state.h>
306 +#include <linux/sunrpc/sched.h>
307 +#include <linux/nfs4.h>
309 +#define NFSDDBG_FACILITY NFSDDBG_PROC
311 +#define NFSPROC4_CB_NULL 0
312 +#define NFSPROC4_CB_COMPOUND 1
314 +/* forward declarations */
315 +static void nfs4_cb_null(struct rpc_task *task);
317 +/* Index of predefined Linux callback client operations */
320 + NFSPROC4_CLNT_CB_NULL = 0,
321 + NFSPROC4_CLNT_CB_GETATTR,
322 + NFSPROC4_CLNT_CB_RECALL,
325 +enum nfs_cb_opnum4 {
328 + OP_CB_ILLEGAL = 10044
332 +#define NFS4_MAXTAGLEN 20
334 +#define cb_compound_enc_hdr_sz 4
335 +#define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2))
338 +#define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2))
339 +#define enc_stateid_sz 16
341 +#define NFS4_enc_cb_getattr_sz (cb_compound_enc_hdr_sz + \
343 + enc_nfs4_fh_sz + 4)
345 +#define NFS4_dec_cb_getattr_sz (cb_compound_dec_hdr_sz + \
349 +#define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \
350 + 1 + enc_stateid_sz + \
353 +#define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \
357 +* Generic encode routines from fs/nfs/nfs4xdr.c
360 +xdr_writemem(u32 *p, const void *ptr, int nbytes)
362 + int tmp = XDR_QUADLEN(nbytes);
366 + memcpy(p, ptr, nbytes);
370 +#define WRITE32(n) *p++ = htonl(n)
371 +#define WRITEMEM(ptr,nbytes) do { \
372 + p = xdr_writemem(p, ptr, nbytes); \
374 +#define RESERVE_SPACE(nbytes) do { \
375 + p = xdr_reserve_space(xdr, nbytes); \
376 + if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __FUNCTION__); \
381 + * Generic decode routines from fs/nfs/nfs4xdr.c
383 +#define DECODE_TAIL \
388 + dprintk("NFSD: xdr error! (%s:%d)\n", __FILE__, __LINE__); \
392 +#define READ32(x) (x) = ntohl(*p++)
393 +#define READ64(x) do { \
394 + (x) = (u64)ntohl(*p++) << 32; \
395 + (x) |= ntohl(*p++); \
397 +#define READTIME(x) do { \
399 + (x.tv_sec) = ntohl(*p++); \
400 + (x.tv_nsec) = ntohl(*p++); \
402 +#define READ_BUF(nbytes) do { \
403 + p = xdr_inline_decode(xdr, nbytes); \
405 + dprintk("NFSD: %s: reply buffer overflowed in line %d.", \
406 + __FUNCTION__, __LINE__); \
411 +struct nfs4_cb_compound_hdr {
419 +struct nfs4_cb_getattr {
425 + struct timespec mtime;
428 +struct nfs4_cb_recall {
429 + nfs4_stateid stateid;
437 +} nfs_cb_errtbl[] = {
439 + { NFS4ERR_PERM, EPERM },
440 + { NFS4ERR_NOENT, ENOENT },
441 + { NFS4ERR_IO, EIO },
442 + { NFS4ERR_NXIO, ENXIO },
443 + { NFS4ERR_ACCESS, EACCES },
444 + { NFS4ERR_EXIST, EEXIST },
445 + { NFS4ERR_XDEV, EXDEV },
446 + { NFS4ERR_NOTDIR, ENOTDIR },
447 + { NFS4ERR_ISDIR, EISDIR },
448 + { NFS4ERR_INVAL, EINVAL },
449 + { NFS4ERR_FBIG, EFBIG },
450 + { NFS4ERR_NOSPC, ENOSPC },
451 + { NFS4ERR_ROFS, EROFS },
452 + { NFS4ERR_MLINK, EMLINK },
453 + { NFS4ERR_NAMETOOLONG, ENAMETOOLONG },
454 + { NFS4ERR_NOTEMPTY, ENOTEMPTY },
455 + { NFS4ERR_DQUOT, EDQUOT },
456 + { NFS4ERR_STALE, ESTALE },
457 + { NFS4ERR_BADHANDLE, EBADHANDLE },
458 + { NFS4ERR_BAD_COOKIE, EBADCOOKIE },
459 + { NFS4ERR_NOTSUPP, ENOTSUPP },
460 + { NFS4ERR_TOOSMALL, ETOOSMALL },
461 + { NFS4ERR_SERVERFAULT, ESERVERFAULT },
462 + { NFS4ERR_BADTYPE, EBADTYPE },
463 + { NFS4ERR_LOCKED, EAGAIN },
464 + { NFS4ERR_RESOURCE, EREMOTEIO },
465 + { NFS4ERR_SYMLINK, ELOOP },
466 + { NFS4ERR_OP_ILLEGAL, EOPNOTSUPP },
467 + { NFS4ERR_DEADLOCK, EDEADLK },
472 +nfs_cb_stat_to_errno(int stat)
475 + for (i = 0; nfs_cb_errtbl[i].stat != -1; i++) {
476 + if (nfs_cb_errtbl[i].stat == stat)
477 + return nfs_cb_errtbl[i].errno;
479 + /* If we cannot translate the error, the recovery routines should
481 + * Note: remaining NFSv4 error codes have values > 10000, so should
482 + * not conflict with native Linux error codes.
492 +encode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr)
497 + WRITE32(0); /* tag length is always 0 */
498 + WRITE32(NFS4_MINOR_VERSION);
499 + WRITE32(hdr->ident);
500 + WRITE32(hdr->nops);
505 +encode_cb_getattr(struct xdr_stream *xdr, struct nfs4_cb_getattr *cb_get)
508 + int len = cb_get->fh.size;
510 + RESERVE_SPACE(20 + len);
511 + WRITE32(OP_CB_GETATTR);
513 + WRITEMEM(cb_get->fh.data, len);
515 + WRITE32(cb_get->bm0);
516 + WRITE32(cb_get->bm1);
521 +encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec)
524 + int len = cb_rec->fh.size;
526 + RESERVE_SPACE(8+sizeof(cb_rec->stateid.data));
527 + WRITE32(OP_CB_RECALL);
528 + WRITEMEM(cb_rec->stateid.data, sizeof(cb_rec->stateid.data));
529 + WRITE32(cb_rec->trunc);
531 + WRITEMEM(cb_rec->fh.data, len);
536 +nfs4_xdr_enc_cb_getattr(struct rpc_rqst *req, u32 *p, struct nfs4_cb_getattr *args)
538 + struct xdr_stream xdr;
539 + struct nfs4_cb_compound_hdr hdr = {
543 + xdr_init_encode(&xdr, &req->rq_snd_buf, p);
544 + encode_cb_compound_hdr(&xdr, &hdr);
545 + return (encode_cb_getattr(&xdr, args));
549 +nfs4_xdr_enc_cb_recall(struct rpc_rqst *req, u32 *p, struct nfs4_cb_recall *args)
551 + struct xdr_stream xdr;
552 + struct nfs4_cb_compound_hdr hdr = {
556 + xdr_init_encode(&xdr, &req->rq_snd_buf, p);
557 + encode_cb_compound_hdr(&xdr, &hdr);
558 + return (encode_cb_recall(&xdr, args));
563 +decode_cb_compound_hdr(struct xdr_stream *xdr, struct nfs4_cb_compound_hdr *hdr){
567 + READ32(hdr->status);
568 + READ32(hdr->taglen);
569 + READ_BUF(hdr->taglen + 4);
570 + hdr->tag = (char *)p;
571 + p += XDR_QUADLEN(hdr->taglen);
577 +decode_cb_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
585 + if (op != expected) {
586 + dprintk("NFSD: decode_cb_op_hdr: Callback server returned operation"
587 + " %d but we issued a request for %d\n",
592 + if (nfserr != NFS_OK)
593 + return -nfs_cb_stat_to_errno(nfserr);
598 +decode_cb_getattr(struct xdr_stream *xdr, struct nfs4_cb_getattr *cb_get)
608 + status = decode_cb_op_hdr(xdr, OP_CB_GETATTR);
613 + if( (bmlen < 1) || (bmlen > 2))
615 + READ_BUF((bmlen << 2) + 4);
617 + if (bmval0 & ~(FATTR4_WORD0_CHANGE | FATTR4_WORD0_SIZE))
618 + goto out_bad_bitmap;
621 + if (bmval1 & ~ FATTR4_WORD1_TIME_MODIFY)
622 + goto out_bad_bitmap;
625 + if (bmval0 & FATTR4_WORD0_CHANGE) {
628 + READ64(cb_get->change_attr);
629 + dprintk("decode_cb_getattr: changeid=%Ld\n",
630 + (long long)cb_get->change_attr);
632 + if (bmval0 & FATTR4_WORD0_SIZE) {
635 + READ64(cb_get->size);
636 + dprintk("decode_cb_getattr: size=%Ld\n",
637 + (long long)cb_get->size);
639 + if (bmval1 & FATTR4_WORD1_TIME_MODIFY) {
642 + READTIME(cb_get->mtime);
643 + dprintk("decode_cb_gatattr: mtime=%ld\n",
644 + (long)cb_get->mtime.tv_sec);
646 + if (len != attrlen)
652 + dprintk("NFSD: %s Callback server returned bad attribute bitmap\n",
659 +nfs4_xdr_dec_cb_getattr(struct rpc_rqst *rqstp, u32 *p, struct nfs4_cb_getattr *res)
661 + struct xdr_stream xdr;
662 + struct nfs4_cb_compound_hdr hdr;
665 + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
666 + status = decode_cb_compound_hdr(&xdr, &hdr);
669 + status = decode_cb_getattr(&xdr, res);
675 +nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp, u32 *p)
677 + struct xdr_stream xdr;
678 + struct nfs4_cb_compound_hdr hdr;
681 + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
682 + status = decode_cb_compound_hdr(&xdr, &hdr);
685 + status = decode_cb_op_hdr(&xdr, OP_CB_RECALL);
691 +nfs4_xdr_enc_null(struct rpc_rqst *req, u32 *p)
693 + struct xdr_stream xdrs, *xdr = &xdrs;
695 + xdr_init_encode(&xdrs, &req->rq_snd_buf, p);
701 +nfs4_xdr_dec_null(struct rpc_rqst *req, u32 *p)
707 + * RPC procedure tables
710 +# define MAX(a, b) (((a) > (b))? (a) : (b))
713 +#define PROC(proc, argtype, restype) \
714 +[NFSPROC4_CLNT_##proc] = { \
715 + .p_proc = NFSPROC4_CB_COMPOUND, \
716 + .p_encode = (kxdrproc_t) nfs4_xdr_##argtype, \
717 + .p_decode = (kxdrproc_t) nfs4_xdr_##restype, \
718 + .p_bufsiz = MAX(NFS4_##argtype##_sz,NFS4_##restype##_sz) << 2, \
721 +struct rpc_procinfo nfs4_cb_procedures[] = {
722 + PROC(CB_GETATTR, enc_cb_getattr, dec_cb_getattr),
723 + PROC(CB_RECALL, enc_cb_recall, dec_cb_recall),
726 +struct rpc_version nfs_cb_version4 = {
728 + .nrprocs = sizeof(nfs4_cb_procedures)/sizeof(nfs4_cb_procedures[0]),
729 + .procs = nfs4_cb_procedures
732 +static struct rpc_version * nfs_cb_version[] = {
737 +struct rpc_procinfo nfs4_cb_null_proc= {
738 + .p_proc = NFSPROC4_CB_NULL,
739 + .p_encode = (kxdrproc_t)nfs4_xdr_enc_null,
740 + .p_decode = (kxdrproc_t) nfs4_xdr_dec_null,
745 + * Use the SETCLIENTID credential
748 +nfsd4_lookupcred(struct nfs4_client *clp, int taskflags)
750 + struct auth_cred acred;
751 + struct rpc_clnt *clnt = clp->cl_callback.cb_client;
752 + struct rpc_cred *ret = NULL;
756 + get_group_info(clp->cl_cred.cr_group_info);
757 + acred.uid = clp->cl_cred.cr_uid;
758 + acred.gid = clp->cl_cred.cr_gid;
759 + acred.group_info = clp->cl_cred.cr_group_info;
761 + dprintk("NFSD: looking up %s cred\n",
762 + clnt->cl_auth->au_ops->au_name);
763 + ret = rpcauth_lookup_credcache(clnt->cl_auth, &acred, taskflags);
764 + put_group_info(clp->cl_cred.cr_group_info);
770 + * Set up the callback client and put a NFSPROC4_CB_NULL on the wire...
773 +nfsd4_probe_callback(struct nfs4_client *clp)
775 + struct sockaddr_in addr;
776 + struct nfs4_callback *cb = &clp->cl_callback;
777 + struct rpc_timeout timeparms;
778 + struct rpc_xprt * xprt;
779 + struct rpc_program * program = &cb->cb_program;
780 + struct rpc_stat * stat = &cb->cb_stat;
781 + struct rpc_clnt * clnt;
782 + struct rpc_message msg = {
783 + .rpc_proc = &nfs4_cb_null_proc,
789 + dprintk("NFSD: probe_callback. cb_parsed %d cb_set %d 1\n",
790 + cb->cb_parsed, cb->cb_set);
791 + if (!cb->cb_parsed || cb->cb_set)
794 + /* Currently, we only support tcp for the callback channel */
795 + if (cb->cb_netid.len !=3 || memcmp((char *)cb->cb_netid.data, "tcp", 3))
798 + /* Initialize address */
799 + memset(&addr, 0, sizeof(addr));
800 + addr.sin_family = AF_INET;
801 + addr.sin_port = htons(cb->cb_port);
802 + addr.sin_addr.s_addr = htonl(cb->cb_addr);
804 + /* Initialize timeout */
805 + timeparms.to_initval = HZ;
806 + timeparms.to_retries = 5;
807 + timeparms.to_maxval = NFSD_LEASE_TIME*HZ;
808 + timeparms.to_exponential = 1;
810 + /* Create RPC transport */
811 + if (!(xprt = xprt_create_proto(IPPROTO_TCP, &addr, &timeparms))) {
812 + dprintk("NFSD: couldn't create callback transport!\n");
816 + /* Initialize rpc_program */
817 + program->name = "nfs4_cb";
818 + program->number = cb->cb_prog;
819 + program->nrvers = sizeof(nfs_cb_version)/sizeof(nfs_cb_version[0]);
820 + program->version = nfs_cb_version;
821 + program->stats = stat;
823 + /* Initialize rpc_stat */
824 + memset(stat, 0, sizeof(struct rpc_stat));
825 + stat->program = program;
827 + /* Create RPC client
829 + * XXX AUTH_UNIX only - need AUTH_GSS....
831 + sprintf(hostname, "%u.%u.%u.%u", NIPQUAD(addr.sin_addr.s_addr));
832 + if (!(clnt = rpc_create_client(xprt, hostname, program, 1, RPC_AUTH_UNIX))) {
833 + dprintk("NFSD: couldn't create callback client\n");
837 + clnt->cl_softrtry = 1;
838 + clnt->cl_chatty = 1;
839 + cb->cb_client = clnt;
841 + /* Kick rpciod, put the call on the wire. */
843 + if (rpciod_up() != 0) {
844 + dprintk("nfsd: couldn't start rpciod for callbacks!\n");
848 + /* the task holds a reference to the nfs4_client struct */
849 + atomic_inc(&clp->cl_count);
851 + msg.rpc_cred = nfsd4_lookupcred(clp,0);
852 + status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, nfs4_cb_null, 0);
855 + dprintk("NFSD: asynchronous NFSPROC4_CB_NULL failed!\n");
863 + rpc_shutdown_client(clnt);
866 + xprt_destroy(xprt);
868 + dprintk("NFSD: warning: no callback path to client %.*s\n",
869 + clp->cl_name.len, clp->cl_name.data);
870 + cb->cb_client = NULL;
874 +nfs4_cb_null(struct rpc_task *task)
876 + struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
877 + struct nfs4_callback *cb = &clp->cl_callback;
878 + u32 addr = htonl(cb->cb_addr);
880 + dprintk("NFSD: nfs4_cb_null task->tk_status %d\n", task->tk_status);
882 + if (task->tk_status < 0) {
883 + dprintk("NFSD: callback establishment to client %.*s failed\n",
884 + clp->cl_name.len, clp->cl_name.data);
888 + dprintk("NFSD: callback set to client %u.%u.%u.%u\n", NIPQUAD(addr));
890 + put_nfs4_client(clp);
892 --- linux-2.6.7/fs/nfsd/nfs4xdr.c.lsec 2004-06-15 23:19:52.000000000 -0600
893 +++ linux-2.6.7/fs/nfsd/nfs4xdr.c 2005-03-23 14:28:23.924412632 -0700
895 #include <linux/nfsd/state.h>
896 #include <linux/nfsd/xdr4.h>
897 #include <linux/nfsd_idmap.h>
898 +#include <linux/nfs4.h>
899 +#include <linux/nfs4_acl.h>
901 #define NFSDDBG_FACILITY NFSDDBG_XDR
903 @@ -287,27 +289,40 @@ u32 *read_buf(struct nfsd4_compoundargs
907 -char *savemem(struct nfsd4_compoundargs *argp, u32 *p, int nbytes)
909 +defer_free(struct nfsd4_compoundargs *argp,
910 + void (*release)(const void *), void *p)
914 + tb = kmalloc(sizeof(*tb), GFP_KERNEL);
918 + tb->release = release;
919 + tb->next = argp->to_free;
920 + argp->to_free = tb;
924 +char *savemem(struct nfsd4_compoundargs *argp, u32 *p, int nbytes)
927 if (p == argp->tmp) {
928 - p = kmalloc(nbytes, GFP_KERNEL);
929 - if (!p) return NULL;
930 + new = kmalloc(nbytes, GFP_KERNEL);
931 + if (!new) return NULL;
933 memcpy(p, argp->tmp, nbytes);
939 - tb = kmalloc(sizeof(*tb), GFP_KERNEL);
942 + if (defer_free(argp, kfree, p)) {
947 - tb->next = argp->to_free;
948 - argp->to_free = tb;
955 @@ -335,7 +350,8 @@ nfsd4_decode_bitmap(struct nfsd4_compoun
959 -nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *iattr)
960 +nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, struct iattr *iattr,
961 + struct nfs4_acl **acl)
963 int expected_len, len = 0;
965 @@ -364,6 +380,51 @@ nfsd4_decode_fattr(struct nfsd4_compound
966 READ64(iattr->ia_size);
967 iattr->ia_valid |= ATTR_SIZE;
969 + if (bmval[0] & FATTR4_WORD0_ACL) {
971 + struct nfs4_ace ace;
973 + READ_BUF(4); len += 4;
976 + *acl = nfs4_acl_new();
977 + if (*acl == NULL) {
981 + defer_free(argp, (void (*)(const void *))nfs4_acl_free, *acl);
983 + for (i = 0; i < nace; i++) {
984 + READ_BUF(16); len += 16;
987 + READ32(ace.access_mask);
990 + len += XDR_QUADLEN(dummy32) << 2;
991 + READMEM(buf, dummy32);
992 + if (check_utf8(buf, dummy32))
993 + return nfserr_inval;
994 + ace.whotype = nfs4_acl_get_whotype(buf, dummy32);
996 + if (ace.whotype != NFS4_ACL_WHO_NAMED)
998 + else if (ace.flag & NFS4_ACE_IDENTIFIER_GROUP)
999 + status = nfsd_map_name_to_gid(argp->rqstp,
1000 + buf, dummy32, &ace.who);
1002 + status = nfsd_map_name_to_uid(argp->rqstp,
1003 + buf, dummy32, &ace.who);
1006 + if (nfs4_acl_add_ace(*acl, ace.type, ace.flag,
1007 + ace.access_mask, ace.whotype, ace.who) != 0) {
1014 if (bmval[1] & FATTR4_WORD1_MODE) {
1017 @@ -549,7 +610,7 @@ nfsd4_decode_create(struct nfsd4_compoun
1018 if ((status = check_filename(create->cr_name, create->cr_namelen, nfserr_inval)))
1021 - if ((status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr)))
1022 + if ((status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr, &create->cr_acl)))
1026 @@ -698,7 +759,7 @@ nfsd4_decode_open(struct nfsd4_compounda
1027 switch (open->op_createmode) {
1028 case NFS4_CREATE_UNCHECKED:
1029 case NFS4_CREATE_GUARDED:
1030 - if ((status = nfsd4_decode_fattr(argp, open->op_bmval, &open->op_iattr)))
1031 + if ((status = nfsd4_decode_fattr(argp, open->op_bmval, &open->op_iattr, &open->op_acl)))
1034 case NFS4_CREATE_EXCLUSIVE:
1035 @@ -875,7 +936,7 @@ nfsd4_decode_setattr(struct nfsd4_compou
1036 READ_BUF(sizeof(stateid_t));
1037 READ32(setattr->sa_stateid.si_generation);
1038 COPYMEM(&setattr->sa_stateid.si_opaque, sizeof(stateid_opaque_t));
1039 - if ((status = nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr)))
1040 + if ((status = nfsd4_decode_fattr(argp, setattr->sa_bmval, &setattr->sa_iattr, &setattr->sa_acl)))
1044 @@ -1288,32 +1349,24 @@ static u32 nfs4_ftypes[16] = {
1045 NF4SOCK, NF4BAD, NF4LNK, NF4BAD,
1051 - return 3 - ((l - 1) & 3); /* smallest i>=0 such that (l+i)%4 = 0 */
1055 -nfsd4_encode_name(struct svc_rqst *rqstp, int group, uid_t id,
1056 +nfsd4_encode_name(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
1057 u32 **p, int *buflen)
1062 if (*buflen < (XDR_QUADLEN(IDMAP_NAMESZ) << 2) + 4)
1063 return nfserr_resource;
1065 + if (whotype != NFS4_ACL_WHO_NAMED)
1066 + status = nfs4_acl_write_who(whotype, (u8 *)(*p + 1));
1068 status = nfsd_map_gid_to_name(rqstp, id, (u8 *)(*p + 1));
1070 status = nfsd_map_uid_to_name(rqstp, id, (u8 *)(*p + 1));
1072 return nfserrno(status);
1073 - len = (unsigned)status;
1074 - *(*p)++ = htonl(len);
1075 - memset((u8 *)*p + len, 0, xdr_padding(len));
1076 - *p += XDR_QUADLEN(len);
1077 - *buflen -= (XDR_QUADLEN(len) << 2) + 4;
1078 + *p = xdr_encode_opaque(*p, NULL, status);
1079 + *buflen -= (XDR_QUADLEN(status) << 2) + 4;
1080 BUG_ON(*buflen < 0);
1083 @@ -1321,13 +1374,20 @@ nfsd4_encode_name(struct svc_rqst *rqstp
1085 nfsd4_encode_user(struct svc_rqst *rqstp, uid_t uid, u32 **p, int *buflen)
1087 - return nfsd4_encode_name(rqstp, uid, 0, p, buflen);
1088 + return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, uid, 0, p, buflen);
1092 nfsd4_encode_group(struct svc_rqst *rqstp, uid_t gid, u32 **p, int *buflen)
1094 - return nfsd4_encode_name(rqstp, gid, 1, p, buflen);
1095 + return nfsd4_encode_name(rqstp, NFS4_ACL_WHO_NAMED, gid, 1, p, buflen);
1099 +nfsd4_encode_aclname(struct svc_rqst *rqstp, int whotype, uid_t id, int group,
1100 + u32 **p, int *buflen)
1102 + return nfsd4_encode_name(rqstp, whotype, id, group, p, buflen);
1106 @@ -1354,6 +1414,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
1110 + int aclsupport = 0;
1111 + struct nfs4_acl *acl = NULL;
1113 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
1114 BUG_ON(bmval0 & ~NFSD_SUPPORTED_ATTRS_WORD0);
1115 @@ -1376,6 +1438,17 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
1119 + if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
1120 + | FATTR4_WORD0_SUPPORTED_ATTRS)) {
1121 + status = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
1122 + aclsupport = (status == 0);
1123 + if (bmval0 & FATTR4_WORD0_ACL) {
1124 + if (status == -EOPNOTSUPP)
1125 + bmval0 &= ~FATTR4_WORD0_ACL;
1126 + else if (status != 0)
1130 if ((buflen -= 16) < 0)
1133 @@ -1388,7 +1461,9 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
1134 if ((buflen -= 12) < 0)
1137 - WRITE32(NFSD_SUPPORTED_ATTRS_WORD0);
1138 + WRITE32(aclsupport ?
1139 + NFSD_SUPPORTED_ATTRS_WORD0 :
1140 + NFSD_SUPPORTED_ATTRS_WORD0 & ~FATTR4_WORD0_ACL);
1141 WRITE32(NFSD_SUPPORTED_ATTRS_WORD1);
1143 if (bmval0 & FATTR4_WORD0_TYPE) {
1144 @@ -1459,10 +1534,44 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
1148 + if (bmval0 & FATTR4_WORD0_ACL) {
1149 + struct nfs4_ace *ace;
1150 + struct list_head *h;
1152 + if (acl == NULL) {
1153 + if ((buflen -= 4) < 0)
1154 + goto out_resource;
1159 + if ((buflen -= 4) < 0)
1160 + goto out_resource;
1161 + WRITE32(acl->naces);
1163 + list_for_each(h, &acl->ace_head) {
1164 + ace = list_entry(h, struct nfs4_ace, l_ace);
1166 + if ((buflen -= 4*3) < 0)
1167 + goto out_resource;
1168 + WRITE32(ace->type);
1169 + WRITE32(ace->flag);
1170 + WRITE32(ace->access_mask & NFS4_ACE_MASK_ALL);
1171 + status = nfsd4_encode_aclname(rqstp, ace->whotype,
1172 + ace->who, ace->flag & NFS4_ACE_IDENTIFIER_GROUP,
1174 + if (status == nfserr_resource)
1175 + goto out_resource;
1181 if (bmval0 & FATTR4_WORD0_ACLSUPPORT) {
1182 if ((buflen -= 4) < 0)
1185 + WRITE32(aclsupport ?
1186 + ACL4_SUPPORT_ALLOW_ACL|ACL4_SUPPORT_DENY_ACL : 0);
1188 if (bmval0 & FATTR4_WORD0_CANSETTIME) {
1189 if ((buflen -= 4) < 0)
1190 @@ -1645,6 +1754,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
1194 + nfs4_acl_free(acl);
1198 @@ -2471,6 +2581,24 @@ nfs4svc_encode_voidres(struct svc_rqst *
1199 return xdr_ressize_check(rqstp, p);
1202 +void nfsd4_release_compoundargs(struct nfsd4_compoundargs *args)
1204 + if (args->ops != args->iops) {
1206 + args->ops = args->iops;
1209 + kfree(args->tmpp);
1210 + args->tmpp = NULL;
1212 + while (args->to_free) {
1213 + struct tmpbuf *tb = args->to_free;
1214 + args->to_free = tb->next;
1215 + tb->release(tb->buf);
1221 nfs4svc_decode_compoundargs(struct svc_rqst *rqstp, u32 *p, struct nfsd4_compoundargs *args)
1223 @@ -2487,20 +2615,7 @@ nfs4svc_decode_compoundargs(struct svc_r
1225 status = nfsd4_decode_compound(args);
1227 - if (args->ops != args->iops) {
1229 - args->ops = args->iops;
1232 - kfree(args->tmpp);
1233 - args->tmpp = NULL;
1235 - while (args->to_free) {
1236 - struct tmpbuf *tb = args->to_free;
1237 - args->to_free = tb->next;
1241 + nfsd4_release_compoundargs(args);
1245 --- linux-2.6.7/fs/nfsd/nfs4proc.c.lsec 2004-06-15 23:20:26.000000000 -0600
1246 +++ linux-2.6.7/fs/nfsd/nfs4proc.c 2005-03-23 14:28:24.080388920 -0700
1248 #include <linux/nfs4.h>
1249 #include <linux/nfsd/state.h>
1250 #include <linux/nfsd/xdr4.h>
1251 +#include <linux/nfs4_acl.h>
1253 #define NFSDDBG_FACILITY NFSDDBG_PROC
1255 @@ -135,9 +136,11 @@ do_open_fhandle(struct svc_rqst *rqstp,
1259 - dprintk("NFSD: do_open_fhandle\n");
1260 + /* Only reclaims from previously confirmed clients are valid */
1261 + if ((status = nfs4_check_open_reclaim(&open->op_clientid)))
1264 - /* we don't know the target directory, and therefore can not
1265 + /* We don't know the target directory, and therefore can not
1266 * set the change info
1269 @@ -172,8 +175,7 @@ nfsd4_open(struct svc_rqst *rqstp, struc
1270 if (nfs4_in_grace() && open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
1271 return nfserr_grace;
1273 - if (nfs4_in_no_grace() &&
1274 - open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
1275 + if (!nfs4_in_grace() && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
1276 return nfserr_no_grace;
1278 /* This check required by spec. */
1279 @@ -318,7 +320,7 @@ nfsd4_commit(struct svc_rqst *rqstp, str
1285 nfsd4_create(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_create *create)
1287 struct svc_fh resfh;
1288 @@ -435,7 +437,7 @@ nfsd4_link(struct svc_rqst *rqstp, struc
1294 nfsd4_lookupp(struct svc_rqst *rqstp, struct svc_fh *current_fh)
1296 struct svc_fh tmp_fh;
1297 @@ -619,7 +621,7 @@ nfsd4_setattr(struct svc_rqst *rqstp, st
1298 status = nfserr_bad_stateid;
1299 if (ZERO_STATEID(&setattr->sa_stateid) || ONE_STATEID(&setattr->sa_stateid)) {
1300 dprintk("NFSD: nfsd4_setattr: magic stateid!\n");
1306 @@ -627,17 +629,25 @@ nfsd4_setattr(struct svc_rqst *rqstp, st
1307 &setattr->sa_stateid,
1308 CHECK_FH | RDWR_STATE, &stp))) {
1309 dprintk("NFSD: nfsd4_setattr: couldn't process stateid!\n");
1313 status = nfserr_openmode;
1314 if (!access_bits_permit_write(stp->st_access_bmap)) {
1315 dprintk("NFSD: nfsd4_setattr: not opened for write!\n");
1319 nfs4_unlock_state();
1321 - return (nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr, 0, (time_t)0));
1323 + if (setattr->sa_acl != NULL)
1324 + status = nfsd4_set_nfs4_acl(rqstp, current_fh, setattr->sa_acl);
1327 + status = nfsd_setattr(rqstp, current_fh, &setattr->sa_iattr,
1332 nfs4_unlock_state();
1335 @@ -773,13 +783,20 @@ nfsd4_proc_compound(struct svc_rqst *rqs
1336 struct nfsd4_compoundres *resp)
1338 struct nfsd4_op *op;
1339 - struct svc_fh current_fh;
1340 - struct svc_fh save_fh;
1341 + struct svc_fh *current_fh = NULL;
1342 + struct svc_fh *save_fh = NULL;
1343 int slack_space; /* in words, not bytes! */
1346 - fh_init(&current_fh, NFS4_FHSIZE);
1347 - fh_init(&save_fh, NFS4_FHSIZE);
1348 + status = nfserr_resource;
1349 + current_fh = kmalloc(sizeof(*current_fh), GFP_KERNEL);
1350 + if (current_fh == NULL)
1352 + fh_init(current_fh, NFS4_FHSIZE);
1353 + save_fh = kmalloc(sizeof(*save_fh), GFP_KERNEL);
1354 + if (save_fh == NULL)
1356 + fh_init(save_fh, NFS4_FHSIZE);
1358 resp->xbuf = &rqstp->rq_res;
1359 resp->p = rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len;
1360 @@ -831,7 +848,7 @@ nfsd4_proc_compound(struct svc_rqst *rqs
1361 * SETATTR NOFILEHANDLE error handled in nfsd4_setattr
1362 * due to required returned bitmap argument
1364 - if ((!current_fh.fh_dentry) &&
1365 + if ((!current_fh->fh_dentry) &&
1366 !((op->opnum == OP_PUTFH) || (op->opnum == OP_PUTROOTFH) ||
1367 (op->opnum == OP_SETCLIENTID) ||
1368 (op->opnum == OP_SETCLIENTID_CONFIRM) ||
1369 @@ -843,105 +860,105 @@ nfsd4_proc_compound(struct svc_rqst *rqs
1371 switch (op->opnum) {
1373 - op->status = nfsd4_access(rqstp, &current_fh, &op->u.access);
1374 + op->status = nfsd4_access(rqstp, current_fh, &op->u.access);
1377 - op->status = nfsd4_close(rqstp, &current_fh, &op->u.close);
1378 + op->status = nfsd4_close(rqstp, current_fh, &op->u.close);
1379 if (op->u.close.cl_stateowner)
1381 &op->u.close.cl_stateowner->so_replay;
1384 - op->status = nfsd4_commit(rqstp, &current_fh, &op->u.commit);
1385 + op->status = nfsd4_commit(rqstp, current_fh, &op->u.commit);
1388 - op->status = nfsd4_create(rqstp, &current_fh, &op->u.create);
1389 + op->status = nfsd4_create(rqstp, current_fh, &op->u.create);
1392 - op->status = nfsd4_getattr(rqstp, &current_fh, &op->u.getattr);
1393 + op->status = nfsd4_getattr(rqstp, current_fh, &op->u.getattr);
1396 - op->status = nfsd4_getfh(&current_fh, &op->u.getfh);
1397 + op->status = nfsd4_getfh(current_fh, &op->u.getfh);
1400 - op->status = nfsd4_link(rqstp, &current_fh, &save_fh, &op->u.link);
1401 + op->status = nfsd4_link(rqstp, current_fh, save_fh, &op->u.link);
1404 - op->status = nfsd4_lock(rqstp, &current_fh, &op->u.lock);
1405 + op->status = nfsd4_lock(rqstp, current_fh, &op->u.lock);
1406 if (op->u.lock.lk_stateowner)
1408 &op->u.lock.lk_stateowner->so_replay;
1411 - op->status = nfsd4_lockt(rqstp, &current_fh, &op->u.lockt);
1412 + op->status = nfsd4_lockt(rqstp, current_fh, &op->u.lockt);
1415 - op->status = nfsd4_locku(rqstp, &current_fh, &op->u.locku);
1416 + op->status = nfsd4_locku(rqstp, current_fh, &op->u.locku);
1417 if (op->u.locku.lu_stateowner)
1419 &op->u.locku.lu_stateowner->so_replay;
1422 - op->status = nfsd4_lookup(rqstp, &current_fh, &op->u.lookup);
1423 + op->status = nfsd4_lookup(rqstp, current_fh, &op->u.lookup);
1426 - op->status = nfsd4_lookupp(rqstp, &current_fh);
1427 + op->status = nfsd4_lookupp(rqstp, current_fh);
1430 - op->status = nfsd4_verify(rqstp, &current_fh, &op->u.nverify);
1431 + op->status = nfsd4_verify(rqstp, current_fh, &op->u.nverify);
1432 if (op->status == nfserr_not_same)
1433 op->status = nfs_ok;
1436 - op->status = nfsd4_open(rqstp, &current_fh, &op->u.open);
1437 + op->status = nfsd4_open(rqstp, current_fh, &op->u.open);
1438 if (op->u.open.op_stateowner)
1440 &op->u.open.op_stateowner->so_replay;
1442 case OP_OPEN_CONFIRM:
1443 - op->status = nfsd4_open_confirm(rqstp, &current_fh, &op->u.open_confirm);
1444 + op->status = nfsd4_open_confirm(rqstp, current_fh, &op->u.open_confirm);
1445 if (op->u.open_confirm.oc_stateowner)
1447 &op->u.open_confirm.oc_stateowner->so_replay;
1449 case OP_OPEN_DOWNGRADE:
1450 - op->status = nfsd4_open_downgrade(rqstp, &current_fh, &op->u.open_downgrade);
1451 + op->status = nfsd4_open_downgrade(rqstp, current_fh, &op->u.open_downgrade);
1452 if (op->u.open_downgrade.od_stateowner)
1454 &op->u.open_downgrade.od_stateowner->so_replay;
1457 - op->status = nfsd4_putfh(rqstp, &current_fh, &op->u.putfh);
1458 + op->status = nfsd4_putfh(rqstp, current_fh, &op->u.putfh);
1461 - op->status = nfsd4_putrootfh(rqstp, &current_fh);
1462 + op->status = nfsd4_putrootfh(rqstp, current_fh);
1465 - op->status = nfsd4_read(rqstp, &current_fh, &op->u.read);
1466 + op->status = nfsd4_read(rqstp, current_fh, &op->u.read);
1469 - op->status = nfsd4_readdir(rqstp, &current_fh, &op->u.readdir);
1470 + op->status = nfsd4_readdir(rqstp, current_fh, &op->u.readdir);
1473 - op->status = nfsd4_readlink(rqstp, &current_fh, &op->u.readlink);
1474 + op->status = nfsd4_readlink(rqstp, current_fh, &op->u.readlink);
1477 - op->status = nfsd4_remove(rqstp, &current_fh, &op->u.remove);
1478 + op->status = nfsd4_remove(rqstp, current_fh, &op->u.remove);
1481 - op->status = nfsd4_rename(rqstp, &current_fh, &save_fh, &op->u.rename);
1482 + op->status = nfsd4_rename(rqstp, current_fh, save_fh, &op->u.rename);
1485 op->status = nfsd4_renew(&op->u.renew);
1488 - op->status = nfsd4_restorefh(&current_fh, &save_fh);
1489 + op->status = nfsd4_restorefh(current_fh, save_fh);
1492 - op->status = nfsd4_savefh(&current_fh, &save_fh);
1493 + op->status = nfsd4_savefh(current_fh, save_fh);
1496 - op->status = nfsd4_setattr(rqstp, &current_fh, &op->u.setattr);
1497 + op->status = nfsd4_setattr(rqstp, current_fh, &op->u.setattr);
1499 case OP_SETCLIENTID:
1500 op->status = nfsd4_setclientid(rqstp, &op->u.setclientid);
1501 @@ -950,12 +967,12 @@ nfsd4_proc_compound(struct svc_rqst *rqs
1502 op->status = nfsd4_setclientid_confirm(rqstp, &op->u.setclientid_confirm);
1505 - op->status = nfsd4_verify(rqstp, &current_fh, &op->u.verify);
1506 + op->status = nfsd4_verify(rqstp, current_fh, &op->u.verify);
1507 if (op->status == nfserr_same)
1508 op->status = nfs_ok;
1511 - op->status = nfsd4_write(rqstp, &current_fh, &op->u.write);
1512 + op->status = nfsd4_write(rqstp, current_fh, &op->u.write);
1514 case OP_RELEASE_LOCKOWNER:
1515 op->status = nfsd4_release_lockowner(rqstp, &op->u.release_lockowner);
1516 @@ -976,22 +993,13 @@ encode_op:
1520 - if (args->ops != args->iops) {
1522 - args->ops = args->iops;
1525 - kfree(args->tmpp);
1526 - args->tmpp = NULL;
1528 - while (args->to_free) {
1529 - struct tmpbuf *tb = args->to_free;
1530 - args->to_free = tb->next;
1534 - fh_put(&current_fh);
1536 + nfsd4_release_compoundargs(args);
1538 + fh_put(current_fh);
1539 + kfree(current_fh);
1546 --- linux-2.6.7/fs/nfsd/nfs4state.c.lsec 2004-06-15 23:19:43.000000000 -0600
1547 +++ linux-2.6.7/fs/nfsd/nfs4state.c 2005-03-23 14:28:24.028396824 -0700
1549 #define NFSDDBG_FACILITY NFSDDBG_PROC
1552 +static time_t lease_time = 90; /* default lease time */
1553 +static time_t old_lease_time = 90; /* past incarnation lease time */
1554 +static u32 nfs4_reclaim_init = 0;
1556 static time_t grace_end = 0;
1557 static u32 current_clientid = 1;
1558 @@ -82,7 +85,7 @@ struct nfs4_stateid * find_stateid(state
1559 * protects clientid_hashtbl[], clientstr_hashtbl[],
1560 * unconfstr_hashtbl[], uncofid_hashtbl[].
1562 -static struct semaphore client_sema;
1563 +static DECLARE_MUTEX(client_sema);
1566 nfs4_lock_state(void)
1567 @@ -131,8 +134,11 @@ static void release_file(struct nfs4_fil
1568 ((id) & CLIENT_HASH_MASK)
1569 #define clientstr_hashval(name, namelen) \
1570 (opaque_hashval((name), (namelen)) & CLIENT_HASH_MASK)
1572 -/* conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
1574 + * reclaim_str_hashtbl[] holds known client info from previous reset/reboot
1575 + * used in reboot/reset lease grace period processing
1577 + * conf_id_hashtbl[], and conf_str_hashtbl[] hold confirmed
1578 * setclientid_confirmed info.
1580 * unconf_str_hastbl[] and unconf_id_hashtbl[] hold unconfirmed
1581 @@ -144,6 +150,8 @@ static void release_file(struct nfs4_fil
1582 * close_lru holds (open) stateowner queue ordered by nfs4_stateowner.so_time
1583 * for last close replay.
1585 +static struct list_head reclaim_str_hashtbl[CLIENT_HASH_SIZE];
1586 +static int reclaim_str_hashtbl_size;
1587 static struct list_head conf_id_hashtbl[CLIENT_HASH_SIZE];
1588 static struct list_head conf_str_hashtbl[CLIENT_HASH_SIZE];
1589 static struct list_head unconf_str_hashtbl[CLIENT_HASH_SIZE];
1590 @@ -208,12 +216,20 @@ free_client(struct nfs4_client *clp)
1596 +put_nfs4_client(struct nfs4_client *clp)
1598 + if (atomic_dec_and_test(&clp->cl_count))
1603 expire_client(struct nfs4_client *clp)
1605 struct nfs4_stateowner *sop;
1607 - dprintk("NFSD: expire_client\n");
1608 + dprintk("NFSD: expire_client cl_count %d\n",
1609 + atomic_read(&clp->cl_count));
1610 list_del(&clp->cl_idhash);
1611 list_del(&clp->cl_strhash);
1612 list_del(&clp->cl_lru);
1613 @@ -221,7 +237,7 @@ expire_client(struct nfs4_client *clp)
1614 sop = list_entry(clp->cl_perclient.next, struct nfs4_stateowner, so_perclient);
1615 release_stateowner(sop);
1618 + put_nfs4_client(clp);
1621 static struct nfs4_client *
1622 @@ -230,6 +246,7 @@ create_client(struct xdr_netobj name) {
1624 if(!(clp = alloc_client(name)))
1626 + atomic_set(&clp->cl_count, 1);
1627 INIT_LIST_HEAD(&clp->cl_idhash);
1628 INIT_LIST_HEAD(&clp->cl_strhash);
1629 INIT_LIST_HEAD(&clp->cl_perclient);
1630 @@ -339,6 +356,99 @@ move_to_confirmed(struct nfs4_client *cl
1635 +/* a helper function for parse_callback */
1637 +parse_octet(unsigned int *lenp, char **addrp)
1639 + unsigned int len = *lenp;
1651 + if ((c < '0') || (c > '9')) {
1657 + n = (n * 10) + (c - '0');
1668 +/* parse and set the setclientid ipv4 callback address */
1670 +parse_ipv4(unsigned int addr_len, char *addr_val, unsigned int *cbaddrp, unsigned short *cbportp)
1675 + u32 addrlen = addr_len;
1676 + char *addr = addr_val;
1681 + for(i = 4; i > 0 ; i--) {
1682 + if ((temp = parse_octet(&addrlen, &addr)) < 0) {
1685 + cbaddr |= (temp << shift);
1689 + *cbaddrp = cbaddr;
1693 + for(i = 2; i > 0 ; i--) {
1694 + if ((temp = parse_octet(&addrlen, &addr)) < 0) {
1697 + cbport |= (temp << shift);
1701 + *cbportp = cbport;
1706 +gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se)
1708 + struct nfs4_callback *cb = &clp->cl_callback;
1710 + if( !(parse_ipv4(se->se_callback_addr_len, se->se_callback_addr_val,
1711 + &cb->cb_addr, &cb->cb_port))) {
1712 + printk(KERN_INFO "NFSD: BAD callback address. client will not receive delegations\n");
1713 + printk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
1714 + "will not receive delegations\n",
1715 + clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1717 + cb->cb_parsed = 0;
1720 + cb->cb_netid.len = se->se_callback_netid_len;
1721 + cb->cb_netid.data = se->se_callback_netid_val;
1722 + cb->cb_prog = se->se_callback_prog;
1723 + cb->cb_ident = se->se_callback_ident;
1724 + cb->cb_parsed = 1;
1728 * RFC 3010 has a complex implmentation description of processing a
1729 * SETCLIENTID request consisting of 5 bullets, labeled as
1730 @@ -450,6 +560,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp
1731 copy_cred(&new->cl_cred,&rqstp->rq_cred);
1734 + gen_callback(new, setclid);
1735 add_to_unconfirmed(new, strhashval);
1736 } else if (cmp_verf(&conf->cl_verifier, &clverifier)) {
1738 @@ -477,6 +588,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp
1739 copy_cred(&new->cl_cred,&rqstp->rq_cred);
1740 copy_clid(new, conf);
1742 + gen_callback(new, setclid);
1743 add_to_unconfirmed(new,strhashval);
1744 } else if (!unconf) {
1746 @@ -494,6 +606,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp
1747 copy_cred(&new->cl_cred,&rqstp->rq_cred);
1750 + gen_callback(new, setclid);
1751 add_to_unconfirmed(new, strhashval);
1752 } else if (!cmp_verf(&conf->cl_confirm, &unconf->cl_confirm)) {
1754 @@ -519,6 +632,7 @@ nfsd4_setclientid(struct svc_rqst *rqstp
1755 copy_cred(&new->cl_cred,&rqstp->rq_cred);
1758 + gen_callback(new, setclid);
1759 add_to_unconfirmed(new, strhashval);
1761 /* No cases hit !!! */
1762 @@ -529,7 +643,6 @@ nfsd4_setclientid(struct svc_rqst *rqstp
1763 setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
1764 setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
1765 memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
1766 - printk(KERN_INFO "NFSD: this client will not receive delegations\n");
1769 nfs4_unlock_state();
1770 @@ -575,7 +688,7 @@ nfsd4_setclientid_confirm(struct svc_rqs
1773 if (clp->cl_addr != ip_addr) {
1774 - printk("NFSD: setclientid: string in use by client"
1775 + dprintk("NFSD: setclientid: string in use by client"
1776 "(clientid %08x/%08x)\n",
1777 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1779 @@ -588,7 +701,7 @@ nfsd4_setclientid_confirm(struct svc_rqs
1781 status = nfserr_inval;
1782 if (clp->cl_addr != ip_addr) {
1783 - printk("NFSD: setclientid: string in use by client"
1784 + dprintk("NFSD: setclientid: string in use by client"
1785 "(clientid %08x/%08x)\n",
1786 clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);
1788 @@ -610,6 +723,7 @@ nfsd4_setclientid_confirm(struct svc_rqs
1789 status = nfserr_clid_inuse;
1791 expire_client(conf);
1793 move_to_confirmed(unconf, idhashval);
1796 @@ -627,6 +741,7 @@ nfsd4_setclientid_confirm(struct svc_rqs
1797 if (!cmp_creds(&conf->cl_cred,&rqstp->rq_cred)) {
1798 status = nfserr_clid_inuse;
1804 @@ -641,6 +756,7 @@ nfsd4_setclientid_confirm(struct svc_rqs
1805 status = nfserr_clid_inuse;
1809 move_to_confirmed(unconf, idhashval);
1812 @@ -660,7 +776,9 @@ nfsd4_setclientid_confirm(struct svc_rqs
1813 status = nfserr_inval;
1816 - /* XXX if status == nfs_ok, probe callback path */
1818 + nfsd4_probe_callback(clp);
1820 nfs4_unlock_state();
1823 @@ -1510,10 +1628,12 @@ nfs4_preprocess_seqid_op(struct svc_fh *
1825 status = nfserr_bad_stateid;
1827 - /* for new lock stateowners, check that the lock->v.new.open_stateid
1828 - * refers to an open stateowner, and that the lockclid
1829 - * (nfs4_lock->v.new.clientid) is the same as the
1830 - * open_stateid->st_stateowner->so_client->clientid
1831 + /* for new lock stateowners:
1832 + * check that the lock->v.new.open_stateid
1833 + * refers to an open stateowner
1835 + * check that the lockclid (nfs4_lock->v.new.clientid) is the same
1836 + * as the open_stateid->st_stateowner->so_client->clientid
1839 struct nfs4_stateowner *sop = stp->st_stateowner;
1840 @@ -1599,6 +1719,17 @@ check_replay:
1844 + * eventually, this will perform an upcall to the 'state daemon' as well as
1845 + * set the cl_first_state field.
1848 +first_state(struct nfs4_client *clp)
1850 + if (!clp->cl_first_state)
1851 + clp->cl_first_state = get_seconds();
1855 * nfs4_unlock_state(); called in encode
1858 @@ -1635,6 +1766,7 @@ nfsd4_open_confirm(struct svc_rqst *rqst
1859 stp->st_stateid.si_fileid,
1860 stp->st_stateid.si_generation);
1862 + first_state(sop->so_client);
1866 @@ -1850,6 +1982,21 @@ nfs4_set_lock_denied(struct file_lock *f
1867 deny->ld_type = NFS4_WRITE_LT;
1870 +static struct nfs4_stateowner *
1871 +find_lockstateowner(struct xdr_netobj *owner, clientid_t *clid)
1873 + struct nfs4_stateowner *local = NULL;
1876 + for (i = 0; i < LOCK_HASH_SIZE; i++) {
1877 + list_for_each_entry(local, &lock_ownerid_hashtbl[i], so_idhash) {
1878 + if(!cmp_owner_str(local, owner, clid))
1887 find_lockstateowner_str(unsigned int hashval, struct xdr_netobj *owner, clientid_t *clid, struct nfs4_stateowner **op) {
1888 @@ -1969,7 +2116,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
1890 if (nfs4_in_grace() && !lock->lk_reclaim)
1891 return nfserr_grace;
1892 - if (nfs4_in_no_grace() && lock->lk_reclaim)
1893 + if (!nfs4_in_grace() && lock->lk_reclaim)
1894 return nfserr_no_grace;
1896 if (check_lock_length(lock->lk_offset, lock->lk_length))
1897 @@ -1992,7 +2139,11 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
1898 printk("NFSD: nfsd4_lock: clientid is stale!\n");
1901 - /* does the clientid in the lock owner own the open stateid? */
1903 + /* is the new lock seqid presented by the client zero? */
1904 + status = nfserr_bad_seqid;
1905 + if (lock->v.new.lock_seqid != 0)
1908 /* validate and update open stateid and open seqid */
1909 status = nfs4_preprocess_seqid_op(current_fh,
1910 @@ -2011,15 +2162,15 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
1911 strhashval = lock_ownerstr_hashval(fp->fi_inode,
1912 open_sop->so_client->cl_clientid.cl_id,
1916 * If we already have this lock owner, the client is in
1917 * error (or our bookeeping is wrong!)
1918 * for asking for a 'new lock'.
1920 status = nfserr_bad_stateid;
1921 - if (find_lockstateowner_str(strhashval, &lock->v.new.owner,
1922 - &lock->v.new.clientid, &lock_sop))
1923 + lock_sop = find_lockstateowner(&lock->v.new.owner,
1924 + &lock->v.new.clientid);
1927 status = nfserr_resource;
1928 if (!(lock->lk_stateowner = alloc_init_lock_stateowner(strhashval, open_sop->so_client, open_stp, lock)))
1929 @@ -2315,7 +2466,7 @@ nfsd4_release_lockowner(struct svc_rqst
1930 clientid_t *clid = &rlockowner->rl_clientid;
1931 struct nfs4_stateowner *local = NULL;
1932 struct xdr_netobj *owner = &rlockowner->rl_owner;
1936 dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
1937 clid->cl_boot, clid->cl_id);
1938 @@ -2330,34 +2481,136 @@ nfsd4_release_lockowner(struct svc_rqst
1942 - /* find the lockowner */
1944 - for (i=0; i < LOCK_HASH_SIZE; i++)
1945 - list_for_each_entry(local, &lock_ownerstr_hashtbl[i], so_strhash)
1946 - if(cmp_owner_str(local, owner, clid)) {
1947 - struct nfs4_stateid *stp;
1949 - /* check for any locks held by any stateid
1950 - * associated with the (lock) stateowner */
1951 - status = nfserr_locks_held;
1952 - list_for_each_entry(stp, &local->so_perfilestate,
1953 - st_perfilestate) {
1954 - if(stp->st_vfs_set) {
1955 - if (check_for_locks(&stp->st_vfs_file,
1960 - /* no locks held by (lock) stateowner */
1962 - release_stateowner(local);
1964 + local = find_lockstateowner(owner, clid);
1966 + struct nfs4_stateid *stp;
1968 + /* check for any locks held by any stateid
1969 + * associated with the (lock) stateowner */
1970 + status = nfserr_locks_held;
1971 + list_for_each_entry(stp, &local->so_perfilestate,
1972 + st_perfilestate) {
1973 + if(stp->st_vfs_set) {
1974 + if (check_for_locks(&stp->st_vfs_file, local))
1978 + /* no locks held by (lock) stateowner */
1980 + release_stateowner(local);
1983 nfs4_unlock_state();
1987 +static inline struct nfs4_client_reclaim *
1988 +alloc_reclaim(int namelen)
1990 + struct nfs4_client_reclaim *crp = NULL;
1992 + crp = kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
1995 + crp->cr_name.data = kmalloc(namelen, GFP_KERNEL);
1996 + if (!crp->cr_name.data) {
2004 + * failure => all reset bets are off, nfserr_no_grace...
2007 +nfs4_client_to_reclaim(struct nfs4_client *clp)
2009 + unsigned int strhashval;
2010 + struct nfs4_client_reclaim *crp = NULL;
2012 + crp = alloc_reclaim(clp->cl_name.len);
2015 + strhashval = clientstr_hashval(clp->cl_name.data, clp->cl_name.len);
2016 + INIT_LIST_HEAD(&crp->cr_strhash);
2017 + list_add(&crp->cr_strhash, &reclaim_str_hashtbl[strhashval]);
2018 + memcpy(crp->cr_name.data, clp->cl_name.data, clp->cl_name.len);
2019 + crp->cr_name.len = clp->cl_name.len;
2020 + crp->cr_first_state = clp->cl_first_state;
2021 + crp->cr_expired = 0;
2026 +nfs4_release_reclaim(void)
2028 + struct nfs4_client_reclaim *crp = NULL;
2031 + BUG_ON(!nfs4_reclaim_init);
2032 + for (i = 0; i < CLIENT_HASH_SIZE; i++) {
2033 + while (!list_empty(&reclaim_str_hashtbl[i])) {
2034 + crp = list_entry(reclaim_str_hashtbl[i].next,
2035 + struct nfs4_client_reclaim, cr_strhash);
2036 + list_del(&crp->cr_strhash);
2037 + kfree(crp->cr_name.data);
2039 + reclaim_str_hashtbl_size--;
2042 + BUG_ON(reclaim_str_hashtbl_size);
2046 + * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
2047 +struct nfs4_client_reclaim *
2048 +nfs4_find_reclaim_client(clientid_t *clid)
2050 + unsigned int idhashval = clientid_hashval(clid->cl_id);
2051 + unsigned int strhashval;
2052 + struct nfs4_client *clp, *client = NULL;
2053 + struct nfs4_client_reclaim *crp = NULL;
2056 + /* find clientid in conf_id_hashtbl */
2057 + list_for_each_entry(clp, &conf_id_hashtbl[idhashval], cl_idhash) {
2058 + if (cmp_clid(&clp->cl_clientid, clid)) {
2066 + /* find clp->cl_name in reclaim_str_hashtbl */
2067 + strhashval = clientstr_hashval(client->cl_name.data,
2068 + client->cl_name.len);
2069 + list_for_each_entry(crp, &reclaim_str_hashtbl[strhashval], cr_strhash) {
2070 + if(cmp_name(&crp->cr_name, &client->cl_name)) {
2078 +* Called from OPEN. Look for clientid in reclaim list.
2081 +nfs4_check_open_reclaim(clientid_t *clid)
2083 + struct nfs4_client_reclaim *crp;
2085 + if ((crp = nfs4_find_reclaim_client(clid)) == NULL)
2086 + return nfserr_reclaim_bad;
2087 + if (crp->cr_expired)
2088 + return nfserr_no_grace;
2094 * Start and stop routines
2096 @@ -2366,10 +2619,16 @@ void
2097 nfs4_state_init(void)
2100 - time_t start = get_seconds();
2101 + time_t grace_time;
2105 + if (!nfs4_reclaim_init) {
2106 + for (i = 0; i < CLIENT_HASH_SIZE; i++)
2107 + INIT_LIST_HEAD(&reclaim_str_hashtbl[i]);
2108 + reclaim_str_hashtbl_size = 0;
2109 + nfs4_reclaim_init = 1;
2111 for (i = 0; i < CLIENT_HASH_SIZE; i++) {
2112 INIT_LIST_HEAD(&conf_id_hashtbl[i]);
2113 INIT_LIST_HEAD(&conf_str_hashtbl[i]);
2114 @@ -2396,27 +2655,36 @@ nfs4_state_init(void)
2116 INIT_LIST_HEAD(&close_lru);
2117 INIT_LIST_HEAD(&client_lru);
2118 - init_MUTEX(&client_sema);
2119 - boot_time = start;
2120 - grace_end = start + NFSD_LEASE_TIME;
2121 + boot_time = get_seconds();
2122 + grace_time = max(old_lease_time, lease_time);
2123 + if (reclaim_str_hashtbl_size == 0)
2126 + printk("NFSD: starting %ld-second grace period\n", grace_time);
2127 + grace_end = boot_time + grace_time;
2128 INIT_WORK(&laundromat_work,laundromat_main, NULL);
2129 schedule_delayed_work(&laundromat_work, NFSD_LEASE_TIME*HZ);
2137 - return time_before(get_seconds(), (unsigned long)grace_end);
2138 + return get_seconds() < grace_end;
2142 -nfs4_in_no_grace(void)
2146 - return (grace_end < get_seconds());
2147 + printk("NFSD: ERROR in reboot recovery. State reclaims will fail.\n");
2148 + grace_end = get_seconds();
2152 +nfs4_lease_time(void)
2154 + return lease_time;
2158 __nfs4_state_shutdown(void)
2159 @@ -2454,6 +2722,61 @@ void
2160 nfs4_state_shutdown(void)
2163 + nfs4_release_reclaim();
2164 __nfs4_state_shutdown();
2165 nfs4_unlock_state();
2169 + * Called when leasetime is changed.
2171 + * if nfsd is not started, simply set the global lease.
2173 + * if nfsd(s) are running, lease change requires nfsv4 state to be reset.
2174 + * e.g: boot_time is reset, existing nfs4_client structs are
2175 + * used to fill reclaim_str_hashtbl, then all state (except for the
2176 + * reclaim_str_hashtbl) is re-initialized.
2178 + * if the old lease time is greater than the new lease time, the grace
2179 + * period needs to be set to the old lease time to allow clients to reclaim
2180 + * their state. XXX - we may want to set the grace period == lease time
2181 + * after an initial grace period == old lease time
2183 + * if an error occurs in this process, the new lease is set, but the server
2184 + * will not honor OPEN or LOCK reclaims, and will return nfserr_no_grace
2185 + * which means OPEN/LOCK/READ/WRITE will fail during grace period.
2187 + * clients will attempt to reset all state with SETCLIENTID/CONFIRM, and
2188 + * OPEN and LOCK reclaims.
2191 +nfs4_reset_lease(time_t leasetime)
2193 + struct nfs4_client *clp;
2196 + printk("NFSD: New leasetime %ld\n",leasetime);
2199 + nfs4_lock_state();
2200 + old_lease_time = lease_time;
2201 + lease_time = leasetime;
2203 + nfs4_release_reclaim();
2205 + /* populate reclaim_str_hashtbl with current confirmed nfs4_clientid */
2206 + for (i = 0; i < CLIENT_HASH_SIZE; i++) {
2207 + list_for_each_entry(clp, &conf_id_hashtbl[i], cl_idhash) {
2208 + if (!nfs4_client_to_reclaim(clp)) {
2209 + nfs4_release_reclaim();
2212 + reclaim_str_hashtbl_size++;
2216 + __nfs4_state_shutdown();
2217 + nfs4_state_init();
2218 + nfs4_unlock_state();
2221 --- linux-2.6.7/fs/nfsd/vfs.c.lsec 2004-06-15 23:19:13.000000000 -0600
2222 +++ linux-2.6.7/fs/nfsd/vfs.c 2005-03-23 14:28:24.520322040 -0700
2224 #include <linux/nfsd/nfsfh.h>
2225 #include <linux/quotaops.h>
2226 #include <linux/dnotify.h>
2227 +#ifdef CONFIG_NFSD_V4
2228 +#include <linux/posix_acl.h>
2229 +#include <linux/posix_acl_xattr.h>
2230 +#include <linux/xattr_acl.h>
2231 +#include <linux/xattr.h>
2232 +#include <linux/nfs4.h>
2233 +#include <linux/nfs4_acl.h>
2234 +#include <linux/nfsd_idmap.h>
2235 +#include <linux/security.h>
2236 +#endif /* CONFIG_NFSD_V4 */
2238 #include <asm/uaccess.h>
2240 @@ -344,6 +354,177 @@ out_nfserr:
2244 +#if defined(CONFIG_NFSD_V4)
2247 +set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
2253 + struct inode *inode = dentry->d_inode;
2255 + buflen = posix_acl_xattr_size(pacl->a_count);
2256 + buf = kmalloc(buflen, GFP_KERNEL);
2261 + len = posix_acl_to_xattr(pacl, buf, buflen);
2267 + error = -EOPNOTSUPP;
2268 + if (inode->i_op && inode->i_op->setxattr) {
2269 + down(&inode->i_sem);
2270 + security_inode_setxattr(dentry, key, buf, len, 0);
2271 + error = inode->i_op->setxattr(dentry, key, buf, len, 0);
2273 + security_inode_post_setxattr(dentry, key, buf, len, 0);
2274 + up(&inode->i_sem);
2282 +nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
2283 + struct nfs4_acl *acl)
2286 + struct dentry *dentry;
2287 + struct inode *inode;
2288 + struct posix_acl *pacl = NULL, *dpacl = NULL;
2289 + unsigned int flags = 0;
2292 + error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, MAY_SATTR);
2296 + dentry = fhp->fh_dentry;
2297 + inode = dentry->d_inode;
2298 + if (S_ISDIR(inode->i_mode))
2299 + flags = NFS4_ACL_DIR;
2301 + error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
2306 + error = set_nfsv4_acl_one(dentry, pacl, XATTR_NAME_ACL_ACCESS);
2312 + error = set_nfsv4_acl_one(dentry, dpacl, XATTR_NAME_ACL_DEFAULT);
2320 + posix_acl_release(pacl);
2321 + posix_acl_release(dpacl);
2324 + error = nfserrno(error);
2328 +static struct posix_acl *
2329 +_get_posix_acl(struct dentry *dentry, char *key)
2331 + struct inode *inode = dentry->d_inode;
2333 + int buflen, error = 0;
2334 + struct posix_acl *pacl = NULL;
2336 + down(&inode->i_sem);
2338 + buflen = inode->i_op->getxattr(dentry, key, NULL, 0);
2339 + if (buflen <= 0) {
2340 + error = buflen < 0 ? buflen : -ENODATA;
2344 + buf = kmalloc(buflen, GFP_KERNEL);
2345 + if (buf == NULL) {
2350 + error = -EOPNOTSUPP;
2351 + if (inode->i_op && inode->i_op->getxattr) {
2352 + error = security_inode_getxattr(dentry, key);
2355 + error = inode->i_op->getxattr(dentry, key, buf, buflen);
2361 + up(&inode->i_sem);
2363 + pacl = posix_acl_from_xattr(buf, buflen);
2368 + up(&inode->i_sem);
2369 + pacl = ERR_PTR(error);
2374 +nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
2376 + struct inode *inode = dentry->d_inode;
2378 + struct posix_acl *pacl = NULL, *dpacl = NULL;
2379 + unsigned int flags = 0;
2381 + pacl = _get_posix_acl(dentry, XATTR_NAME_ACL_ACCESS);
2382 + if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
2383 + pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
2384 + if (IS_ERR(pacl)) {
2385 + error = PTR_ERR(pacl);
2390 + if (S_ISDIR(inode->i_mode)) {
2391 + dpacl = _get_posix_acl(dentry, XATTR_NAME_ACL_DEFAULT);
2392 + if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
2394 + else if (IS_ERR(dpacl)) {
2395 + error = PTR_ERR(dpacl);
2399 + flags = NFS4_ACL_DIR;
2402 + *acl = nfs4_acl_posix_to_nfsv4(pacl, dpacl, flags);
2403 + if (IS_ERR(*acl)) {
2404 + error = PTR_ERR(*acl);
2408 + posix_acl_release(pacl);
2409 + posix_acl_release(dpacl);
2413 +#endif /* defined(CONFIG_NFS_V4) */
2415 #ifdef CONFIG_NFSD_V3
2417 * Check server access rights to a file system object
2418 --- linux-2.6.7/fs/nfsd/nfs4idmap.c.lsec 2004-06-15 23:19:43.000000000 -0600
2419 +++ linux-2.6.7/fs/nfsd/nfs4idmap.c 2005-03-23 14:28:24.687296656 -0700
2420 @@ -78,9 +78,9 @@ struct ent {
2422 #define DefineSimpleCacheLookupMap(STRUCT, FUNC) \
2423 DefineCacheLookup(struct STRUCT, h, FUNC##_lookup, \
2424 - (struct STRUCT *item, int set), /*no setup */, \
2425 + (struct STRUCT *item, int set), \
2426 & FUNC##_cache, FUNC##_hash(item), FUNC##_match(item, tmp), \
2427 - STRUCT##_init(new, item), STRUCT##_update(tmp, item), 0)
2428 + STRUCT##_init(new, item), STRUCT##_update(tmp, item))
2430 /* Common entry handling */
2432 --- linux-2.6.7/fs/nfsd/nfs4acl.c.lsec 2005-03-23 14:28:24.463330704 -0700
2433 +++ linux-2.6.7/fs/nfsd/nfs4acl.c 2005-03-23 14:28:24.463330704 -0700
2436 + * fs/nfs4acl/acl.c
2438 + * Common NFSv4 ACL handling code.
2440 + * Copyright (c) 2002, 2003 The Regents of the University of Michigan.
2441 + * All rights reserved.
2443 + * Marius Aamodt Eriksen <marius@umich.edu>
2444 + * Jeff Sedlak <jsedlak@umich.edu>
2445 + * J. Bruce Fields <bfields@umich.edu>
2447 + * Redistribution and use in source and binary forms, with or without
2448 + * modification, are permitted provided that the following conditions
2451 + * 1. Redistributions of source code must retain the above copyright
2452 + * notice, this list of conditions and the following disclaimer.
2453 + * 2. Redistributions in binary form must reproduce the above copyright
2454 + * notice, this list of conditions and the following disclaimer in the
2455 + * documentation and/or other materials provided with the distribution.
2456 + * 3. Neither the name of the University nor the names of its
2457 + * contributors may be used to endorse or promote products derived
2458 + * from this software without specific prior written permission.
2460 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
2461 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
2462 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
2463 + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
2464 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
2465 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
2466 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
2467 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
2468 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
2469 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
2470 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2473 +#include <linux/string.h>
2474 +#include <linux/slab.h>
2475 +#include <linux/list.h>
2476 +#include <linux/types.h>
2477 +#include <linux/fs.h>
2478 +#include <linux/module.h>
2479 +#include <linux/nfs_fs.h>
2480 +#include <linux/posix_acl.h>
2481 +#include <linux/nfs4.h>
2482 +#include <linux/nfs4_acl.h>
2485 +/* mode bit translations: */
2486 +#define NFS4_READ_MODE (NFS4_ACE_READ_DATA | NFS4_ACE_READ_NAMED_ATTRS)
2487 +#define NFS4_WRITE_MODE (NFS4_ACE_WRITE_DATA | NFS4_ACE_WRITE_NAMED_ATTRS | NFS4_ACE_APPEND_DATA)
2488 +#define NFS4_EXECUTE_MODE NFS4_ACE_EXECUTE
2489 +#define NFS4_ANYONE_MODE (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL | NFS4_ACE_SYNCHRONIZE)
2490 +#define NFS4_OWNER_MODE (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL)
2492 +/* flags used to simulate posix default ACLs */
2493 +#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \
2494 + | NFS4_ACE_DIRECTORY_INHERIT_ACE | NFS4_ACE_INHERIT_ONLY_ACE)
2496 +#define MASK_EQUAL(mask1, mask2) \
2497 + ( ((mask1) & NFS4_ACE_MASK_ALL) == ((mask2) & NFS4_ACE_MASK_ALL) )
2500 +mask_from_posix(unsigned short perm, unsigned int flags)
2502 + int mask = NFS4_ANYONE_MODE;
2504 + if (flags & NFS4_ACL_OWNER)
2505 + mask |= NFS4_OWNER_MODE;
2506 + if (perm & ACL_READ)
2507 + mask |= NFS4_READ_MODE;
2508 + if (perm & ACL_WRITE)
2509 + mask |= NFS4_WRITE_MODE;
2510 + if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
2511 + mask |= NFS4_ACE_DELETE_CHILD;
2512 + if (perm & ACL_EXECUTE)
2513 + mask |= NFS4_EXECUTE_MODE;
2518 +deny_mask(u32 allow_mask, unsigned int flags)
2520 + u32 ret = ~allow_mask & ~NFS4_ACE_DELETE;
2521 + if (!(flags & NFS4_ACL_DIR))
2522 + ret &= ~NFS4_ACE_DELETE_CHILD;
2527 +mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags)
2531 + if (!(flags & NFS4_ACL_DIR))
2532 + ignore |= NFS4_ACE_DELETE_CHILD; /* ignore it */
2535 + if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE)
2536 + *mode |= ACL_READ;
2537 + if ((perm & NFS4_WRITE_MODE) == NFS4_WRITE_MODE)
2538 + *mode |= ACL_WRITE;
2539 + if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE)
2540 + *mode |= ACL_EXECUTE;
2541 + if (!MASK_EQUAL(perm, ignore|mask_from_posix(*mode, flags)))
2546 +struct ace_container {
2547 + struct nfs4_ace *ace;
2548 + struct list_head ace_l;
2551 +static short ace2type(struct nfs4_ace *);
2552 +static int _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *, unsigned int);
2553 +static struct posix_acl *_nfsv4_to_posix_one(struct nfs4_acl *, unsigned int);
2554 +int nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
2555 +int nfs4_acl_split(struct nfs4_acl *, struct nfs4_acl *);
2558 +nfs4_acl_posix_to_nfsv4(struct posix_acl *pacl, struct posix_acl *dpacl,
2559 + unsigned int flags)
2561 + struct nfs4_acl *acl;
2562 + int error = -EINVAL;
2564 + if ((pacl != NULL &&
2565 + (posix_acl_valid(pacl) < 0 || pacl->a_count == 0)) ||
2567 + (posix_acl_valid(dpacl) < 0 || dpacl->a_count == 0)))
2570 + acl = nfs4_acl_new();
2571 + if (acl == NULL) {
2576 + if (pacl != NULL) {
2577 + error = _posix_to_nfsv4_one(pacl, acl,
2578 + flags & ~NFS4_ACL_TYPE_DEFAULT);
2583 + if (dpacl != NULL) {
2584 + error = _posix_to_nfsv4_one(dpacl, acl,
2585 + flags | NFS4_ACL_TYPE_DEFAULT);
2593 + nfs4_acl_free(acl);
2595 + acl = ERR_PTR(error);
2601 +nfs4_acl_add_pair(struct nfs4_acl *acl, int eflag, u32 mask, int whotype,
2602 + uid_t owner, unsigned int flags)
2606 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE,
2607 + eflag, mask, whotype, owner);
2610 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
2611 + eflag, deny_mask(mask, flags), whotype, owner);
2615 +/* We assume the acl has been verified with posix_acl_valid. */
2617 +_posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
2618 + unsigned int flags)
2620 + struct posix_acl_entry *pa, *pe, *group_owner_entry;
2621 + int error = -EINVAL;
2622 + u32 mask, mask_mask;
2623 + int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ?
2624 + NFS4_INHERITANCE_FLAGS : 0);
2626 + BUG_ON(pacl->a_count < 3);
2627 + pe = pacl->a_entries + pacl->a_count;
2628 + pa = pe - 2; /* if mask entry exists, it's second from the last. */
2629 + if (pa->e_tag == ACL_MASK)
2630 + mask_mask = deny_mask(mask_from_posix(pa->e_perm, flags), flags);
2634 + pa = pacl->a_entries;
2635 + BUG_ON(pa->e_tag != ACL_USER_OBJ);
2636 + mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER);
2637 + error = nfs4_acl_add_pair(acl, eflag, mask, NFS4_ACL_WHO_OWNER, 0, flags);
2642 + while (pa->e_tag == ACL_USER) {
2643 + mask = mask_from_posix(pa->e_perm, flags);
2644 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
2645 + eflag, mask_mask, NFS4_ACL_WHO_NAMED, pa->e_id);
2650 + error = nfs4_acl_add_pair(acl, eflag, mask,
2651 + NFS4_ACL_WHO_NAMED, pa->e_id, flags);
2657 + /* In the case of groups, we apply allow ACEs first, then deny ACEs,
2658 + * since a user can be in more than one group. */
2662 + if (pacl->a_count > 3) {
2663 + BUG_ON(pa->e_tag != ACL_GROUP_OBJ);
2664 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
2665 + NFS4_ACE_IDENTIFIER_GROUP | eflag, mask_mask,
2666 + NFS4_ACL_WHO_GROUP, 0);
2670 + group_owner_entry = pa;
2671 + mask = mask_from_posix(pa->e_perm, flags);
2672 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE,
2673 + NFS4_ACE_IDENTIFIER_GROUP | eflag, mask,
2674 + NFS4_ACL_WHO_GROUP, 0);
2679 + while (pa->e_tag == ACL_GROUP) {
2680 + mask = mask_from_posix(pa->e_perm, flags);
2681 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
2682 + NFS4_ACE_IDENTIFIER_GROUP | eflag, mask_mask,
2683 + NFS4_ACL_WHO_NAMED, pa->e_id);
2687 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE,
2688 + NFS4_ACE_IDENTIFIER_GROUP | eflag, mask,
2689 + NFS4_ACL_WHO_NAMED, pa->e_id);
2697 + pa = group_owner_entry;
2698 + mask = mask_from_posix(pa->e_perm, flags);
2699 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
2700 + NFS4_ACE_IDENTIFIER_GROUP | eflag,
2701 + deny_mask(mask, flags), NFS4_ACL_WHO_GROUP, 0);
2705 + while (pa->e_tag == ACL_GROUP) {
2706 + mask = mask_from_posix(pa->e_perm, flags);
2707 + error = nfs4_acl_add_ace(acl, NFS4_ACE_ACCESS_DENIED_ACE_TYPE,
2708 + NFS4_ACE_IDENTIFIER_GROUP | eflag,
2709 + deny_mask(mask, flags), NFS4_ACL_WHO_NAMED, pa->e_id);
2715 + if (pa->e_tag == ACL_MASK)
2717 + BUG_ON(pa->e_tag != ACL_OTHER);
2718 + mask = mask_from_posix(pa->e_perm, flags);
2719 + error = nfs4_acl_add_pair(acl, eflag, mask, NFS4_ACL_WHO_EVERYONE, 0, flags);
2726 +sort_pacl_range(struct posix_acl *pacl, int start, int end) {
2727 + int sorted = 0, i;
2728 + struct posix_acl_entry tmp;
2730 + /* We just do a bubble sort; easy to do in place, and we're not
2731 + * expecting acl's to be long enough to justify anything more. */
2734 + for (i = start; i < end; i++) {
2735 + if (pacl->a_entries[i].e_id
2736 + > pacl->a_entries[i+1].e_id) {
2738 + tmp = pacl->a_entries[i];
2739 + pacl->a_entries[i] = pacl->a_entries[i+1];
2740 + pacl->a_entries[i+1] = tmp;
2747 +sort_pacl(struct posix_acl *pacl)
2749 + /* posix_acl_valid requires that users and groups be in order
2753 + if (pacl->a_count <= 4)
2754 + return; /* no users or groups */
2756 + while (pacl->a_entries[i].e_tag == ACL_USER)
2758 + sort_pacl_range(pacl, 1, i-1);
2760 + BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
2762 + while (pacl->a_entries[j].e_tag == ACL_GROUP)
2764 + sort_pacl_range(pacl, i, j-1);
2769 +write_pace(struct nfs4_ace *ace, struct posix_acl *pacl,
2770 + struct posix_acl_entry **pace, short tag, unsigned int flags)
2772 + struct posix_acl_entry *this = *pace;
2774 + if (*pace == pacl->a_entries + pacl->a_count)
2775 + return -EINVAL; /* fell off the end */
2777 + this->e_tag = tag;
2778 + if (tag == ACL_USER_OBJ)
2779 + flags |= NFS4_ACL_OWNER;
2780 + if (mode_from_nfs4(ace->access_mask, &this->e_perm, flags))
2782 + this->e_id = (tag == ACL_USER || tag == ACL_GROUP ?
2783 + ace->who : ACL_UNDEFINED_ID);
2787 +static struct nfs4_ace *
2788 +get_next_v4_ace(struct list_head **p, struct list_head *head)
2790 + struct nfs4_ace *ace;
2795 + ace = list_entry(*p, struct nfs4_ace, l_ace);
2801 +nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl,
2802 + struct posix_acl **dpacl, unsigned int flags)
2804 + struct nfs4_acl *dacl;
2805 + int error = -ENOMEM;
2810 + dacl = nfs4_acl_new();
2814 + error = nfs4_acl_split(acl, dacl);
2818 + if (pacl != NULL) {
2819 + if (acl->naces == 0) {
2824 + *pacl = _nfsv4_to_posix_one(acl, flags);
2825 + if (IS_ERR(*pacl)) {
2826 + error = PTR_ERR(*pacl);
2833 + if (dpacl != NULL) {
2834 + if (dacl->naces == 0) {
2835 + if (pacl == NULL || *pacl == NULL)
2841 + *dpacl = _nfsv4_to_posix_one(dacl, flags);
2842 + if (IS_ERR(*dpacl)) {
2843 + error = PTR_ERR(*dpacl);
2850 + if (error && pacl) {
2851 + posix_acl_release(*pacl);
2854 + nfs4_acl_free(dacl);
2860 +same_who(struct nfs4_ace *a, struct nfs4_ace *b)
2862 + return a->whotype == b->whotype &&
2863 + (a->whotype != NFS4_ACL_WHO_NAMED || a->who == b->who);
2867 +complementary_ace_pair(struct nfs4_ace *allow, struct nfs4_ace *deny,
2868 + unsigned int flags)
2871 + if (!(flags & NFS4_ACL_DIR))
2872 + ignore |= NFS4_ACE_DELETE_CHILD;
2873 + return MASK_EQUAL(ignore|deny_mask(allow->access_mask, flags),
2874 + ignore|deny->access_mask) &&
2875 + allow->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE &&
2876 + deny->type == NFS4_ACE_ACCESS_DENIED_ACE_TYPE &&
2877 + allow->flag == deny->flag &&
2878 + same_who(allow, deny);
2882 +user_obj_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
2883 + struct posix_acl *pacl, struct posix_acl_entry **pace,
2884 + unsigned int flags)
2886 + int error = -EINVAL;
2887 + struct nfs4_ace *ace, *ace2;
2889 + ace = get_next_v4_ace(p, &n4acl->ace_head);
2892 + if (ace2type(ace) != ACL_USER_OBJ)
2894 + error = write_pace(ace, pacl, pace, ACL_USER_OBJ, flags);
2898 + ace2 = get_next_v4_ace(p, &n4acl->ace_head);
2901 + if (!complementary_ace_pair(ace, ace2, flags))
2909 +users_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
2910 + struct nfs4_ace **mask_ace,
2911 + struct posix_acl *pacl, struct posix_acl_entry **pace,
2912 + unsigned int flags)
2914 + int error = -EINVAL;
2915 + struct nfs4_ace *ace, *ace2;
2917 + ace = get_next_v4_ace(p, &n4acl->ace_head);
2920 + while (ace2type(ace) == ACL_USER) {
2921 + if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
2924 + !MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
2927 + ace = get_next_v4_ace(p, &n4acl->ace_head);
2930 + if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE)
2932 + error = write_pace(ace, pacl, pace, ACL_USER, flags);
2936 + ace2 = get_next_v4_ace(p, &n4acl->ace_head);
2939 + if (!complementary_ace_pair(ace, ace2, flags))
2941 + if ((*mask_ace)->flag != ace2->flag ||
2942 + !same_who(*mask_ace, ace2))
2944 + ace = get_next_v4_ace(p, &n4acl->ace_head);
2954 +group_obj_and_groups_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
2955 + struct nfs4_ace **mask_ace,
2956 + struct posix_acl *pacl, struct posix_acl_entry **pace,
2957 + unsigned int flags)
2959 + int error = -EINVAL;
2960 + struct nfs4_ace *ace, *ace2;
2961 + struct ace_container *ac;
2962 + struct list_head group_l;
2964 + INIT_LIST_HEAD(&group_l);
2965 + ace = list_entry(*p, struct nfs4_ace, l_ace);
2967 + /* group owner (mask and allow aces) */
2969 + if (pacl->a_count != 3) {
2970 + /* then the group owner should be preceded by mask */
2971 + if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE)
2974 + !MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
2977 + ace = get_next_v4_ace(p, &n4acl->ace_head);
2981 + if ((*mask_ace)->flag != ace->flag || !same_who(*mask_ace, ace))
2985 + if (ace2type(ace) != ACL_GROUP_OBJ)
2988 + ac = kmalloc(sizeof(*ac), GFP_KERNEL);
2993 + list_add_tail(&ac->ace_l, &group_l);
2996 + if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE)
2999 + error = write_pace(ace, pacl, pace, ACL_GROUP_OBJ, flags);
3004 + ace = get_next_v4_ace(p, &n4acl->ace_head);
3008 + /* groups (mask and allow aces) */
3010 + while (ace2type(ace) == ACL_GROUP) {
3011 + if (*mask_ace == NULL)
3014 + if (ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE ||
3015 + !MASK_EQUAL(ace->access_mask, (*mask_ace)->access_mask))
3019 + ace = get_next_v4_ace(p, &n4acl->ace_head);
3022 + ac = kmalloc(sizeof(*ac), GFP_KERNEL);
3027 + if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE ||
3028 + !same_who(ace, *mask_ace))
3032 + list_add_tail(&ac->ace_l, &group_l);
3034 + error = write_pace(ace, pacl, pace, ACL_GROUP, flags);
3038 + ace = get_next_v4_ace(p, &n4acl->ace_head);
3043 + /* group owner (deny ace) */
3045 + if (ace2type(ace) != ACL_GROUP_OBJ)
3047 + ac = list_entry(group_l.next, struct ace_container, ace_l);
3049 + if (!complementary_ace_pair(ace2, ace, flags))
3051 + list_del(group_l.next);
3054 + /* groups (deny aces) */
3056 + while (!list_empty(&group_l)) {
3057 + ace = get_next_v4_ace(p, &n4acl->ace_head);
3060 + if (ace2type(ace) != ACL_GROUP)
3062 + ac = list_entry(group_l.next, struct ace_container, ace_l);
3064 + if (!complementary_ace_pair(ace2, ace, flags))
3066 + list_del(group_l.next);
3070 + ace = get_next_v4_ace(p, &n4acl->ace_head);
3073 + if (ace2type(ace) != ACL_OTHER)
3077 + while (!list_empty(&group_l)) {
3078 + ac = list_entry(group_l.next, struct ace_container, ace_l);
3079 + list_del(group_l.next);
3086 +mask_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
3087 + struct nfs4_ace **mask_ace,
3088 + struct posix_acl *pacl, struct posix_acl_entry **pace,
3089 + unsigned int flags)
3091 + int error = -EINVAL;
3092 + struct nfs4_ace *ace;
3094 + ace = list_entry(*p, struct nfs4_ace, l_ace);
3095 + if (pacl->a_count != 3) {
3096 + if (*mask_ace == NULL)
3098 + (*mask_ace)->access_mask = deny_mask((*mask_ace)->access_mask, flags);
3099 + write_pace(*mask_ace, pacl, pace, ACL_MASK, flags);
3107 +other_from_v4(struct nfs4_acl *n4acl, struct list_head **p,
3108 + struct posix_acl *pacl, struct posix_acl_entry **pace,
3109 + unsigned int flags)
3111 + int error = -EINVAL;
3112 + struct nfs4_ace *ace, *ace2;
3114 + ace = list_entry(*p, struct nfs4_ace, l_ace);
3115 + if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE)
3117 + error = write_pace(ace, pacl, pace, ACL_OTHER, flags);
3121 + ace2 = get_next_v4_ace(p, &n4acl->ace_head);
3124 + if (!complementary_ace_pair(ace, ace2, flags))
3132 +calculate_posix_ace_count(struct nfs4_acl *n4acl)
3134 + if (n4acl->naces == 6) /* owner, owner group, and other only */
3136 + else { /* Otherwise there must be a mask entry. */
3137 + /* Also, the remaining entries are for named users and
3138 + * groups, and come in threes (mask, allow, deny): */
3139 + if (n4acl->naces < 7)
3141 + if ((n4acl->naces - 7) % 3)
3143 + return 4 + (n4acl->naces - 7)/3;
3148 +static struct posix_acl *
3149 +_nfsv4_to_posix_one(struct nfs4_acl *n4acl, unsigned int flags)
3151 + struct posix_acl *pacl;
3152 + int error = -EINVAL, nace = 0;
3153 + struct list_head *p;
3154 + struct nfs4_ace *mask_ace = NULL;
3155 + struct posix_acl_entry *pace;
3157 + nace = calculate_posix_ace_count(n4acl);
3161 + pacl = posix_acl_alloc(nace, GFP_KERNEL);
3166 + pace = &pacl->a_entries[0];
3167 + p = &n4acl->ace_head;
3169 + error = user_obj_from_v4(n4acl, &p, pacl, &pace, flags);
3173 + error = users_from_v4(n4acl, &p, &mask_ace, pacl, &pace, flags);
3177 + error = group_obj_and_groups_from_v4(n4acl, &p, &mask_ace, pacl, &pace,
3182 + error = mask_from_v4(n4acl, &p, &mask_ace, pacl, &pace, flags);
3185 + error = other_from_v4(n4acl, &p, pacl, &pace, flags);
3190 + if (p->next != &n4acl->ace_head)
3192 + if (pace != pacl->a_entries + pacl->a_count)
3199 + posix_acl_release(pacl);
3201 + pacl = ERR_PTR(error);
3206 +nfs4_acl_split(struct nfs4_acl *acl, struct nfs4_acl *dacl)
3208 + struct list_head *h, *n;
3209 + struct nfs4_ace *ace;
3212 + list_for_each_safe(h, n, &acl->ace_head) {
3213 + ace = list_entry(h, struct nfs4_ace, l_ace);
3215 + if ((ace->flag & NFS4_INHERITANCE_FLAGS)
3216 + != NFS4_INHERITANCE_FLAGS)
3219 + error = nfs4_acl_add_ace(dacl, ace->type, ace->flag,
3220 + ace->access_mask, ace->whotype, ace->who);
3234 +ace2type(struct nfs4_ace *ace)
3236 + switch (ace->whotype) {
3237 + case NFS4_ACL_WHO_NAMED:
3238 + return (ace->flag & NFS4_ACE_IDENTIFIER_GROUP ?
3239 + ACL_GROUP : ACL_USER);
3240 + case NFS4_ACL_WHO_OWNER:
3241 + return ACL_USER_OBJ;
3242 + case NFS4_ACL_WHO_GROUP:
3243 + return ACL_GROUP_OBJ;
3244 + case NFS4_ACL_WHO_EVERYONE:
3251 +EXPORT_SYMBOL(nfs4_acl_posix_to_nfsv4);
3252 +EXPORT_SYMBOL(nfs4_acl_nfsv4_to_posix);
3257 + struct nfs4_acl *acl;
3259 + if ((acl = kmalloc(sizeof(*acl), GFP_KERNEL)) == NULL)
3263 + INIT_LIST_HEAD(&acl->ace_head);
3269 +nfs4_acl_free(struct nfs4_acl *acl)
3271 + struct list_head *h;
3272 + struct nfs4_ace *ace;
3277 + while (!list_empty(&acl->ace_head)) {
3278 + h = acl->ace_head.next;
3280 + ace = list_entry(h, struct nfs4_ace, l_ace);
3290 +nfs4_acl_add_ace(struct nfs4_acl *acl, u32 type, u32 flag, u32 access_mask,
3291 + int whotype, uid_t who)
3293 + struct nfs4_ace *ace;
3295 + if ((ace = kmalloc(sizeof(*ace), GFP_KERNEL)) == NULL)
3300 + ace->access_mask = access_mask;
3301 + ace->whotype = whotype;
3304 + list_add_tail(&ace->l_ace, &acl->ace_head);
3316 + .string = "OWNER@",
3317 + .stringlen = sizeof("OWNER@") - 1,
3318 + .type = NFS4_ACL_WHO_OWNER,
3321 + .string = "GROUP@",
3322 + .stringlen = sizeof("GROUP@") - 1,
3323 + .type = NFS4_ACL_WHO_GROUP,
3326 + .string = "EVERYONE@",
3327 + .stringlen = sizeof("EVERYONE@") - 1,
3328 + .type = NFS4_ACL_WHO_EVERYONE,
3333 +nfs4_acl_get_whotype(char *p, u32 len)
3337 + for (i=0; i < sizeof(s2t_map) / sizeof(*s2t_map); i++) {
3338 + if (s2t_map[i].stringlen == len &&
3339 + 0 == memcmp(s2t_map[i].string, p, len))
3340 + return s2t_map[i].type;
3342 + return NFS4_ACL_WHO_NAMED;
3346 +nfs4_acl_write_who(int who, char *p)
3350 + for (i=0; i < sizeof(s2t_map) / sizeof(*s2t_map); i++) {
3351 + if (s2t_map[i].type == who) {
3352 + memcpy(p, s2t_map[i].string, s2t_map[i].stringlen);
3353 + return s2t_map[i].stringlen;
3361 +match_who(struct nfs4_ace *ace, uid_t owner, gid_t group, uid_t who)
3363 + switch (ace->whotype) {
3364 + case NFS4_ACL_WHO_NAMED:
3365 + return who == ace->who;
3366 + case NFS4_ACL_WHO_OWNER:
3367 + return who == owner;
3368 + case NFS4_ACL_WHO_GROUP:
3369 + return who == group;
3370 + case NFS4_ACL_WHO_EVERYONE:
3377 +/* 0 = granted, -EACCES = denied; mask is an nfsv4 mask, not mode bits */
3379 +nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
3380 + uid_t who, u32 mask)
3382 + struct nfs4_ace *ace;
3385 + list_for_each_entry(ace, &acl->ace_head, l_ace) {
3386 + if (!match_who(ace, owner, group, who))
3388 + switch (ace->type) {
3389 + case NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE:
3390 + allowed |= ace->access_mask;
3391 + if ((allowed & mask) == mask)
3394 + case NFS4_ACE_ACCESS_DENIED_ACE_TYPE:
3395 + if (ace->access_mask & mask)
3403 +EXPORT_SYMBOL(nfs4_acl_new);
3404 +EXPORT_SYMBOL(nfs4_acl_free);
3405 +EXPORT_SYMBOL(nfs4_acl_add_ace);
3406 +EXPORT_SYMBOL(nfs4_acl_get_whotype);
3407 +EXPORT_SYMBOL(nfs4_acl_write_who);
3408 +EXPORT_SYMBOL(nfs4_acl_permission);
3409 --- linux-2.6.7/fs/nfsd/Makefile.lsec 2004-06-15 23:19:13.000000000 -0600
3410 +++ linux-2.6.7/fs/nfsd/Makefile 2005-03-23 14:28:24.461331008 -0700
3411 @@ -7,5 +7,6 @@ obj-$(CONFIG_NFSD) += nfsd.o
3412 nfsd-y := nfssvc.o nfsctl.o nfsproc.o nfsfh.o vfs.o \
3413 export.o auth.o lockd.o nfscache.o nfsxdr.o stats.o
3414 nfsd-$(CONFIG_NFSD_V3) += nfs3proc.o nfs3xdr.o
3415 -nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o
3416 +nfsd-$(CONFIG_NFSD_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4idmap.o \
3417 + nfs4acl.o nfs4callback.o
3418 nfsd-objs := $(nfsd-y)
3419 --- linux-2.6.7/fs/nfsd/nfsctl.c.lsec 2004-06-15 23:19:01.000000000 -0600
3420 +++ linux-2.6.7/fs/nfsd/nfsctl.c 2005-03-23 14:28:24.132381016 -0700
3422 #include <asm/uaccess.h>
3425 - * We have a single directory with 8 nodes in it.
3426 + * We have a single directory with 9 nodes in it.
3430 @@ -50,6 +50,7 @@ enum {
3438 @@ -64,6 +65,7 @@ static ssize_t write_getfd(struct file *
3439 static ssize_t write_getfs(struct file *file, char *buf, size_t size);
3440 static ssize_t write_filehandle(struct file *file, char *buf, size_t size);
3441 static ssize_t write_threads(struct file *file, char *buf, size_t size);
3442 +static ssize_t write_leasetime(struct file *file, char *buf, size_t size);
3444 static ssize_t (*write_op[])(struct file *, char *, size_t) = {
3445 [NFSD_Svc] = write_svc,
3446 @@ -75,6 +77,7 @@ static ssize_t (*write_op[])(struct file
3447 [NFSD_Getfs] = write_getfs,
3448 [NFSD_Fh] = write_filehandle,
3449 [NFSD_Threads] = write_threads,
3450 + [NFSD_Leasetime] = write_leasetime,
3453 /* an argresp is stored in an allocated page and holds the
3454 @@ -393,6 +396,29 @@ static ssize_t write_threads(struct file
3458 +extern time_t nfs4_leasetime(void);
3460 +static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
3462 + /* if size > 10 seconds, call
3463 + * nfs4_reset_lease() then write out the new lease (seconds) as reply
3470 + rv = get_int(&mesg, &lease);
3473 + if (lease < 10 || lease > 3600)
3475 + nfs4_reset_lease(lease);
3477 + sprintf(buf, "%ld\n", nfs4_lease_time());
3478 + return strlen(buf);
3481 /*----------------------------------------------------------------------------*/
3483 * populating the filesystem.
3484 @@ -411,6 +437,7 @@ static int nfsd_fill_super(struct super_
3485 [NFSD_List] = {"exports", &exports_operations, S_IRUGO},
3486 [NFSD_Fh] = {"filehandle", &transaction_ops, S_IWUSR|S_IRUSR},
3487 [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR},
3488 + [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
3491 return simple_fill_super(sb, 0x6e667364, nfsd_files);
3492 --- linux-2.6.7/fs/nfs/callback_proc.c.lsec 2005-03-23 14:28:22.485631360 -0700
3493 +++ linux-2.6.7/fs/nfs/callback_proc.c 2005-03-23 14:28:22.485631360 -0700
3496 + * linux/fs/nfs/callback_proc.c
3498 + * Copyright (C) 2004 Trond Myklebust
3500 + * NFSv4 callback procedures
3502 +#include <linux/config.h>
3503 +#include <linux/nfs4.h>
3504 +#include <linux/nfs_fs.h>
3505 +#include "callback.h"
3506 +#include "delegation.h"
3508 +#define NFSDBG_FACILITY NFSDBG_CALLBACK
3510 +unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
3512 + struct nfs4_client *clp;
3513 + struct nfs_delegation *delegation;
3514 + struct nfs_inode *nfsi;
3515 + struct inode *inode;
3517 + res->bitmap[0] = res->bitmap[1] = 0;
3518 + res->status = htonl(NFS4ERR_BADHANDLE);
3519 + clp = nfs4_find_client(&args->addr->sin_addr);
3522 + inode = nfs_delegation_find_inode(clp, &args->fh);
3523 + if (inode == NULL)
3524 + goto out_putclient;
3525 + nfsi = NFS_I(inode);
3526 + down_read(&nfsi->rwsem);
3527 + delegation = nfsi->delegation;
3528 + if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
3530 + res->size = i_size_read(inode);
3531 + res->change_attr = NFS_CHANGE_ATTR(inode);
3532 + res->ctime = inode->i_ctime;
3533 + res->mtime = inode->i_mtime;
3534 + res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
3536 + res->bitmap[1] = (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY) &
3540 + up_read(&nfsi->rwsem);
3543 + nfs4_put_client(clp);
3545 + dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res->status));
3546 + return res->status;
3549 +unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
3551 + struct nfs4_client *clp;
3552 + struct inode *inode;
3555 + res = htonl(NFS4ERR_BADHANDLE);
3556 + clp = nfs4_find_client(&args->addr->sin_addr);
3559 + inode = nfs_delegation_find_inode(clp, &args->fh);
3560 + if (inode == NULL)
3561 + goto out_putclient;
3562 + /* Set up a helper thread to actually return the delegation */
3563 + switch(nfs_async_inode_return_delegation(inode, &args->stateid)) {
3568 + res = htonl(NFS4ERR_BAD_STATEID);
3571 + res = htonl(NFS4ERR_RESOURCE);
3575 + nfs4_put_client(clp);
3577 + dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res));
3580 --- linux-2.6.7/fs/nfs/delegation.c.lsec 2005-03-23 14:28:22.546622088 -0700
3581 +++ linux-2.6.7/fs/nfs/delegation.c 2005-03-23 14:28:22.545622240 -0700
3584 + * linux/fs/nfs/delegation.c
3586 + * Copyright (C) 2004 Trond Myklebust
3588 + * NFS file delegation management
3591 +#include <linux/config.h>
3592 +#include <linux/completion.h>
3593 +#include <linux/module.h>
3594 +#include <linux/sched.h>
3595 +#include <linux/spinlock.h>
3597 +#include <linux/nfs4.h>
3598 +#include <linux/nfs_fs.h>
3599 +#include <linux/nfs_xdr.h>
3601 +#include "delegation.h"
3603 +static struct nfs_delegation *nfs_alloc_delegation(void)
3605 + return kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
3608 +static void nfs_free_delegation(struct nfs_delegation *delegation)
3610 + if (delegation->cred)
3611 + put_rpccred(delegation->cred);
3612 + kfree(delegation);
3615 +static void nfs_delegation_claim_opens(struct inode *inode)
3617 + struct nfs_inode *nfsi = NFS_I(inode);
3618 + struct nfs_open_context *ctx;
3619 + struct nfs4_state *state;
3622 + spin_lock(&inode->i_lock);
3623 + list_for_each_entry(ctx, &nfsi->open_files, list) {
3624 + state = ctx->state;
3625 + if (state == NULL)
3627 + if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
3629 + get_nfs_open_context(ctx);
3630 + spin_unlock(&inode->i_lock);
3631 + if (nfs4_open_delegation_recall(ctx->dentry, state) < 0)
3633 + put_nfs_open_context(ctx);
3636 + spin_unlock(&inode->i_lock);
3640 + * Set up a delegation on an inode
3642 +void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
3644 + struct nfs_delegation *delegation = NFS_I(inode)->delegation;
3646 + if (delegation == NULL)
3648 + memcpy(delegation->stateid.data, res->delegation.data,
3649 + sizeof(delegation->stateid.data));
3650 + delegation->type = res->delegation_type;
3651 + delegation->maxsize = res->maxsize;
3652 + put_rpccred(delegation->cred);
3653 + delegation->cred = get_rpccred(cred);
3654 + delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
3655 + NFS_I(inode)->delegation_state = delegation->type;
3660 + * Set up a delegation on an inode
3662 +int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
3664 + struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
3665 + struct nfs_inode *nfsi = NFS_I(inode);
3666 + struct nfs_delegation *delegation;
3669 + delegation = nfs_alloc_delegation();
3670 + if (delegation == NULL)
3672 + memcpy(delegation->stateid.data, res->delegation.data,
3673 + sizeof(delegation->stateid.data));
3674 + delegation->type = res->delegation_type;
3675 + delegation->maxsize = res->maxsize;
3676 + delegation->cred = get_rpccred(cred);
3677 + delegation->inode = inode;
3679 + spin_lock(&clp->cl_lock);
3680 + if (nfsi->delegation == NULL) {
3681 + list_add(&delegation->super_list, &clp->cl_delegations);
3682 + nfsi->delegation = delegation;
3683 + nfsi->delegation_state = delegation->type;
3684 + delegation = NULL;
3686 + if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
3687 + sizeof(delegation->stateid)) != 0 ||
3688 + delegation->type != nfsi->delegation->type) {
3689 + printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
3690 + __FUNCTION__, NIPQUAD(clp->cl_addr));
3694 + spin_unlock(&clp->cl_lock);
3695 + if (delegation != NULL)
3696 + kfree(delegation);
3700 +static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
3704 + __nfs_revalidate_inode(NFS_SERVER(inode), inode);
3706 + res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
3707 + nfs_free_delegation(delegation);
3711 +/* Sync all data to disk upon delegation return */
3712 +static void nfs_msync_inode(struct inode *inode)
3714 + down(&inode->i_sem);
3715 + filemap_fdatawrite(inode->i_mapping);
3716 + nfs_wb_all(inode);
3717 + filemap_fdatawait(inode->i_mapping);
3718 + up(&inode->i_sem);
3722 + * Basic procedure for returning a delegation to the server
3724 +int nfs_inode_return_delegation(struct inode *inode)
3726 + struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
3727 + struct nfs_inode *nfsi = NFS_I(inode);
3728 + struct nfs_delegation *delegation;
3731 + nfs_msync_inode(inode);
3732 + down_read(&clp->cl_sem);
3733 + /* Guard against new delegated open calls */
3734 + down_write(&nfsi->rwsem);
3735 + spin_lock(&clp->cl_lock);
3736 + delegation = nfsi->delegation;
3737 + if (delegation != NULL) {
3738 + list_del_init(&delegation->super_list);
3739 + nfsi->delegation = NULL;
3740 + nfsi->delegation_state = 0;
3742 + spin_unlock(&clp->cl_lock);
3743 + nfs_delegation_claim_opens(inode);
3744 + up_write(&nfsi->rwsem);
3745 + up_read(&clp->cl_sem);
3746 + nfs_msync_inode(inode);
3748 + if (delegation != NULL)
3749 + res = nfs_do_return_delegation(inode, delegation);
3754 + * Return all delegations associated to a super block
3756 +void nfs_return_all_delegations(struct super_block *sb)
3758 + struct nfs4_client *clp = NFS_SB(sb)->nfs4_state;
3759 + struct nfs_delegation *delegation;
3760 + struct inode *inode;
3765 + spin_lock(&clp->cl_lock);
3766 + list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
3767 + if (delegation->inode->i_sb != sb)
3769 + inode = igrab(delegation->inode);
3770 + if (inode == NULL)
3772 + spin_unlock(&clp->cl_lock);
3773 + nfs_inode_return_delegation(inode);
3777 + spin_unlock(&clp->cl_lock);
3780 +struct recall_threadargs {
3781 + struct inode *inode;
3782 + struct nfs4_client *clp;
3783 + const nfs4_stateid *stateid;
3785 + struct completion started;
3789 +static int recall_thread(void *data)
3791 + struct recall_threadargs *args = (struct recall_threadargs *)data;
3792 + struct inode *inode = igrab(args->inode);
3793 + struct nfs4_client *clp = NFS_SERVER(inode)->nfs4_state;
3794 + struct nfs_inode *nfsi = NFS_I(inode);
3795 + struct nfs_delegation *delegation;
3797 + daemonize("nfsv4-delegreturn");
3799 + nfs_msync_inode(inode);
3800 + down_read(&clp->cl_sem);
3801 + down_write(&nfsi->rwsem);
3802 + spin_lock(&clp->cl_lock);
3803 + delegation = nfsi->delegation;
3804 + if (delegation != NULL && memcmp(delegation->stateid.data,
3805 + args->stateid->data,
3806 + sizeof(delegation->stateid.data)) == 0) {
3807 + list_del_init(&delegation->super_list);
3808 + nfsi->delegation = NULL;
3809 + nfsi->delegation_state = 0;
3812 + delegation = NULL;
3813 + args->result = -ENOENT;
3815 + spin_unlock(&clp->cl_lock);
3816 + complete(&args->started);
3817 + nfs_delegation_claim_opens(inode);
3818 + up_write(&nfsi->rwsem);
3819 + up_read(&clp->cl_sem);
3820 + nfs_msync_inode(inode);
3822 + if (delegation != NULL)
3823 + nfs_do_return_delegation(inode, delegation);
3825 + module_put_and_exit(0);
3829 + * Asynchronous delegation recall!
3831 +int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
3833 + struct recall_threadargs data = {
3835 + .stateid = stateid,
3839 + init_completion(&data.started);
3840 + __module_get(THIS_MODULE);
3841 + status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
3843 + goto out_module_put;
3844 + wait_for_completion(&data.started);
3845 + return data.result;
3847 + module_put(THIS_MODULE);
3852 + * Retrieve the inode associated with a delegation
3854 +struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle)
3856 + struct nfs_delegation *delegation;
3857 + struct inode *res = NULL;
3858 + spin_lock(&clp->cl_lock);
3859 + list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
3860 + if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
3861 + res = igrab(delegation->inode);
3865 + spin_unlock(&clp->cl_lock);
3870 + * Mark all delegations as needing to be reclaimed
3872 +void nfs_delegation_mark_reclaim(struct nfs4_client *clp)
3874 + struct nfs_delegation *delegation;
3875 + spin_lock(&clp->cl_lock);
3876 + list_for_each_entry(delegation, &clp->cl_delegations, super_list)
3877 + delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
3878 + spin_unlock(&clp->cl_lock);
3882 + * Reap all unclaimed delegations after reboot recovery is done
3884 +void nfs_delegation_reap_unclaimed(struct nfs4_client *clp)
3886 + struct nfs_delegation *delegation, *n;
3888 + spin_lock(&clp->cl_lock);
3889 + list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
3890 + if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
3892 + list_move(&delegation->super_list, &head);
3893 + NFS_I(delegation->inode)->delegation = NULL;
3894 + NFS_I(delegation->inode)->delegation_state = 0;
3896 + spin_unlock(&clp->cl_lock);
3897 + while(!list_empty(&head)) {
3898 + delegation = list_entry(head.next, struct nfs_delegation, super_list);
3899 + list_del(&delegation->super_list);
3900 + nfs_free_delegation(delegation);
3903 --- linux-2.6.7/fs/nfs/delegation.h.lsec 2005-03-23 14:28:22.546622088 -0700
3904 +++ linux-2.6.7/fs/nfs/delegation.h 2005-03-23 14:28:22.546622088 -0700
3907 + * linux/fs/nfs/delegation.h
3909 + * Copyright (c) Trond Myklebust
3911 + * Definitions pertaining to NFS delegated files
3913 +#ifndef FS_NFS_DELEGATION_H
3914 +#define FS_NFS_DELEGATION_H
3916 +#if defined(CONFIG_NFS_V4)
3918 + * NFSv4 delegation
3920 +struct nfs_delegation {
3921 + struct list_head super_list;
3922 + struct rpc_cred *cred;
3923 + struct inode *inode;
3924 + nfs4_stateid stateid;
3926 +#define NFS_DELEGATION_NEED_RECLAIM 1
3931 +int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
3932 +void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
3933 +int nfs_inode_return_delegation(struct inode *inode);
3934 +int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
3936 +struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle);
3937 +void nfs_return_all_delegations(struct super_block *sb);
3939 +void nfs_delegation_mark_reclaim(struct nfs4_client *clp);
3940 +void nfs_delegation_reap_unclaimed(struct nfs4_client *clp);
3942 +/* NFSv4 delegation-related procedures */
3943 +int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid);
3944 +int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state);
3946 +static inline int nfs_have_delegation(struct inode *inode, int flags)
3948 + flags &= FMODE_READ|FMODE_WRITE;
3950 + if ((NFS_I(inode)->delegation_state & flags) == flags)
3955 +static inline int nfs_have_delegation(struct inode *inode, int flags)
3962 --- linux-2.6.7/fs/nfs/nfs3proc.c.lsec 2004-06-15 23:19:23.000000000 -0600
3963 +++ linux-2.6.7/fs/nfs/nfs3proc.c 2005-03-23 14:28:22.820580440 -0700
3964 @@ -68,18 +68,6 @@ nfs3_async_handle_jukebox(struct rpc_tas
3968 -static struct rpc_cred *
3969 -nfs_cred(struct inode *inode, struct file *filp)
3971 - struct rpc_cred *cred = NULL;
3974 - cred = (struct rpc_cred *)filp->private_data;
3976 - cred = NFS_I(inode)->mm_cred;
3981 * Bare-bones access to getattr: this is for nfs_read_super.
3983 @@ -164,8 +152,7 @@ nfs3_proc_lookup(struct inode *dir, stru
3988 -nfs3_proc_access(struct inode *inode, struct rpc_cred *cred, int mode)
3989 +static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
3991 struct nfs_fattr fattr;
3992 struct nfs3_accessargs arg = {
3993 @@ -178,9 +165,10 @@ nfs3_proc_access(struct inode *inode, st
3994 .rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS],
3998 + .rpc_cred = entry->cred
4001 + int mode = entry->mask;
4004 dprintk("NFS call access\n");
4006 @@ -200,10 +188,16 @@ nfs3_proc_access(struct inode *inode, st
4008 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
4009 nfs_refresh_inode(inode, &fattr);
4010 - dprintk("NFS reply access\n");
4012 - if (status == 0 && (arg.access & res.access) != arg.access)
4014 + if (status == 0) {
4016 + if (res.access & NFS3_ACCESS_READ)
4017 + entry->mask |= MAY_READ;
4018 + if (res.access & (NFS3_ACCESS_MODIFY | NFS3_ACCESS_EXTEND | NFS3_ACCESS_DELETE))
4019 + entry->mask |= MAY_WRITE;
4020 + if (res.access & (NFS3_ACCESS_LOOKUP|NFS3_ACCESS_EXECUTE))
4021 + entry->mask |= MAY_EXEC;
4023 + dprintk("NFS reply access, status = %d\n", status);
4027 @@ -227,8 +221,7 @@ nfs3_proc_readlink(struct inode *inode,
4032 -nfs3_proc_read(struct nfs_read_data *rdata, struct file *filp)
4033 +static int nfs3_proc_read(struct nfs_read_data *rdata)
4035 int flags = rdata->flags;
4036 struct inode * inode = rdata->inode;
4037 @@ -237,13 +230,13 @@ nfs3_proc_read(struct nfs_read_data *rda
4038 .rpc_proc = &nfs3_procedures[NFS3PROC_READ],
4039 .rpc_argp = &rdata->args,
4040 .rpc_resp = &rdata->res,
4041 + .rpc_cred = rdata->cred,
4045 dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
4046 (long long) rdata->args.offset);
4048 - msg.rpc_cred = nfs_cred(inode, filp);
4049 status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
4051 nfs_refresh_inode(inode, fattr);
4052 @@ -251,8 +244,7 @@ nfs3_proc_read(struct nfs_read_data *rda
4057 -nfs3_proc_write(struct nfs_write_data *wdata, struct file *filp)
4058 +static int nfs3_proc_write(struct nfs_write_data *wdata)
4060 int rpcflags = wdata->flags;
4061 struct inode * inode = wdata->inode;
4062 @@ -261,13 +253,13 @@ nfs3_proc_write(struct nfs_write_data *w
4063 .rpc_proc = &nfs3_procedures[NFS3PROC_WRITE],
4064 .rpc_argp = &wdata->args,
4065 .rpc_resp = &wdata->res,
4066 + .rpc_cred = wdata->cred,
4070 dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
4071 (long long) wdata->args.offset);
4073 - msg.rpc_cred = nfs_cred(inode, filp);
4074 status = rpc_call_sync(NFS_CLIENT(inode), &msg, rpcflags);
4076 nfs_refresh_inode(inode, fattr);
4077 @@ -275,8 +267,7 @@ nfs3_proc_write(struct nfs_write_data *w
4078 return status < 0? status : wdata->res.count;
4082 -nfs3_proc_commit(struct nfs_write_data *cdata, struct file *filp)
4083 +static int nfs3_proc_commit(struct nfs_write_data *cdata)
4085 struct inode * inode = cdata->inode;
4086 struct nfs_fattr * fattr = cdata->res.fattr;
4087 @@ -284,13 +275,13 @@ nfs3_proc_commit(struct nfs_write_data *
4088 .rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT],
4089 .rpc_argp = &cdata->args,
4090 .rpc_resp = &cdata->res,
4091 + .rpc_cred = cdata->cred,
4095 dprintk("NFS call commit %d @ %Ld\n", cdata->args.count,
4096 (long long) cdata->args.offset);
4098 - msg.rpc_cred = nfs_cred(inode, filp);
4099 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
4101 nfs_refresh_inode(inode, fattr);
4102 @@ -534,6 +525,8 @@ nfs3_proc_symlink(struct inode *dir, str
4106 + if (path->len > NFS3_MAXPATHLEN)
4107 + return -ENAMETOOLONG;
4108 dprintk("NFS call symlink %s -> %s\n", name->name, path->name);
4111 @@ -832,27 +825,6 @@ nfs3_proc_commit_setup(struct nfs_write_
4112 rpc_call_setup(task, &msg, 0);
4116 - * Set up the nfspage struct with the right credentials
4119 -nfs3_request_init(struct nfs_page *req, struct file *filp)
4121 - req->wb_cred = get_rpccred(nfs_cred(req->wb_inode, filp));
4125 -nfs3_request_compatible(struct nfs_page *req, struct file *filp, struct page *page)
4127 - if (req->wb_file != filp)
4129 - if (req->wb_page != page)
4131 - if (req->wb_cred != nfs_file_cred(filp))
4137 nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
4139 @@ -863,6 +835,7 @@ struct nfs_rpc_ops nfs_v3_clientops = {
4140 .version = 3, /* protocol version */
4141 .dentry_ops = &nfs_dentry_operations,
4142 .dir_inode_ops = &nfs_dir_inode_operations,
4143 + .file_inode_ops = &nfs_file_inode_operations,
4144 .getroot = nfs3_proc_get_root,
4145 .getattr = nfs3_proc_getattr,
4146 .setattr = nfs3_proc_setattr,
4147 @@ -892,7 +865,5 @@ struct nfs_rpc_ops nfs_v3_clientops = {
4148 .commit_setup = nfs3_proc_commit_setup,
4149 .file_open = nfs_open,
4150 .file_release = nfs_release,
4151 - .request_init = nfs3_request_init,
4152 - .request_compatible = nfs3_request_compatible,
4153 .lock = nfs3_proc_lock,
4155 --- linux-2.6.7/fs/nfs/proc.c.lsec 2004-06-15 23:20:03.000000000 -0600
4156 +++ linux-2.6.7/fs/nfs/proc.c 2005-03-23 14:28:23.058544264 -0700
4159 extern struct rpc_procinfo nfs_procedures[];
4161 -static struct rpc_cred *
4162 -nfs_cred(struct inode *inode, struct file *filp)
4164 - struct rpc_cred *cred = NULL;
4167 - cred = (struct rpc_cred *)filp->private_data;
4169 - cred = NFS_I(inode)->mm_cred;
4174 * Bare-bones access to getattr: this is for nfs_read_super.
4176 @@ -167,8 +155,7 @@ nfs_proc_readlink(struct inode *inode, s
4181 -nfs_proc_read(struct nfs_read_data *rdata, struct file *filp)
4182 +static int nfs_proc_read(struct nfs_read_data *rdata)
4184 int flags = rdata->flags;
4185 struct inode * inode = rdata->inode;
4186 @@ -177,15 +164,14 @@ nfs_proc_read(struct nfs_read_data *rdat
4187 .rpc_proc = &nfs_procedures[NFSPROC_READ],
4188 .rpc_argp = &rdata->args,
4189 .rpc_resp = &rdata->res,
4190 + .rpc_cred = rdata->cred,
4194 dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
4195 (long long) rdata->args.offset);
4197 - msg.rpc_cred = nfs_cred(inode, filp);
4198 status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
4201 nfs_refresh_inode(inode, fattr);
4202 /* Emulate the eof flag, which isn't normally needed in NFSv2
4203 @@ -198,8 +184,7 @@ nfs_proc_read(struct nfs_read_data *rdat
4208 -nfs_proc_write(struct nfs_write_data *wdata, struct file *filp)
4209 +static int nfs_proc_write(struct nfs_write_data *wdata)
4211 int flags = wdata->flags;
4212 struct inode * inode = wdata->inode;
4213 @@ -208,13 +193,13 @@ nfs_proc_write(struct nfs_write_data *wd
4214 .rpc_proc = &nfs_procedures[NFSPROC_WRITE],
4215 .rpc_argp = &wdata->args,
4216 .rpc_resp = &wdata->res,
4217 + .rpc_cred = wdata->cred,
4221 dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
4222 (long long) wdata->args.offset);
4224 - msg.rpc_cred = nfs_cred(inode, filp);
4225 status = rpc_call_sync(NFS_CLIENT(inode), &msg, flags);
4227 nfs_refresh_inode(inode, fattr);
4228 @@ -400,6 +385,8 @@ nfs_proc_symlink(struct inode *dir, stru
4232 + if (path->len > NFS2_MAXPATHLEN)
4233 + return -ENAMETOOLONG;
4234 dprintk("NFS call symlink %s -> %s\n", name->name, path->name);
4236 status = rpc_call(NFS_CLIENT(dir), NFSPROC_SYMLINK, &arg, NULL, 0);
4237 @@ -619,27 +606,6 @@ nfs_proc_commit_setup(struct nfs_write_d
4242 - * Set up the nfspage struct with the right credentials
4245 -nfs_request_init(struct nfs_page *req, struct file *filp)
4247 - req->wb_cred = get_rpccred(nfs_cred(req->wb_inode, filp));
4251 -nfs_request_compatible(struct nfs_page *req, struct file *filp, struct page *page)
4253 - if (req->wb_file != filp)
4255 - if (req->wb_page != page)
4257 - if (req->wb_cred != nfs_file_cred(filp))
4263 nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
4265 @@ -651,6 +617,7 @@ struct nfs_rpc_ops nfs_v2_clientops = {
4266 .version = 2, /* protocol version */
4267 .dentry_ops = &nfs_dentry_operations,
4268 .dir_inode_ops = &nfs_dir_inode_operations,
4269 + .file_inode_ops = &nfs_file_inode_operations,
4270 .getroot = nfs_proc_get_root,
4271 .getattr = nfs_proc_getattr,
4272 .setattr = nfs_proc_setattr,
4273 @@ -680,7 +647,5 @@ struct nfs_rpc_ops nfs_v2_clientops = {
4274 .commit_setup = nfs_proc_commit_setup,
4275 .file_open = nfs_open,
4276 .file_release = nfs_release,
4277 - .request_init = nfs_request_init,
4278 - .request_compatible = nfs_request_compatible,
4279 .lock = nfs_proc_lock,
4281 --- linux-2.6.7/fs/nfs/file.c.lsec 2004-06-15 23:19:37.000000000 -0600
4282 +++ linux-2.6.7/fs/nfs/file.c 2005-03-23 14:28:22.760589560 -0700
4284 #include <asm/uaccess.h>
4285 #include <asm/system.h>
4287 +#include "delegation.h"
4289 #define NFSDBG_FACILITY NFSDBG_FILE
4291 static long nfs_file_fcntl(int fd, unsigned int cmd,
4292 @@ -66,6 +68,19 @@ struct inode_operations nfs_file_inode_o
4293 .setattr = nfs_setattr,
4296 +#ifdef CONFIG_NFS_V4
4298 +struct inode_operations nfs4_file_inode_operations = {
4299 + .permission = nfs_permission,
4300 + .getattr = nfs_getattr,
4301 + .setattr = nfs_setattr,
4302 + .getxattr = nfs_getxattr,
4303 + .setxattr = nfs_setxattr,
4304 + .listxattr = nfs_listxattr,
4307 +#endif /* CONFIG_NFS_V4 */
4309 /* Hack for future NFS swap support */
4311 # define IS_SWAPFILE(inode) (0)
4312 @@ -127,6 +142,7 @@ nfs_file_release(struct inode *inode, st
4314 nfs_file_flush(struct file *file)
4316 + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
4317 struct inode *inode = file->f_dentry->d_inode;
4320 @@ -138,9 +154,9 @@ nfs_file_flush(struct file *file)
4321 /* Ensure that data+attribute caches are up to date after close() */
4322 status = nfs_wb_all(inode);
4324 - status = file->f_error;
4325 - file->f_error = 0;
4327 + status = ctx->error;
4329 + if (!status && !nfs_have_delegation(inode, FMODE_READ))
4330 __nfs_revalidate_inode(NFS_SERVER(inode), inode);
4333 @@ -211,6 +227,7 @@ nfs_file_mmap(struct file * file, struct
4335 nfs_fsync(struct file *file, struct dentry *dentry, int datasync)
4337 + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
4338 struct inode *inode = dentry->d_inode;
4341 @@ -219,8 +236,8 @@ nfs_fsync(struct file *file, struct dent
4343 status = nfs_wb_all(inode);
4345 - status = file->f_error;
4346 - file->f_error = 0;
4347 + status = ctx->error;
4352 @@ -302,6 +319,90 @@ out_swapfile:
4356 +static int do_getlk(struct file *filp, int cmd, struct file_lock *fl)
4358 + struct inode *inode = filp->f_mapping->host;
4362 + status = NFS_PROTO(inode)->lock(filp, cmd, fl);
4367 +static int do_unlk(struct file *filp, int cmd, struct file_lock *fl)
4369 + struct inode *inode = filp->f_mapping->host;
4373 + rpc_clnt_sigmask(NFS_CLIENT(inode), &oldset);
4375 + * Flush all pending writes before doing anything
4378 + filemap_fdatawrite(filp->f_mapping);
4379 + down(&inode->i_sem);
4380 + nfs_wb_all(inode);
4381 + up(&inode->i_sem);
4382 + filemap_fdatawait(filp->f_mapping);
4384 + /* NOTE: special case
4385 + * If we're signalled while cleaning up locks on process exit, we
4386 + * still need to complete the unlock.
4389 + status = NFS_PROTO(inode)->lock(filp, cmd, fl);
4390 + rpc_clnt_sigunmask(NFS_CLIENT(inode), &oldset);
4394 +static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
4396 + struct inode *inode = filp->f_mapping->host;
4400 + * Flush all pending writes before doing anything
4403 + status = filemap_fdatawrite(filp->f_mapping);
4404 + if (status == 0) {
4405 + down(&inode->i_sem);
4406 + status = nfs_wb_all(inode);
4407 + up(&inode->i_sem);
4409 + status = filemap_fdatawait(filp->f_mapping);
4415 + status = NFS_PROTO(inode)->lock(filp, cmd, fl);
4416 + /* If we were signalled we still need to ensure that
4417 + * we clean up any state on the server. We therefore
4418 + * record the lock call as having succeeded in order to
4419 + * ensure that locks_remove_posix() cleans it out when
4420 + * the process exits.
4422 + if (status == -EINTR || status == -ERESTARTSYS)
4423 + posix_lock_file(filp, fl);
4428 + * Make sure we clear the cache whenever we try to get the lock.
4429 + * This makes locking act as a cache coherency point.
4431 + filemap_fdatawrite(filp->f_mapping);
4432 + down(&inode->i_sem);
4433 + nfs_wb_all(inode); /* we may have slept */
4434 + up(&inode->i_sem);
4435 + filemap_fdatawait(filp->f_mapping);
4436 + nfs_zap_caches(inode);
4441 * Lock a (portion of) a file
4443 @@ -309,8 +410,6 @@ int
4444 nfs_lock(struct file *filp, int cmd, struct file_lock *fl)
4446 struct inode * inode = filp->f_mapping->host;
4450 dprintk("NFS: nfs_lock(f=%s/%ld, t=%x, fl=%x, r=%Ld:%Ld)\n",
4451 inode->i_sb->s_id, inode->i_ino,
4452 @@ -328,8 +427,8 @@ nfs_lock(struct file *filp, int cmd, str
4453 /* Fake OK code if mounted without NLM support */
4454 if (NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM) {
4456 - status = LOCK_USE_CLNT;
4458 + return LOCK_USE_CLNT;
4463 @@ -340,45 +439,12 @@ nfs_lock(struct file *filp, int cmd, str
4464 * Not sure whether that would be unique, though, or whether
4465 * that would break in other places.
4467 - if (!fl->fl_owner || !(fl->fl_flags & FL_POSIX))
4468 + if (!(fl->fl_flags & FL_POSIX))
4472 - * Flush all pending writes before doing anything
4475 - status = filemap_fdatawrite(filp->f_mapping);
4476 - down(&inode->i_sem);
4477 - status2 = nfs_wb_all(inode);
4480 - up(&inode->i_sem);
4481 - status2 = filemap_fdatawait(filp->f_mapping);
4488 - status = NFS_PROTO(inode)->lock(filp, cmd, fl);
4496 - * Make sure we clear the cache whenever we try to get the lock.
4497 - * This makes locking act as a cache coherency point.
4500 - if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
4501 - filemap_fdatawrite(filp->f_mapping);
4502 - down(&inode->i_sem);
4503 - nfs_wb_all(inode); /* we may have slept */
4504 - up(&inode->i_sem);
4505 - filemap_fdatawait(filp->f_mapping);
4506 - nfs_zap_caches(inode);
4509 + if (IS_GETLK(cmd))
4510 + return do_getlk(filp, cmd, fl);
4511 + if (fl->fl_type == F_UNLCK)
4512 + return do_unlk(filp, cmd, fl);
4513 + return do_setlk(filp, cmd, fl);
4515 --- linux-2.6.7/fs/nfs/write.c.lsec 2004-06-15 23:19:43.000000000 -0600
4516 +++ linux-2.6.7/fs/nfs/write.c 2005-03-23 14:28:23.225518880 -0700
4518 #include <linux/smp_lock.h>
4519 #include <linux/mempool.h>
4521 +#include "delegation.h"
4523 #define NFSDBG_FACILITY NFSDBG_PAGECACHE
4525 #define MIN_POOL_WRITE (32)
4528 * Local function declarations
4530 -static struct nfs_page * nfs_update_request(struct file*, struct inode *,
4531 +static struct nfs_page * nfs_update_request(struct nfs_open_context*,
4534 unsigned int, unsigned int);
4535 static void nfs_writeback_done_partial(struct nfs_write_data *, int);
4536 @@ -173,7 +176,7 @@ static void nfs_mark_uptodate(struct pag
4537 * Write a page synchronously.
4538 * Offset is the data offset within the page.
4540 -static int nfs_writepage_sync(struct file *file, struct inode *inode,
4541 +static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
4542 struct page *page, unsigned int offset, unsigned int count,
4545 @@ -187,9 +190,10 @@ static int nfs_writepage_sync(struct fil
4547 memset(wdata, 0, sizeof(*wdata));
4549 + wdata->cred = ctx->cred;
4550 wdata->inode = inode;
4551 wdata->args.fh = NFS_FH(inode);
4552 - wdata->args.lockowner = current->files;
4553 + wdata->args.context = ctx;
4554 wdata->args.pages = &page;
4555 wdata->args.stable = NFS_FILE_SYNC;
4556 wdata->args.pgbase = offset;
4557 @@ -208,7 +212,7 @@ static int nfs_writepage_sync(struct fil
4558 wdata->args.count = count;
4559 wdata->args.offset = page_offset(page) + wdata->args.pgbase;
4561 - result = NFS_PROTO(inode)->write(wdata, file);
4562 + result = NFS_PROTO(inode)->write(wdata);
4565 /* Must mark the page invalid after I/O error */
4566 @@ -241,13 +245,14 @@ io_error:
4567 return written ? written : result;
4570 -static int nfs_writepage_async(struct file *file, struct inode *inode,
4571 - struct page *page, unsigned int offset, unsigned int count)
4572 +static int nfs_writepage_async(struct nfs_open_context *ctx,
4573 + struct inode *inode, struct page *page,
4574 + unsigned int offset, unsigned int count)
4576 struct nfs_page *req;
4579 - req = nfs_update_request(file, inode, page, offset, count);
4580 + req = nfs_update_request(ctx, inode, page, offset, count);
4581 status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
4584 @@ -274,6 +279,7 @@ static int wb_priority(struct writeback_
4586 int nfs_writepage(struct page *page, struct writeback_control *wbc)
4588 + struct nfs_open_context *ctx;
4589 struct inode *inode = page->mapping->host;
4590 unsigned long end_index;
4591 unsigned offset = PAGE_CACHE_SIZE;
4592 @@ -308,16 +314,21 @@ int nfs_writepage(struct page *page, str
4593 if (page->index >= end_index+1 || !offset)
4596 + ctx = nfs_find_open_context(inode, FMODE_WRITE);
4597 + if (ctx == NULL) {
4602 if (!IS_SYNC(inode) && inode_referenced) {
4603 - err = nfs_writepage_async(NULL, inode, page, 0, offset);
4604 + err = nfs_writepage_async(ctx, inode, page, 0, offset);
4607 if (wbc->for_reclaim)
4608 nfs_flush_inode(inode, 0, 0, FLUSH_STABLE);
4611 - err = nfs_writepage_sync(NULL, inode, page, 0,
4612 + err = nfs_writepage_sync(ctx, inode, page, 0,
4616 @@ -326,6 +337,7 @@ do_it:
4620 + put_nfs_open_context(ctx);
4623 if (inode_referenced)
4624 @@ -374,8 +386,7 @@ out:
4626 * Insert a write request into an inode
4629 -nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
4630 +static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
4632 struct nfs_inode *nfsi = NFS_I(inode);
4634 @@ -387,6 +398,8 @@ nfs_inode_add_request(struct inode *inod
4635 if (!nfsi->npages) {
4637 nfs_begin_data_update(inode);
4638 + if (nfs_have_delegation(inode, FMODE_WRITE))
4639 + nfsi->change_attr++;
4643 @@ -404,7 +417,7 @@ nfs_inode_remove_request(struct nfs_page
4645 BUG_ON (!NFS_WBACK_BUSY(req));
4646 spin_lock(&nfs_wreq_lock);
4647 - inode = req->wb_inode;
4648 + inode = req->wb_context->dentry->d_inode;
4649 nfsi = NFS_I(inode);
4650 radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
4652 @@ -450,7 +463,7 @@ nfs_find_request(struct inode *inode, un
4654 nfs_mark_request_dirty(struct nfs_page *req)
4656 - struct inode *inode = req->wb_inode;
4657 + struct inode *inode = req->wb_context->dentry->d_inode;
4658 struct nfs_inode *nfsi = NFS_I(inode);
4660 spin_lock(&nfs_wreq_lock);
4661 @@ -467,7 +480,7 @@ nfs_mark_request_dirty(struct nfs_page *
4663 nfs_dirty_request(struct nfs_page *req)
4665 - struct nfs_inode *nfsi = NFS_I(req->wb_inode);
4666 + struct nfs_inode *nfsi = NFS_I(req->wb_context->dentry->d_inode);
4667 return !list_empty(&req->wb_list) && req->wb_list_head == &nfsi->dirty;
4670 @@ -478,7 +491,7 @@ nfs_dirty_request(struct nfs_page *req)
4672 nfs_mark_request_commit(struct nfs_page *req)
4674 - struct inode *inode = req->wb_inode;
4675 + struct inode *inode = req->wb_context->dentry->d_inode;
4676 struct nfs_inode *nfsi = NFS_I(inode);
4678 spin_lock(&nfs_wreq_lock);
4679 @@ -619,9 +632,9 @@ static int nfs_wait_on_write_congestion(
4681 * Note: Should always be called with the Page Lock held!
4683 -static struct nfs_page *
4684 -nfs_update_request(struct file* file, struct inode *inode, struct page *page,
4685 - unsigned int offset, unsigned int bytes)
4686 +static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
4687 + struct inode *inode, struct page *page,
4688 + unsigned int offset, unsigned int bytes)
4690 struct nfs_server *server = NFS_SERVER(inode);
4691 struct nfs_page *req, *new = NULL;
4692 @@ -668,13 +681,9 @@ nfs_update_request(struct file* file, st
4694 spin_unlock(&nfs_wreq_lock);
4696 - new = nfs_create_request(file, inode, page, offset, bytes);
4697 + new = nfs_create_request(ctx, inode, page, offset, bytes);
4701 - new->wb_file = file;
4706 /* We have a request for our page.
4707 @@ -684,7 +693,7 @@ nfs_update_request(struct file* file, st
4710 rqend = req->wb_offset + req->wb_bytes;
4711 - if (req->wb_file != file
4712 + if (req->wb_context != ctx
4713 || req->wb_page != page
4714 || !nfs_dirty_request(req)
4715 || offset > rqend || end < req->wb_offset) {
4716 @@ -705,9 +714,9 @@ nfs_update_request(struct file* file, st
4721 -nfs_flush_incompatible(struct file *file, struct page *page)
4722 +int nfs_flush_incompatible(struct file *file, struct page *page)
4724 + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
4725 struct inode *inode = page->mapping->host;
4726 struct nfs_page *req;
4728 @@ -721,7 +730,7 @@ nfs_flush_incompatible(struct file *file
4730 req = nfs_find_request(inode, page->index);
4732 - if (!NFS_PROTO(inode)->request_compatible(req, file, page))
4733 + if (req->wb_page != page || ctx != req->wb_context)
4734 status = nfs_wb_page(inode, page);
4735 nfs_release_request(req);
4737 @@ -737,6 +746,7 @@ nfs_flush_incompatible(struct file *file
4738 int nfs_updatepage(struct file *file, struct page *page,
4739 unsigned int offset, unsigned int count)
4741 + struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
4742 struct dentry *dentry = file->f_dentry;
4743 struct inode *inode = page->mapping->host;
4744 struct nfs_page *req;
4745 @@ -747,7 +757,7 @@ int nfs_updatepage(struct file *file, st
4746 count, (long long)(page_offset(page) +offset));
4748 if (IS_SYNC(inode)) {
4749 - status = nfs_writepage_sync(file, inode, page, offset, count, 0);
4750 + status = nfs_writepage_sync(ctx, inode, page, offset, count, 0);
4752 if (offset == 0 && status == PAGE_CACHE_SIZE)
4753 SetPageUptodate(page);
4754 @@ -784,7 +794,7 @@ int nfs_updatepage(struct file *file, st
4758 - req = nfs_update_request(file, inode, page, offset, count);
4759 + req = nfs_update_request(ctx, inode, page, offset, count);
4760 status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
4761 if (status != -EBUSY)
4763 @@ -860,16 +870,15 @@ static void nfs_write_rpcsetup(struct nf
4764 * NB: take care not to mess about with data->commit et al. */
4767 - data->inode = inode = req->wb_inode;
4768 - data->cred = req->wb_cred;
4769 + data->inode = inode = req->wb_context->dentry->d_inode;
4770 + data->cred = req->wb_context->cred;
4772 data->args.fh = NFS_FH(inode);
4773 data->args.offset = req_offset(req) + offset;
4774 data->args.pgbase = req->wb_pgbase + offset;
4775 data->args.pages = data->pagevec;
4776 data->args.count = count;
4777 - data->args.lockowner = req->wb_lockowner;
4778 - data->args.state = req->wb_state;
4779 + data->args.context = req->wb_context;
4781 data->res.fattr = &data->fattr;
4782 data->res.count = count;
4783 @@ -1029,7 +1038,7 @@ nfs_flush_list(struct list_head *head, i
4784 while (!list_empty(head)) {
4785 pages += nfs_coalesce_requests(head, &one_request, wpages);
4786 req = nfs_list_entry(one_request.next);
4787 - error = nfs_flush_one(&one_request, req->wb_inode, how);
4788 + error = nfs_flush_one(&one_request, req->wb_context->dentry->d_inode, how);
4792 @@ -1054,16 +1063,15 @@ static void nfs_writeback_done_partial(s
4793 struct page *page = req->wb_page;
4795 dprintk("NFS: write (%s/%Ld %d@%Ld)",
4796 - req->wb_inode->i_sb->s_id,
4797 - (long long)NFS_FILEID(req->wb_inode),
4798 + req->wb_context->dentry->d_inode->i_sb->s_id,
4799 + (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
4801 (long long)req_offset(req));
4804 ClearPageUptodate(page);
4807 - req->wb_file->f_error = status;
4808 + req->wb_context->error = status;
4809 dprintk(", error = %d\n", status);
4811 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
4812 @@ -1104,16 +1112,15 @@ static void nfs_writeback_done_full(stru
4813 page = req->wb_page;
4815 dprintk("NFS: write (%s/%Ld %d@%Ld)",
4816 - req->wb_inode->i_sb->s_id,
4817 - (long long)NFS_FILEID(req->wb_inode),
4818 + req->wb_context->dentry->d_inode->i_sb->s_id,
4819 + (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
4821 (long long)req_offset(req));
4824 ClearPageUptodate(page);
4827 - req->wb_file->f_error = status;
4828 + req->wb_context->error = status;
4829 end_page_writeback(page);
4830 nfs_inode_remove_request(req);
4831 dprintk(", error = %d\n", status);
4832 @@ -1232,7 +1239,7 @@ static void nfs_commit_rpcsetup(struct l
4833 list_splice_init(head, &data->pages);
4834 first = nfs_list_entry(data->pages.next);
4835 last = nfs_list_entry(data->pages.prev);
4836 - inode = first->wb_inode;
4837 + inode = first->wb_context->dentry->d_inode;
4840 * Determine the offset range of requests in the COMMIT call.
4841 @@ -1246,7 +1253,7 @@ static void nfs_commit_rpcsetup(struct l
4844 data->inode = inode;
4845 - data->cred = first->wb_cred;
4846 + data->cred = first->wb_context->cred;
4848 data->args.fh = NFS_FH(data->inode);
4849 data->args.offset = start;
4850 @@ -1313,13 +1320,12 @@ nfs_commit_done(struct rpc_task *task)
4851 nfs_list_remove_request(req);
4853 dprintk("NFS: commit (%s/%Ld %d@%Ld)",
4854 - req->wb_inode->i_sb->s_id,
4855 - (long long)NFS_FILEID(req->wb_inode),
4856 + req->wb_context->dentry->d_inode->i_sb->s_id,
4857 + (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
4859 (long long)req_offset(req));
4860 if (task->tk_status < 0) {
4862 - req->wb_file->f_error = task->tk_status;
4863 + req->wb_context->error = task->tk_status;
4864 nfs_inode_remove_request(req);
4865 dprintk(", error = %d\n", task->tk_status);
4867 --- linux-2.6.7/fs/nfs/nfs4xdr.c.lsec 2004-06-15 23:20:26.000000000 -0600
4868 +++ linux-2.6.7/fs/nfs/nfs4xdr.c 2005-03-23 14:28:23.056544568 -0700
4869 @@ -84,9 +84,13 @@ static int nfs_stat_to_errno(int);
4870 ((3+NFS4_FHSIZE) >> 2))
4871 #define encode_getattr_maxsz (op_encode_hdr_maxsz + 3)
4872 #define nfs4_name_maxsz (1 + ((3 + NFS4_MAXNAMLEN) >> 2))
4873 +#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
4874 #define nfs4_fattr_bitmap_maxsz (36 + 2 * nfs4_name_maxsz)
4875 #define decode_getattr_maxsz (op_decode_hdr_maxsz + 3 + \
4876 nfs4_fattr_bitmap_maxsz)
4877 +#define encode_setattr_maxsz (op_decode_hdr_maxsz + 4 + \
4878 + nfs4_fattr_bitmap_maxsz)
4879 +#define decode_setattr_maxsz (op_decode_hdr_maxsz + 3)
4880 #define encode_savefh_maxsz (op_encode_hdr_maxsz)
4881 #define decode_savefh_maxsz (op_decode_hdr_maxsz)
4882 #define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2)
4883 @@ -118,10 +122,17 @@ static int nfs_stat_to_errno(int);
4884 #define encode_link_maxsz (op_encode_hdr_maxsz + \
4886 #define decode_link_maxsz (op_decode_hdr_maxsz + 5)
4887 +#define encode_symlink_maxsz (op_encode_hdr_maxsz + \
4888 + 1 + nfs4_name_maxsz + \
4889 + nfs4_path_maxsz + \
4890 + nfs4_fattr_bitmap_maxsz)
4891 +#define decode_symlink_maxsz (op_decode_hdr_maxsz + 8)
4892 #define encode_create_maxsz (op_encode_hdr_maxsz + \
4893 - 2 + 2 * nfs4_name_maxsz + \
4894 + 2 + nfs4_name_maxsz + \
4895 nfs4_fattr_bitmap_maxsz)
4896 #define decode_create_maxsz (op_decode_hdr_maxsz + 8)
4897 +#define encode_delegreturn_maxsz (op_encode_hdr_maxsz + 4)
4898 +#define decode_delegreturn_maxsz (op_decode_hdr_maxsz)
4899 #define NFS4_enc_compound_sz (1024) /* XXX: large enough? */
4900 #define NFS4_dec_compound_sz (1024) /* XXX: large enough? */
4901 #define NFS4_enc_read_sz (compound_encode_hdr_maxsz + \
4902 @@ -172,16 +183,14 @@ static int nfs_stat_to_errno(int);
4903 #define NFS4_dec_open_confirm_sz (compound_decode_hdr_maxsz + \
4904 decode_putfh_maxsz + \
4905 op_decode_hdr_maxsz + 4)
4906 -#define NFS4_enc_open_reclaim_sz (compound_encode_hdr_maxsz + \
4907 +#define NFS4_enc_open_noattr_sz (compound_encode_hdr_maxsz + \
4908 encode_putfh_maxsz + \
4909 op_encode_hdr_maxsz + \
4911 - encode_getattr_maxsz)
4912 -#define NFS4_dec_open_reclaim_sz (compound_decode_hdr_maxsz + \
4914 +#define NFS4_dec_open_noattr_sz (compound_decode_hdr_maxsz + \
4915 decode_putfh_maxsz + \
4916 op_decode_hdr_maxsz + \
4918 - decode_getattr_maxsz)
4920 #define NFS4_enc_open_downgrade_sz \
4921 (compound_encode_hdr_maxsz + \
4922 encode_putfh_maxsz + \
4923 @@ -313,6 +322,16 @@ static int nfs_stat_to_errno(int);
4924 decode_savefh_maxsz + \
4925 decode_putfh_maxsz + \
4927 +#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
4928 + encode_putfh_maxsz + \
4929 + encode_symlink_maxsz + \
4930 + encode_getattr_maxsz + \
4931 + encode_getfh_maxsz)
4932 +#define NFS4_dec_symlink_sz (compound_decode_hdr_maxsz + \
4933 + decode_putfh_maxsz + \
4934 + decode_symlink_maxsz + \
4935 + decode_getattr_maxsz + \
4936 + decode_getfh_maxsz)
4937 #define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
4938 encode_putfh_maxsz + \
4939 encode_create_maxsz + \
4940 @@ -339,6 +358,33 @@ static int nfs_stat_to_errno(int);
4941 encode_getattr_maxsz)
4942 #define NFS4_dec_server_caps_sz (compound_decode_hdr_maxsz + \
4943 decode_getattr_maxsz)
4944 +#define NFS4_enc_delegreturn_sz (compound_encode_hdr_maxsz + \
4945 + encode_putfh_maxsz + \
4946 + encode_delegreturn_maxsz)
4947 +#define NFS4_dec_delegreturn_sz (compound_decode_hdr_maxsz + \
4948 + decode_delegreturn_maxsz)
4949 +#define username_maxsz (1 + ((IDMAP_NAMESZ + 3) >> 2))
4950 +/* XXX: fix ACL bounds */
4951 +#define ace_maxsz (3 + username_maxsz)
4952 +#define NFS_ACL_MAX_ENTRIES 32
4953 +#define acl_maxentries ((NFS_ACL_MAX_ENTRIES - 3) * 3 + 6)
4954 +#define acl_maxsz (1 + acl_maxentries * ace_maxsz)
4955 +#define NFS4_enc_getacl_sz (compound_encode_hdr_maxsz + \
4956 + encode_putfh_maxsz + \
4957 + encode_getattr_maxsz)
4958 +#define username_maxsz (1 + ((IDMAP_NAMESZ + 3) >> 2))
4959 +#define ace_maxsz (3 + username_maxsz)
4960 +#define acl_maxentries ((NFS_ACL_MAX_ENTRIES - 3) * 3 + 6)
4961 +#define acl_maxsz (1 + acl_maxentries * ace_maxsz)
4962 +#define NFS4_dec_getacl_sz (compound_decode_hdr_maxsz + \
4963 + decode_putfh_maxsz + \
4964 + op_decode_hdr_maxsz + 3 + 1 + acl_maxsz)
4965 +#define NFS4_enc_setacl_sz (compound_encode_hdr_maxsz + \
4966 + encode_putfh_maxsz + \
4967 + op_encode_hdr_maxsz + 4 + 1 + acl_maxsz)
4968 +#define NFS4_dec_setacl_sz (compound_decode_hdr_maxsz + \
4969 + decode_putfh_maxsz + \
4970 + decode_setattr_maxsz)
4974 @@ -388,6 +434,15 @@ struct compound_hdr {
4978 +static void encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
4982 + p = xdr_reserve_space(xdr, 4 + len);
4983 + BUG_ON(p == NULL);
4984 + xdr_encode_opaque(p, str, len);
4987 static int encode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
4990 @@ -402,6 +457,15 @@ static int encode_compound_hdr(struct xd
4994 +static void encode_nfs4_verifier(struct xdr_stream *xdr, const nfs4_verifier *verf)
4998 + p = xdr_reserve_space(xdr, NFS4_VERIFIER_SIZE);
4999 + BUG_ON(p == NULL);
5000 + xdr_encode_opaque_fixed(p, verf->data, NFS4_VERIFIER_SIZE);
5003 static int encode_attrs(struct xdr_stream *xdr, const struct iattr *iap, const struct nfs_server *server)
5005 char owner_name[IDMAP_NAMESZ];
5006 @@ -420,7 +484,7 @@ static int encode_attrs(struct xdr_strea
5007 * In the worst-case, this would be
5008 * 12(bitmap) + 4(attrlen) + 8(size) + 4(mode) + 4(atime) + 4(mtime)
5009 * = 36 bytes, plus any contribution from variable-length fields
5010 - * such as owner/group/acl's.
5011 + * such as owner/group.
5015 @@ -742,19 +806,12 @@ static int encode_lookup(struct xdr_stre
5019 -static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg)
5020 +static void encode_share_access(struct xdr_stream *xdr, int open_flags)
5026 - * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
5027 - * owner 4, opentype 4 = 36
5029 - RESERVE_SPACE(36);
5031 - WRITE32(arg->seqid);
5032 - switch (arg->share_access) {
5034 + switch (open_flags & (FMODE_READ|FMODE_WRITE)) {
5036 WRITE32(NFS4_SHARE_ACCESS_READ);
5038 @@ -767,84 +824,135 @@ static int encode_open(struct xdr_stream
5042 - WRITE32(0); /* for linux, share_deny = 0 always */
5043 + WRITE32(0); /* for linux, share_deny = 0 always */
5046 +static inline void encode_openhdr(struct xdr_stream *xdr, const struct nfs_openargs *arg)
5050 + * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
5055 + WRITE32(arg->seqid);
5056 + encode_share_access(xdr, arg->open_flags);
5057 + RESERVE_SPACE(16);
5058 WRITE64(arg->clientid);
5061 - WRITE32(arg->opentype);
5064 - if (arg->opentype == NFS4_OPEN_CREATE) {
5065 - if (arg->createmode == NFS4_CREATE_EXCLUSIVE) {
5066 - RESERVE_SPACE(12);
5067 - WRITE32(arg->createmode);
5068 - WRITEMEM(arg->u.verifier.data, sizeof(arg->u.verifier.data));
5070 - else if (arg->u.attrs) {
5072 - WRITE32(arg->createmode);
5073 - if ((status = encode_attrs(xdr, arg->u.attrs, arg->server)))
5077 - RESERVE_SPACE(12);
5078 - WRITE32(arg->createmode);
5082 +static inline void encode_createmode(struct xdr_stream *xdr, const struct nfs_openargs *arg)
5087 + switch(arg->open_flags & O_EXCL) {
5089 + WRITE32(NFS4_CREATE_UNCHECKED);
5090 + encode_attrs(xdr, arg->u.attrs, arg->server);
5093 + WRITE32(NFS4_CREATE_EXCLUSIVE);
5094 + encode_nfs4_verifier(xdr, &arg->u.verifier);
5098 - RESERVE_SPACE(8 + arg->name->len);
5099 - WRITE32(NFS4_OPEN_CLAIM_NULL);
5100 - WRITE32(arg->name->len);
5101 - WRITEMEM(arg->name->name, arg->name->len);
5102 +static void encode_opentype(struct xdr_stream *xdr, const struct nfs_openargs *arg)
5108 + switch (arg->open_flags & O_CREAT) {
5110 + WRITE32(NFS4_OPEN_NOCREATE);
5113 + BUG_ON(arg->claim != NFS4_OPEN_CLAIM_NULL);
5114 + WRITE32(NFS4_OPEN_CREATE);
5115 + encode_createmode(xdr, arg);
5119 -static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg)
5120 +static inline void encode_delegation_type(struct xdr_stream *xdr, int delegation_type)
5124 - RESERVE_SPACE(8+sizeof(arg->stateid.data));
5125 - WRITE32(OP_OPEN_CONFIRM);
5126 - WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
5127 - WRITE32(arg->seqid);
5129 + switch (delegation_type) {
5131 + WRITE32(NFS4_OPEN_DELEGATE_NONE);
5134 + WRITE32(NFS4_OPEN_DELEGATE_READ);
5136 + case FMODE_WRITE|FMODE_READ:
5137 + WRITE32(NFS4_OPEN_DELEGATE_WRITE);
5145 +static inline void encode_claim_null(struct xdr_stream *xdr, const struct qstr *name)
5150 + WRITE32(NFS4_OPEN_CLAIM_NULL);
5151 + encode_string(xdr, name->len, name->name);
5154 +static inline void encode_claim_previous(struct xdr_stream *xdr, int type)
5159 + WRITE32(NFS4_OPEN_CLAIM_PREVIOUS);
5160 + encode_delegation_type(xdr, type);
5163 -static int encode_open_reclaim(struct xdr_stream *xdr, const struct nfs_open_reclaimargs *arg)
5164 +static inline void encode_claim_delegate_cur(struct xdr_stream *xdr, const struct qstr *name, const nfs4_stateid *stateid)
5169 - * opcode 4, seqid 4, share_access 4, share_deny 4, clientid 8, ownerlen 4,
5170 - * owner 4, opentype 4, claim 4, delegation_type 4 = 44
5172 - RESERVE_SPACE(44);
5174 - WRITE32(arg->seqid);
5175 - switch (arg->share_access) {
5177 - WRITE32(NFS4_SHARE_ACCESS_READ);
5178 + RESERVE_SPACE(4+sizeof(stateid->data));
5179 + WRITE32(NFS4_OPEN_CLAIM_DELEGATE_CUR);
5180 + WRITEMEM(stateid->data, sizeof(stateid->data));
5181 + encode_string(xdr, name->len, name->name);
5184 +static int encode_open(struct xdr_stream *xdr, const struct nfs_openargs *arg)
5186 + encode_openhdr(xdr, arg);
5187 + encode_opentype(xdr, arg);
5188 + switch (arg->claim) {
5189 + case NFS4_OPEN_CLAIM_NULL:
5190 + encode_claim_null(xdr, arg->name);
5193 - WRITE32(NFS4_SHARE_ACCESS_WRITE);
5194 + case NFS4_OPEN_CLAIM_PREVIOUS:
5195 + encode_claim_previous(xdr, arg->u.delegation_type);
5197 - case FMODE_READ|FMODE_WRITE:
5198 - WRITE32(NFS4_SHARE_ACCESS_BOTH);
5199 + case NFS4_OPEN_CLAIM_DELEGATE_CUR:
5200 + encode_claim_delegate_cur(xdr, arg->name, &arg->u.delegation);
5205 - WRITE32(0); /* for linux, share_deny = 0 always */
5206 - WRITE64(arg->clientid);
5209 - WRITE32(NFS4_OPEN_NOCREATE);
5210 - WRITE32(NFS4_OPEN_CLAIM_PREVIOUS);
5211 - WRITE32(NFS4_OPEN_DELEGATE_NONE);
5215 +static int encode_open_confirm(struct xdr_stream *xdr, const struct nfs_open_confirmargs *arg)
5219 + RESERVE_SPACE(8+sizeof(arg->stateid.data));
5220 + WRITE32(OP_OPEN_CONFIRM);
5221 + WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
5222 + WRITE32(arg->seqid);
5227 @@ -852,14 +960,11 @@ static int encode_open_downgrade(struct
5231 - RESERVE_SPACE(16+sizeof(arg->stateid.data));
5232 + RESERVE_SPACE(8+sizeof(arg->stateid.data));
5233 WRITE32(OP_OPEN_DOWNGRADE);
5234 WRITEMEM(arg->stateid.data, sizeof(arg->stateid.data));
5235 WRITE32(arg->seqid);
5236 - WRITE32(arg->share_access);
5237 - /* No deny modes */
5240 + encode_share_access(xdr, arg->open_flags);
5244 @@ -887,15 +992,15 @@ static int encode_putrootfh(struct xdr_s
5248 -static void encode_stateid(struct xdr_stream *xdr, struct nfs4_state *state, fl_owner_t lockowner)
5249 +static void encode_stateid(struct xdr_stream *xdr, const struct nfs_open_context *ctx)
5251 extern nfs4_stateid zero_stateid;
5252 nfs4_stateid stateid;
5256 - if (state != NULL) {
5257 - nfs4_copy_stateid(&stateid, state, lockowner);
5258 + if (ctx->state != NULL) {
5259 + nfs4_copy_stateid(&stateid, ctx->state, ctx->pid);
5260 WRITEMEM(stateid.data, sizeof(stateid.data));
5262 WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
5263 @@ -908,7 +1013,7 @@ static int encode_read(struct xdr_stream
5267 - encode_stateid(xdr, args->state, args->lockowner);
5268 + encode_stateid(xdr, args->context);
5271 WRITE64(args->offset);
5272 @@ -1003,6 +1108,45 @@ static int encode_renew(struct xdr_strea
5276 +extern nfs4_stateid zero_stateid;
5279 +encode_setacl(struct xdr_stream *xdr, struct nfs_setaclargs *arg)
5282 + uint32_t *q = (uint32_t *)arg->acl;
5283 + uint32_t *end = (uint32_t *)(arg->acl + arg->acl_len);
5287 + RESERVE_SPACE(4+sizeof(zero_stateid.data));
5288 + WRITE32(OP_SETATTR);
5289 + WRITEMEM(zero_stateid.data, sizeof(zero_stateid.data));
5290 + RESERVE_SPACE(4*4);
5292 + WRITE32(FATTR4_WORD0_ACL);
5293 + WRITE32(arg->acl_len);
5296 + naces = ntohl(*q++);
5298 + for (i = 0; i < naces; i++) {
5301 + RESERVE_SPACE(3*4);
5302 + memcpy(p, q, 3*4); /* type, flag, access_mask, length */
5304 + tmp = ntohl(*q++); /* length */
5305 + if (tmp > XDR_MAX_NETOBJ)
5307 + if (q + XDR_QUADLEN(tmp) > end)
5309 + RESERVE_SPACE((XDR_QUADLEN(tmp) << 2) + 4);
5310 + p = xdr_encode_opaque(p, q, tmp);
5316 encode_savefh(struct xdr_stream *xdr)
5318 @@ -1031,26 +1175,18 @@ static int encode_setattr(struct xdr_str
5320 static int encode_setclientid(struct xdr_stream *xdr, const struct nfs4_setclientid *setclientid)
5322 - uint32_t total_len;
5323 - uint32_t len1, len2, len3;
5326 - len1 = strlen(setclientid->sc_name);
5327 - len2 = strlen(setclientid->sc_netid);
5328 - len3 = strlen(setclientid->sc_uaddr);
5329 - total_len = XDR_QUADLEN(len1) + XDR_QUADLEN(len2) + XDR_QUADLEN(len3);
5330 - total_len = (total_len << 2) + 24 + sizeof(setclientid->sc_verifier.data);
5332 - RESERVE_SPACE(total_len);
5333 + RESERVE_SPACE(4 + sizeof(setclientid->sc_verifier->data));
5334 WRITE32(OP_SETCLIENTID);
5335 - WRITEMEM(setclientid->sc_verifier.data, sizeof(setclientid->sc_verifier.data));
5337 - WRITEMEM(setclientid->sc_name, len1);
5338 + WRITEMEM(setclientid->sc_verifier->data, sizeof(setclientid->sc_verifier->data));
5340 + encode_string(xdr, setclientid->sc_name_len, setclientid->sc_name);
5342 WRITE32(setclientid->sc_prog);
5344 - WRITEMEM(setclientid->sc_netid, len2);
5346 - WRITEMEM(setclientid->sc_uaddr, len3);
5347 + encode_string(xdr, setclientid->sc_netid_len, setclientid->sc_netid);
5348 + encode_string(xdr, setclientid->sc_uaddr_len, setclientid->sc_uaddr);
5350 WRITE32(setclientid->sc_cb_ident);
5353 @@ -1075,7 +1211,7 @@ static int encode_write(struct xdr_strea
5357 - encode_stateid(xdr, args->state, args->lockowner);
5358 + encode_stateid(xdr, args->context);
5361 WRITE64(args->offset);
5362 @@ -1086,6 +1222,18 @@ static int encode_write(struct xdr_strea
5367 +static int encode_delegreturn(struct xdr_stream *xdr, const nfs4_stateid *stateid)
5371 + RESERVE_SPACE(20);
5373 + WRITE32(OP_DELEGRETURN);
5374 + WRITEMEM(stateid->data, sizeof(stateid->data));
5379 * END OF "GENERIC" ENCODE ROUTINES.
5381 @@ -1244,6 +1392,14 @@ out:
5385 + * Encode SYMLINK request
5387 +static int nfs4_xdr_enc_symlink(struct rpc_rqst *req, uint32_t *p, const struct nfs4_create_arg *args)
5389 + return nfs4_xdr_enc_create(req, p, args);
5393 * Encode GETATTR request
5395 static int nfs4_xdr_enc_getattr(struct rpc_rqst *req, uint32_t *p, const struct nfs4_getattr_arg *args)
5396 @@ -1331,13 +1487,13 @@ out:
5400 - * Encode an OPEN request
5401 + * Encode an OPEN request with no attributes.
5403 -static int nfs4_xdr_enc_open_reclaim(struct rpc_rqst *req, uint32_t *p, struct nfs_open_reclaimargs *args)
5404 +static int nfs4_xdr_enc_open_noattr(struct rpc_rqst *req, uint32_t *p, struct nfs_openargs *args)
5406 struct xdr_stream xdr;
5407 struct compound_hdr hdr = {
5413 @@ -1346,10 +1502,7 @@ static int nfs4_xdr_enc_open_reclaim(str
5414 status = encode_putfh(&xdr, args->fh);
5417 - status = encode_open_reclaim(&xdr, args);
5420 - status = encode_getfattr(&xdr, args->bitmask);
5421 + status = encode_open(&xdr, args);
5425 @@ -1538,6 +1691,52 @@ out:
5429 + * Encode a SETACL request
5432 +nfs4_xdr_enc_setacl(struct rpc_rqst *req, uint32_t *p, struct nfs_setaclargs *args)
5435 + struct xdr_stream xdr;
5436 + struct compound_hdr hdr = {
5441 + xdr_init_encode(&xdr, &req->rq_snd_buf, p);
5442 + encode_compound_hdr(&xdr, &hdr);
5443 + status = encode_putfh(&xdr, args->fh);
5446 + status = encode_setacl(&xdr, args);
5452 + * Encode a GETACL request
5455 +nfs4_xdr_enc_getacl(struct rpc_rqst *req, uint32_t *p,struct nfs_fh *fhandle)
5457 + struct xdr_stream xdr;
5458 + struct compound_hdr hdr = {
5463 + xdr_init_encode(&xdr, &req->rq_snd_buf, p);
5464 + encode_compound_hdr(&xdr, &hdr);
5465 + status = encode_putfh(&xdr, fhandle);
5468 + status = encode_getattr_two(&xdr, FATTR4_WORD0_ACL, 0);
5475 * Encode a WRITE request
5477 static int nfs4_xdr_enc_write(struct rpc_rqst *req, uint32_t *p, struct nfs_writeargs *args)
5478 @@ -1716,6 +1915,24 @@ static int nfs4_xdr_enc_setclientid_conf
5482 + * Encode a DELEGRETURN request
5484 +static int nfs4_xdr_enc_delegreturn(struct rpc_rqst *req, uint32_t *p, const struct nfs4_delegreturnargs *args)
5486 + struct xdr_stream xdr;
5487 + struct compound_hdr hdr = {
5492 + xdr_init_encode(&xdr, &req->rq_snd_buf, p);
5493 + encode_compound_hdr(&xdr, &hdr);
5494 + if ((status = encode_putfh(&xdr, args->fhandle)) == 0)
5495 + status = encode_delegreturn(&xdr, args->stateid);
5500 * START OF "GENERIC" DECODE ROUTINES.
5501 * These may look a little ugly since they are imported from a "generic"
5502 * set of XDR encode/decode routines which are intended to be shared by
5503 @@ -1749,6 +1966,17 @@ static int nfs4_xdr_enc_setclientid_conf
5507 +static int decode_opaque_inline(struct xdr_stream *xdr, uint32_t *len, char **string)
5514 + *string = (char *)p;
5518 static int decode_compound_hdr(struct xdr_stream *xdr, struct compound_hdr *hdr)
5521 @@ -1785,6 +2013,17 @@ static int decode_op_hdr(struct xdr_stre
5525 +/* Dummy routine */
5526 +static int decode_ace(struct xdr_stream *xdr, void *ace, struct nfs4_client *clp)
5533 + return decode_opaque_inline(xdr, &strlen, &str);
5536 static int decode_attr_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
5539 @@ -2717,10 +2956,56 @@ static int decode_lookup(struct xdr_stre
5540 return decode_op_hdr(xdr, OP_LOOKUP);
5543 +/* This is too sick! */
5544 +static int decode_space_limit(struct xdr_stream *xdr, u64 *maxsize)
5547 + uint32_t limit_type, nblocks, blocksize;
5550 + READ32(limit_type);
5551 + switch (limit_type) {
5557 + READ32(blocksize);
5558 + *maxsize = (uint64_t)nblocks * (uint64_t)blocksize;
5563 +static int decode_delegation(struct xdr_stream *xdr, struct nfs_openres *res)
5566 + uint32_t delegation_type;
5569 + READ32(delegation_type);
5570 + if (delegation_type == NFS4_OPEN_DELEGATE_NONE) {
5571 + res->delegation_type = 0;
5575 + COPYMEM(res->delegation.data, sizeof(res->delegation.data));
5576 + READ32(res->do_recall);
5577 + switch (delegation_type) {
5578 + case NFS4_OPEN_DELEGATE_READ:
5579 + res->delegation_type = FMODE_READ;
5581 + case NFS4_OPEN_DELEGATE_WRITE:
5582 + res->delegation_type = FMODE_WRITE|FMODE_READ;
5583 + if (decode_space_limit(xdr, &res->maxsize) < 0)
5586 + return decode_ace(xdr, NULL, res->server->nfs4_state);
5589 static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
5592 - uint32_t bmlen, delegation_type;
5596 status = decode_op_hdr(xdr, OP_OPEN);
5597 @@ -2737,11 +3022,9 @@ static int decode_open(struct xdr_stream
5601 - READ_BUF((bmlen << 2) + 4);
5602 + READ_BUF(bmlen << 2);
5604 - READ32(delegation_type);
5605 - if (delegation_type == NFS4_OPEN_DELEGATE_NONE)
5607 + return decode_delegation(xdr, res);
5609 printk(KERN_NOTICE "%s: xdr error!\n", __FUNCTION__);
5611 @@ -2967,6 +3250,72 @@ static int decode_renew(struct xdr_strea
5612 return decode_op_hdr(xdr, OP_RENEW);
5615 +static int decode_attr_acl(struct xdr_stream *xdr, uint32_t *bitmap,
5616 + struct nfs_getaclres *res)
5620 + if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
5622 + if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
5623 + ssize_t size = res->acl_len;
5624 + uint32_t nace, tmp;
5632 + res->acl_len += 4;
5634 + for (i = 0; i < nace; i++) {
5636 + res->acl_len += 4*4;
5638 + READ32(tmp); /* namelen */
5640 + if (tmp > XDR_MAX_NETOBJ) {
5641 + printk(KERN_WARNING "%s: name too long (%u)!\n",
5642 + __FUNCTION__, tmp);
5645 + res->acl_len += XDR_QUADLEN(tmp) << 2;
5647 + if (size && res->acl_len > size)
5649 + if (size == 0 && res->acl_len <= XATTR_SIZE_MAX)
5650 + res->acl = kmalloc(res->acl_len, GFP_KERNEL);
5652 + memcpy(res->acl, start, res->acl_len);
5657 +static int decode_getacl(struct xdr_stream *xdr, struct nfs_getaclres *res)
5664 + if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0)
5666 + if ((status = decode_attr_bitmap(xdr, bitmap)) != 0)
5668 + if ((status = decode_attr_length(xdr, &attrlen, &savep)) != 0)
5671 + if ((status = decode_attr_acl(xdr, bitmap, res)) != 0)
5674 + status = verify_attr_len(xdr, savep, attrlen);
5677 + printk(KERN_NOTICE "%s: xdr error %d!\n", __FUNCTION__, -status);
5682 decode_savefh(struct xdr_stream *xdr)
5684 @@ -3048,6 +3397,11 @@ static int decode_write(struct xdr_strea
5688 +static int decode_delegreturn(struct xdr_stream *xdr)
5690 + return decode_op_hdr(xdr, OP_DELEGRETURN);
5694 * Decode OPEN_DOWNGRADE response
5696 @@ -3222,6 +3576,14 @@ out:
5700 + * Decode SYMLINK response
5702 +static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_create_res *res)
5704 + return nfs4_xdr_dec_create(rqstp, p, res);
5708 * Decode GETATTR response
5710 static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs4_getattr_res *res)
5711 @@ -3243,6 +3605,50 @@ out:
5716 + * Decode SETACL response
5719 +nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp, uint32_t *p, void *res)
5721 + struct xdr_stream xdr;
5722 + struct compound_hdr hdr;
5725 + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
5726 + status = decode_compound_hdr(&xdr, &hdr);
5729 + status = decode_putfh(&xdr);
5732 + status = decode_setattr(&xdr, res);
5738 + * Decode GETACL response
5741 +nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_getaclres *res)
5743 + struct xdr_stream xdr;
5744 + struct compound_hdr hdr;
5747 + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
5748 + status = decode_compound_hdr(&xdr, &hdr);
5751 + status = decode_putfh(&xdr);
5754 + status = decode_getacl(&xdr, res);
5761 * Decode CLOSE response
5762 @@ -3314,9 +3720,9 @@ out:
5766 - * Decode OPEN_RECLAIM response
5767 + * Decode OPEN response
5769 -static int nfs4_xdr_dec_open_reclaim(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_openres *res)
5770 +static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp, uint32_t *p, struct nfs_openres *res)
5772 struct xdr_stream xdr;
5773 struct compound_hdr hdr;
5774 @@ -3330,9 +3736,6 @@ static int nfs4_xdr_dec_open_reclaim(str
5777 status = decode_open(&xdr, res);
5780 - status = decode_getfattr(&xdr, res->f_attr, res->server);
5784 @@ -3665,6 +4068,25 @@ static int nfs4_xdr_dec_setclientid_conf
5789 + * Decode DELEGRETURN response
5791 +static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp, uint32_t *p, void *dummy)
5793 + struct xdr_stream xdr;
5794 + struct compound_hdr hdr;
5797 + xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
5798 + status = decode_compound_hdr(&xdr, &hdr);
5799 + if (status == 0) {
5800 + status = decode_putfh(&xdr);
5802 + status = decode_delegreturn(&xdr);
5807 uint32_t *nfs4_decode_dirent(uint32_t *p, struct nfs_entry *entry, int plus)
5810 @@ -3756,7 +4178,7 @@ nfs_stat_to_errno(int stat)
5811 if (nfs_errtbl[i].stat == stat)
5812 return nfs_errtbl[i].errno;
5815 + if (stat <= 10000 || stat > 10100) {
5816 /* The server is looney tunes. */
5817 return ESERVERFAULT;
5819 @@ -3786,7 +4208,7 @@ struct rpc_procinfo nfs4_procedures[] =
5820 PROC(COMMIT, enc_commit, dec_commit),
5821 PROC(OPEN, enc_open, dec_open),
5822 PROC(OPEN_CONFIRM, enc_open_confirm, dec_open_confirm),
5823 - PROC(OPEN_RECLAIM, enc_open_reclaim, dec_open_reclaim),
5824 + PROC(OPEN_NOATTR, enc_open_noattr, dec_open_noattr),
5825 PROC(OPEN_DOWNGRADE, enc_open_downgrade, dec_open_downgrade),
5826 PROC(CLOSE, enc_close, dec_close),
5827 PROC(SETATTR, enc_setattr, dec_setattr),
5828 @@ -3804,12 +4226,16 @@ struct rpc_procinfo nfs4_procedures[] =
5829 PROC(REMOVE, enc_remove, dec_remove),
5830 PROC(RENAME, enc_rename, dec_rename),
5831 PROC(LINK, enc_link, dec_link),
5832 + PROC(SYMLINK, enc_symlink, dec_symlink),
5833 PROC(CREATE, enc_create, dec_create),
5834 PROC(PATHCONF, enc_pathconf, dec_pathconf),
5835 PROC(STATFS, enc_statfs, dec_statfs),
5836 PROC(READLINK, enc_readlink, dec_readlink),
5837 PROC(READDIR, enc_readdir, dec_readdir),
5838 PROC(SERVER_CAPS, enc_server_caps, dec_server_caps),
5839 + PROC(DELEGRETURN, enc_delegreturn, dec_delegreturn),
5840 + PROC(GETACL, enc_getacl, dec_getacl),
5841 + PROC(SETACL, enc_setacl, dec_setacl),
5844 struct rpc_version nfs_version4 = {
5845 --- linux-2.6.7/fs/nfs/pagelist.c.lsec 2004-06-15 23:20:03.000000000 -0600
5846 +++ linux-2.6.7/fs/nfs/pagelist.c 2005-03-23 14:28:23.057544416 -0700
5847 @@ -36,7 +36,6 @@ nfs_page_alloc(void)
5849 memset(p, 0, sizeof(*p));
5850 INIT_LIST_HEAD(&p->wb_list);
5851 - init_waitqueue_head(&p->wb_wait);
5855 @@ -62,7 +61,7 @@ nfs_page_free(struct nfs_page *p)
5856 * User should ensure it is safe to sleep in this function.
5859 -nfs_create_request(struct file *file, struct inode *inode,
5860 +nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
5862 unsigned int offset, unsigned int count)
5864 @@ -94,33 +93,38 @@ nfs_create_request(struct file *file, st
5865 req->wb_offset = offset;
5866 req->wb_pgbase = offset;
5867 req->wb_bytes = count;
5868 - req->wb_inode = inode;
5870 - server->rpc_ops->request_init(req, file);
5871 + req->wb_context = get_nfs_open_context(ctx);
5877 + * nfs_unlock_request - Unlock request and wake up sleepers.
5880 +void nfs_unlock_request(struct nfs_page *req)
5882 + if (!NFS_WBACK_BUSY(req)) {
5883 + printk(KERN_ERR "NFS: Invalid unlock attempted\n");
5886 + smp_mb__before_clear_bit();
5887 + clear_bit(PG_BUSY, &req->wb_flags);
5888 + smp_mb__after_clear_bit();
5889 + wake_up_all(&req->wb_context->waitq);
5890 + nfs_release_request(req);
5894 * nfs_clear_request - Free up all resources allocated to the request
5897 - * Release all resources associated with a write request after it
5898 + * Release page resources associated with a write request after it
5901 void nfs_clear_request(struct nfs_page *req)
5903 - if (req->wb_state)
5904 - req->wb_state = NULL;
5905 - /* Release struct file or cached credential */
5906 - if (req->wb_file) {
5907 - fput(req->wb_file);
5908 - req->wb_file = NULL;
5910 - if (req->wb_cred) {
5911 - put_rpccred(req->wb_cred);
5912 - req->wb_cred = NULL;
5915 page_cache_release(req->wb_page);
5916 req->wb_page = NULL;
5917 @@ -151,6 +155,7 @@ nfs_release_request(struct nfs_page *req
5919 /* Release struct file or cached credential */
5920 nfs_clear_request(req);
5921 + put_nfs_open_context(req->wb_context);
5925 @@ -194,12 +199,12 @@ nfs_list_add_request(struct nfs_page *re
5927 nfs_wait_on_request(struct nfs_page *req)
5929 - struct inode *inode = req->wb_inode;
5930 + struct inode *inode = req->wb_context->dentry->d_inode;
5931 struct rpc_clnt *clnt = NFS_CLIENT(inode);
5933 if (!NFS_WBACK_BUSY(req))
5935 - return nfs_wait_event(clnt, req->wb_wait, !NFS_WBACK_BUSY(req));
5936 + return nfs_wait_event(clnt, req->wb_context->waitq, !NFS_WBACK_BUSY(req));
5940 @@ -224,7 +229,11 @@ nfs_coalesce_requests(struct list_head *
5942 req = nfs_list_entry(head->next);
5944 - if (req->wb_cred != prev->wb_cred)
5945 + if (req->wb_context->cred != prev->wb_context->cred)
5947 + if (req->wb_context->pid != prev->wb_context->pid)
5949 + if (req->wb_context->state != prev->wb_context->state)
5951 if (req->wb_index != (prev->wb_index + 1))
5953 --- linux-2.6.7/fs/nfs/nfs4proc.c.lsec 2004-06-15 23:19:44.000000000 -0600
5954 +++ linux-2.6.7/fs/nfs/nfs4proc.c 2005-03-23 14:32:35.532162440 -0700
5956 #include <linux/smp_lock.h>
5957 #include <linux/namei.h>
5959 +#include "delegation.h"
5961 #define NFSDBG_FACILITY NFSDBG_PROC
5963 -#define NFS4_POLL_RETRY_TIME (15*HZ)
5964 +#define NFS4_POLL_RETRY_MIN (1*HZ)
5965 +#define NFS4_POLL_RETRY_MAX (15*HZ)
5967 static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
5968 static int nfs4_async_handle_error(struct rpc_task *, struct nfs_server *);
5969 +static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry);
5970 extern u32 *nfs4_decode_dirent(u32 *p, struct nfs_entry *entry, int plus);
5971 extern struct rpc_procinfo nfs4_procedures[];
5973 @@ -189,53 +193,296 @@ static void update_changeattr(struct ino
5974 * reclaim state on the server after a reboot.
5975 * Assumes caller is holding the sp->so_sem
5978 -nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
5979 +static int _nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
5981 struct inode *inode = state->inode;
5982 struct nfs_server *server = NFS_SERVER(inode);
5983 - struct nfs_fattr fattr = {
5986 - struct nfs_open_reclaimargs o_arg = {
5987 + struct nfs_delegation *delegation = NFS_I(inode)->delegation;
5988 + struct nfs_openargs o_arg = {
5989 .fh = NFS_FH(inode),
5990 .seqid = sp->so_seqid,
5992 - .share_access = state->state,
5993 + .open_flags = state->state,
5994 .clientid = server->nfs4_state->cl_clientid,
5995 .claim = NFS4_OPEN_CLAIM_PREVIOUS,
5996 .bitmask = server->attr_bitmask,
5998 struct nfs_openres o_res = {
6000 .server = server, /* Grrr */
6002 struct rpc_message msg = {
6003 - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_RECLAIM],
6004 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR],
6007 .rpc_cred = sp->so_cred,
6011 + if (delegation != NULL) {
6012 + if (!(delegation->flags & NFS_DELEGATION_NEED_RECLAIM)) {
6013 + memcpy(&state->stateid, &delegation->stateid,
6014 + sizeof(state->stateid));
6015 + set_bit(NFS_DELEGATED_STATE, &state->flags);
6018 + o_arg.u.delegation_type = delegation->type;
6020 status = rpc_call_sync(server->client, &msg, 0);
6021 nfs4_increment_seqid(status, sp);
6023 + if (status == 0) {
6024 memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
6025 - /* Update the inode attributes */
6026 - nfs_refresh_inode(inode, &fattr);
6027 + if (o_res.delegation_type != 0) {
6028 + nfs_inode_reclaim_delegation(inode, sp->so_cred, &o_res);
6029 + /* Did the server issue an immediate delegation recall? */
6030 + if (o_res.do_recall)
6031 + nfs_async_inode_return_delegation(inode, &o_res.stateid);
6034 + clear_bit(NFS_DELEGATED_STATE, &state->flags);
6035 + /* Ensure we update the inode attributes */
6036 + NFS_CACHEINV(inode);
6040 +int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *state)
6042 + struct nfs_server *server = NFS_SERVER(state->inode);
6043 + struct nfs4_exception exception = { };
6046 + err = _nfs4_open_reclaim(sp, state);
6049 + case -NFS4ERR_STALE_CLIENTID:
6050 + case -NFS4ERR_STALE_STATEID:
6051 + case -NFS4ERR_EXPIRED:
6054 + err = nfs4_handle_exception(server, err, &exception);
6055 + } while (exception.retry);
6059 +static int _nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
6061 + struct nfs4_state_owner *sp = state->owner;
6062 + struct inode *inode = dentry->d_inode;
6063 + struct nfs_server *server = NFS_SERVER(inode);
6064 + struct dentry *parent = dget_parent(dentry);
6065 + struct nfs_openargs arg = {
6066 + .fh = NFS_FH(parent->d_inode),
6067 + .clientid = server->nfs4_state->cl_clientid,
6068 + .name = &dentry->d_name,
6071 + .bitmask = server->attr_bitmask,
6072 + .claim = NFS4_OPEN_CLAIM_DELEGATE_CUR,
6074 + struct nfs_openres res = {
6077 + struct rpc_message msg = {
6078 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR],
6081 + .rpc_cred = sp->so_cred,
6085 + down(&sp->so_sema);
6086 + if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
6088 + if (state->state == 0)
6090 + arg.seqid = sp->so_seqid;
6091 + arg.open_flags = state->state;
6092 + memcpy(arg.u.delegation.data, state->stateid.data, sizeof(arg.u.delegation.data));
6093 + status = rpc_call_sync(server->client, &msg, 0);
6094 + nfs4_increment_seqid(status, sp);
6095 + if (status >= 0) {
6096 + memcpy(state->stateid.data, res.stateid.data,
6097 + sizeof(state->stateid.data));
6098 + clear_bit(NFS_DELEGATED_STATE, &state->flags);
6106 +int nfs4_open_delegation_recall(struct dentry *dentry, struct nfs4_state *state)
6108 + struct nfs4_exception exception = { };
6109 + struct nfs_server *server = NFS_SERVER(dentry->d_inode);
6112 + err = _nfs4_open_delegation_recall(dentry, state);
6116 + case -NFS4ERR_STALE_CLIENTID:
6117 + case -NFS4ERR_STALE_STATEID:
6118 + case -NFS4ERR_EXPIRED:
6119 + /* Don't recall a delegation if it was lost */
6120 + nfs4_schedule_state_recovery(server->nfs4_state);
6123 + err = nfs4_handle_exception(server, err, &exception);
6124 + } while (exception.retry);
6128 +static int _nfs4_proc_open_confirm(struct rpc_clnt *clnt, const struct nfs_fh *fh, struct nfs4_state_owner *sp, nfs4_stateid *stateid)
6130 + struct nfs_open_confirmargs arg = {
6132 + .seqid = sp->so_seqid,
6133 + .stateid = *stateid,
6135 + struct nfs_open_confirmres res;
6136 + struct rpc_message msg = {
6137 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
6140 + .rpc_cred = sp->so_cred,
6144 + status = rpc_call_sync(clnt, &msg, 0);
6145 + nfs4_increment_seqid(status, sp);
6147 + memcpy(stateid, &res.stateid, sizeof(*stateid));
6151 +static int _nfs4_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
6153 + struct nfs_access_entry cache;
6156 + status = nfs_access_get_cached(inode, cred, &cache);
6160 + /* Be clever: ask server to check for all possible rights */
6161 + cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ;
6162 + cache.cred = cred;
6163 + cache.jiffies = jiffies;
6164 + status = _nfs4_proc_access(inode, &cache);
6167 + nfs_access_add_cache(inode, &cache);
6169 + if ((cache.mask & mask) == mask)
6175 + * Returns an nfs4_state + an extra reference to the inode
6177 +int _nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred, struct nfs4_state **res)
6179 + struct nfs_delegation *delegation;
6180 + struct nfs_server *server = NFS_SERVER(inode);
6181 + struct nfs4_client *clp = server->nfs4_state;
6182 + struct nfs_inode *nfsi = NFS_I(inode);
6183 + struct nfs4_state_owner *sp = NULL;
6184 + struct nfs4_state *state = NULL;
6185 + int open_flags = flags & (FMODE_READ|FMODE_WRITE);
6189 + /* Protect against reboot recovery - NOTE ORDER! */
6190 + down_read(&clp->cl_sem);
6191 + /* Protect against delegation recall */
6192 + down_read(&nfsi->rwsem);
6193 + delegation = NFS_I(inode)->delegation;
6195 + if (delegation == NULL || (delegation->type & open_flags) != open_flags)
6198 + if (!(sp = nfs4_get_state_owner(server, cred))) {
6199 + dprintk("%s: nfs4_get_state_owner failed!\n", __FUNCTION__);
6202 + down(&sp->so_sema);
6203 + state = nfs4_get_open_state(inode, sp);
6204 + if (state == NULL)
6208 + if ((state->state & open_flags) == open_flags) {
6209 + spin_lock(&inode->i_lock);
6210 + if (open_flags & FMODE_READ)
6211 + state->nreaders++;
6212 + if (open_flags & FMODE_WRITE)
6213 + state->nwriters++;
6214 + spin_unlock(&inode->i_lock);
6216 + } else if (state->state != 0)
6220 + err = _nfs4_do_access(inode, cred, mask);
6224 + spin_lock(&inode->i_lock);
6225 + memcpy(state->stateid.data, delegation->stateid.data,
6226 + sizeof(state->stateid.data));
6227 + state->state |= open_flags;
6228 + if (open_flags & FMODE_READ)
6229 + state->nreaders++;
6230 + if (open_flags & FMODE_WRITE)
6231 + state->nwriters++;
6232 + set_bit(NFS_DELEGATED_STATE, &state->flags);
6233 + spin_unlock(&inode->i_lock);
6236 + nfs4_put_state_owner(sp);
6237 + up_read(&nfsi->rwsem);
6238 + up_read(&clp->cl_sem);
6244 + if (state != NULL)
6245 + nfs4_put_open_state(state);
6247 + nfs4_put_state_owner(sp);
6249 + up_read(&nfsi->rwsem);
6250 + up_read(&clp->cl_sem);
6254 +static struct nfs4_state *nfs4_open_delegated(struct inode *inode, int flags, struct rpc_cred *cred)
6256 + struct nfs4_exception exception = { };
6257 + struct nfs4_state *res;
6261 + err = _nfs4_open_delegated(inode, flags, cred, &res);
6264 + res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(inode),
6265 + err, &exception));
6266 + } while (exception.retry);
6271 * Returns an nfs4_state + an referenced inode
6273 -struct nfs4_state *
6274 -nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr, struct rpc_cred *cred)
6275 +static int _nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res)
6277 struct nfs4_state_owner *sp;
6278 struct nfs4_state *state = NULL;
6279 struct nfs_server *server = NFS_SERVER(dir);
6280 + struct nfs4_client *clp = server->nfs4_state;
6281 struct inode *inode = NULL;
6283 struct nfs_fattr f_attr = {
6284 @@ -243,12 +490,11 @@ nfs4_do_open(struct inode *dir, struct q
6286 struct nfs_openargs o_arg = {
6288 - .share_access = flags & (FMODE_READ|FMODE_WRITE),
6289 - .opentype = (flags & O_CREAT) ? NFS4_OPEN_CREATE : NFS4_OPEN_NOCREATE,
6290 - .createmode = (flags & O_EXCL) ? NFS4_CREATE_EXCLUSIVE : NFS4_CREATE_UNCHECKED,
6291 + .open_flags = flags,
6294 .bitmask = server->attr_bitmask,
6295 + .claim = NFS4_OPEN_CLAIM_NULL,
6297 struct nfs_openres o_res = {
6299 @@ -261,60 +507,44 @@ nfs4_do_open(struct inode *dir, struct q
6304 + /* Protect against reboot recovery conflicts */
6305 + down_read(&clp->cl_sem);
6307 - if (!(sp = nfs4_get_state_owner(NFS_SERVER(dir), cred))) {
6308 + if (!(sp = nfs4_get_state_owner(server, cred))) {
6309 dprintk("nfs4_do_open: nfs4_get_state_owner failed!\n");
6313 - if (o_arg.createmode & NFS4_CREATE_EXCLUSIVE){
6314 + if (flags & O_EXCL) {
6315 u32 *p = (u32 *) o_arg.u.verifier.data;
6317 p[1] = current->pid;
6318 - } else if (o_arg.createmode == NFS4_CREATE_UNCHECKED) {
6320 o_arg.u.attrs = sattr;
6322 /* Serialization for the sequence id */
6324 o_arg.seqid = sp->so_seqid;
6325 o_arg.id = sp->so_id;
6326 - o_arg.clientid = NFS_SERVER(dir)->nfs4_state->cl_clientid,
6327 + o_arg.clientid = clp->cl_clientid;
6329 status = rpc_call_sync(server->client, &msg, 0);
6330 nfs4_increment_seqid(status, sp);
6334 update_changeattr(dir, &o_res.cinfo);
6335 + if(o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) {
6336 + status = _nfs4_proc_open_confirm(server->client, &o_res.fh, sp, &o_res.stateid);
6342 inode = nfs_fhget(dir->i_sb, &o_res.fh, &f_attr);
6346 state = nfs4_get_open_state(inode, sp);
6350 - if(o_res.rflags & NFS4_OPEN_RESULT_CONFIRM) {
6351 - struct nfs_open_confirmargs oc_arg = {
6353 - .seqid = sp->so_seqid,
6355 - struct nfs_open_confirmres oc_res;
6356 - struct rpc_message msg = {
6357 - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_CONFIRM],
6358 - .rpc_argp = &oc_arg,
6359 - .rpc_resp = &oc_res,
6363 - memcpy(&oc_arg.stateid, &o_res.stateid, sizeof(oc_arg.stateid));
6364 - status = rpc_call_sync(server->client, &msg, 0);
6365 - nfs4_increment_seqid(status, sp);
6368 - memcpy(&state->stateid, &oc_res.stateid, sizeof(state->stateid));
6370 - memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
6372 + memcpy(&state->stateid, &o_res.stateid, sizeof(state->stateid));
6373 spin_lock(&inode->i_lock);
6374 if (flags & FMODE_READ)
6376 @@ -322,47 +552,62 @@ retry:
6378 state->state |= flags & (FMODE_READ|FMODE_WRITE);
6379 spin_unlock(&inode->i_lock);
6381 + if (o_res.delegation_type != 0)
6382 + nfs_inode_set_delegation(inode, cred, &o_res);
6384 nfs4_put_state_owner(sp);
6389 - nfs4_put_state_owner(sp);
6391 - nfs4_put_open_state(state);
6395 + up_read(&clp->cl_sem);
6400 + if (state != NULL)
6401 + nfs4_put_open_state(state);
6403 + nfs4_put_state_owner(sp);
6405 + /* Note: clp->cl_sem must be released before nfs4_put_open_state()! */
6406 + up_read(&clp->cl_sem);
6407 + if (inode != NULL)
6411 - /* NOTE: BAD_SEQID means the server and client disagree about the
6412 - * book-keeping w.r.t. state-changing operations
6413 - * (OPEN/CLOSE/LOCK/LOCKU...)
6414 - * It is actually a sign of a bug on the client or on the server.
6416 - * If we receive a BAD_SEQID error in the particular case of
6417 - * doing an OPEN, we assume that nfs4_increment_seqid() will
6418 - * have unhashed the old state_owner for us, and that we can
6419 - * therefore safely retry using a new one. We should still warn
6420 - * the user though...
6422 - if (status == -NFS4ERR_BAD_SEQID) {
6423 - printk(KERN_WARNING "NFS: v4 server returned a bad sequence-id error!\n");
6426 - status = nfs4_handle_error(server, status);
6429 - BUG_ON(status < -1000 || status > 0);
6431 - return ERR_PTR(status);
6437 -nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
6439 +struct nfs4_state *nfs4_do_open(struct inode *dir, struct qstr *name, int flags, struct iattr *sattr, struct rpc_cred *cred)
6441 + struct nfs4_exception exception = { };
6442 + struct nfs4_state *res;
6446 + status = _nfs4_do_open(dir, name, flags, sattr, cred, &res);
6449 + /* NOTE: BAD_SEQID means the server and client disagree about the
6450 + * book-keeping w.r.t. state-changing operations
6451 + * (OPEN/CLOSE/LOCK/LOCKU...)
6452 + * It is actually a sign of a bug on the client or on the server.
6454 + * If we receive a BAD_SEQID error in the particular case of
6455 + * doing an OPEN, we assume that nfs4_increment_seqid() will
6456 + * have unhashed the old state_owner for us, and that we can
6457 + * therefore safely retry using a new one. We should still warn
6458 + * the user though...
6460 + if (status == -NFS4ERR_BAD_SEQID) {
6461 + printk(KERN_WARNING "NFS: v4 server returned a bad sequence-id error!\n");
6462 + exception.retry = 1;
6465 + res = ERR_PTR(nfs4_handle_exception(NFS_SERVER(dir),
6466 + status, &exception));
6467 + } while (exception.retry);
6471 +static int _nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
6472 struct nfs_fh *fhandle, struct iattr *sattr,
6473 struct nfs4_state *state)
6475 @@ -381,9 +626,7 @@ nfs4_do_setattr(struct nfs_server *serve
6484 if (sattr->ia_valid & ATTR_SIZE)
6485 @@ -391,13 +634,22 @@ retry:
6487 memcpy(&arg.stateid, &zero_stateid, sizeof(arg.stateid));
6489 - status = rpc_call_sync(server->client, &msg, 0);
6491 - status = nfs4_handle_error(server, status);
6496 + return rpc_call_sync(server->client, &msg, 0);
6499 +int nfs4_do_setattr(struct nfs_server *server, struct nfs_fattr *fattr,
6500 + struct nfs_fh *fhandle, struct iattr *sattr,
6501 + struct nfs4_state *state)
6503 + struct nfs4_exception exception = { };
6506 + err = nfs4_handle_exception(server,
6507 + _nfs4_do_setattr(server, fattr, fhandle, sattr,
6510 + } while (exception.retry);
6515 @@ -411,8 +663,7 @@ retry:
6517 * NOTE: Caller must be holding the sp->so_owner semaphore!
6520 -nfs4_do_close(struct inode *inode, struct nfs4_state *state)
6521 +static int _nfs4_do_close(struct inode *inode, struct nfs4_state *state)
6523 struct nfs4_state_owner *sp = state->owner;
6525 @@ -426,6 +677,8 @@ nfs4_do_close(struct inode *inode, struc
6529 + if (test_bit(NFS_DELEGATED_STATE, &state->flags))
6531 memcpy(&arg.stateid, &state->stateid, sizeof(arg.stateid));
6532 /* Serialization for the sequence id */
6533 arg.seqid = sp->so_seqid,
6534 @@ -441,15 +694,34 @@ nfs4_do_close(struct inode *inode, struc
6539 -nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode)
6540 +int nfs4_do_close(struct inode *inode, struct nfs4_state *state)
6542 + struct nfs_server *server = NFS_SERVER(state->inode);
6543 + struct nfs4_exception exception = { };
6546 + err = _nfs4_do_close(inode, state);
6548 + case -NFS4ERR_STALE_STATEID:
6549 + case -NFS4ERR_EXPIRED:
6550 + nfs4_schedule_state_recovery(server->nfs4_state);
6555 + err = nfs4_handle_exception(server, err, &exception);
6556 + } while (exception.retry);
6560 +static int _nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode)
6562 struct nfs4_state_owner *sp = state->owner;
6564 struct nfs_closeargs arg = {
6565 .fh = NFS_FH(inode),
6566 .seqid = sp->so_seqid,
6567 - .share_access = mode,
6568 + .open_flags = mode,
6570 struct nfs_closeres res;
6571 struct rpc_message msg = {
6572 @@ -458,6 +730,8 @@ nfs4_do_downgrade(struct inode *inode, s
6576 + if (test_bit(NFS_DELEGATED_STATE, &state->flags))
6578 memcpy(&arg.stateid, &state->stateid, sizeof(arg.stateid));
6579 status = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0);
6580 nfs4_increment_seqid(status, sp);
6581 @@ -467,6 +741,26 @@ nfs4_do_downgrade(struct inode *inode, s
6585 +int nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode)
6587 + struct nfs_server *server = NFS_SERVER(state->inode);
6588 + struct nfs4_exception exception = { };
6591 + err = _nfs4_do_downgrade(inode, state, mode);
6593 + case -NFS4ERR_STALE_STATEID:
6594 + case -NFS4ERR_EXPIRED:
6595 + nfs4_schedule_state_recovery(server->nfs4_state);
6597 + state->state = mode;
6600 + err = nfs4_handle_exception(server, err, &exception);
6601 + } while (exception.retry);
6606 nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
6608 @@ -500,7 +794,9 @@ nfs4_open_revalidate(struct inode *dir,
6609 struct inode *inode;
6611 cred = rpcauth_lookupcred(NFS_SERVER(dir)->client->cl_auth, 0);
6612 - state = nfs4_do_open(dir, &dentry->d_name, openflags, NULL, cred);
6613 + state = nfs4_open_delegated(dentry->d_inode, openflags, cred);
6614 + if (IS_ERR(state))
6615 + state = nfs4_do_open(dir, &dentry->d_name, openflags, NULL, cred);
6617 if (state == ERR_PTR(-ENOENT) && dentry->d_inode == 0)
6619 @@ -518,7 +814,7 @@ nfs4_open_revalidate(struct inode *dir,
6623 -static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
6624 +static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
6626 struct nfs4_server_caps_res res = {};
6627 struct rpc_message msg = {
6628 @@ -542,7 +838,19 @@ static int nfs4_server_capabilities(stru
6632 -static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
6633 +static int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
6635 + struct nfs4_exception exception = { };
6638 + err = nfs4_handle_exception(server,
6639 + _nfs4_server_capabilities(server, fhandle),
6641 + } while (exception.retry);
6645 +static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
6646 struct nfs_fsinfo *info)
6648 struct nfs_fattr * fattr = info->fattr;
6649 @@ -563,6 +871,19 @@ static int nfs4_lookup_root(struct nfs_s
6650 return rpc_call_sync(server->client, &msg, 0);
6653 +static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
6654 + struct nfs_fsinfo *info)
6656 + struct nfs4_exception exception = { };
6659 + err = nfs4_handle_exception(server,
6660 + _nfs4_lookup_root(server, fhandle, info),
6662 + } while (exception.retry);
6666 static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
6667 struct nfs_fsinfo *info)
6669 @@ -597,6 +918,8 @@ static int nfs4_proc_get_root(struct nfs
6671 p = server->mnt_path;
6673 + struct nfs4_exception exception = { };
6678 @@ -606,9 +929,13 @@ static int nfs4_proc_get_root(struct nfs
6683 - status = rpc_call_sync(server->client, &msg, 0);
6687 + status = nfs4_handle_exception(server,
6688 + rpc_call_sync(server->client, &msg, 0),
6690 + } while (exception.retry);
6693 if (status == -ENOENT) {
6694 printk(KERN_NOTICE "NFS: mount path %s does not exist!\n", server->mnt_path);
6695 @@ -621,10 +948,10 @@ static int nfs4_proc_get_root(struct nfs
6697 status = nfs4_do_fsinfo(server, fhandle, info);
6699 - return nfs4_map_errors(status);
6703 -static int nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr)
6704 +static int _nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr)
6706 struct nfs_server *server = NFS_SERVER(inode);
6707 struct nfs4_getattr_arg args = {
6708 @@ -642,8 +969,19 @@ static int nfs4_proc_getattr(struct inod
6712 + return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
6715 - return nfs4_map_errors(rpc_call_sync(NFS_CLIENT(inode), &msg, 0));
6716 +static int nfs4_proc_getattr(struct inode *inode, struct nfs_fattr *fattr)
6718 + struct nfs4_exception exception = { };
6721 + err = nfs4_handle_exception(NFS_SERVER(inode),
6722 + _nfs4_proc_getattr(inode, fattr),
6724 + } while (exception.retry);
6729 @@ -678,9 +1016,13 @@ nfs4_proc_setattr(struct dentry *dentry,
6731 struct rpc_cred *cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
6732 state = nfs4_find_state(inode, cred, FMODE_WRITE);
6734 - state = nfs4_do_open(dentry->d_parent->d_inode,
6735 - &dentry->d_name, FMODE_WRITE, NULL, cred);
6736 + if (state == NULL) {
6737 + state = nfs4_open_delegated(dentry->d_inode,
6738 + FMODE_WRITE, cred);
6739 + if (IS_ERR(state))
6740 + state = nfs4_do_open(dentry->d_parent->d_inode,
6741 + &dentry->d_name, FMODE_WRITE,
6746 @@ -705,7 +1047,7 @@ out:
6750 -static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
6751 +static int _nfs4_proc_lookup(struct inode *dir, struct qstr *name,
6752 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
6755 @@ -731,12 +1073,23 @@ static int nfs4_proc_lookup(struct inode
6756 dprintk("NFS call lookup %s\n", name->name);
6757 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
6758 dprintk("NFS reply lookup: %d\n", status);
6759 - return nfs4_map_errors(status);
6763 -static int nfs4_proc_access(struct inode *inode, struct rpc_cred *cred, int mode)
6764 +static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
6766 + struct nfs4_exception exception = { };
6769 + err = nfs4_handle_exception(NFS_SERVER(dir),
6770 + _nfs4_proc_lookup(dir, name, fhandle, fattr),
6772 + } while (exception.retry);
6776 +static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
6779 struct nfs4_accessargs args = {
6780 .fh = NFS_FH(inode),
6782 @@ -745,8 +1098,10 @@ static int nfs4_proc_access(struct inode
6783 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
6787 + .rpc_cred = entry->cred,
6789 + int mode = entry->mask;
6793 * Determine which access bits we want to ask for...
6794 @@ -758,8 +1113,7 @@ static int nfs4_proc_access(struct inode
6795 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE;
6796 if (mode & MAY_EXEC)
6797 args.access |= NFS4_ACCESS_LOOKUP;
6801 if (mode & MAY_WRITE)
6802 args.access |= NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND;
6803 if (mode & MAY_EXEC)
6804 @@ -767,13 +1121,27 @@ static int nfs4_proc_access(struct inode
6806 status = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
6808 - if (args.access != res.supported) {
6809 - printk(KERN_NOTICE "NFS: server didn't support all access bits!\n");
6810 - status = -ENOTSUPP;
6811 - } else if ((args.access & res.access) != args.access)
6814 + if (res.access & NFS4_ACCESS_READ)
6815 + entry->mask |= MAY_READ;
6816 + if (res.access & (NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_DELETE))
6817 + entry->mask |= MAY_WRITE;
6818 + if (res.access & (NFS4_ACCESS_LOOKUP|NFS4_ACCESS_EXECUTE))
6819 + entry->mask |= MAY_EXEC;
6821 - return nfs4_map_errors(status);
6825 +static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
6827 + struct nfs4_exception exception = { };
6830 + err = nfs4_handle_exception(NFS_SERVER(inode),
6831 + _nfs4_proc_access(inode, entry),
6833 + } while (exception.retry);
6838 @@ -800,7 +1168,7 @@ static int nfs4_proc_access(struct inode
6839 * Both of these changes to the XDR layer would in fact be quite
6840 * minor, but I decided to leave them for a subsequent patch.
6842 -static int nfs4_proc_readlink(struct inode *inode, struct page *page)
6843 +static int _nfs4_proc_readlink(struct inode *inode, struct page *page)
6845 struct nfs4_readlink args = {
6846 .fh = NFS_FH(inode),
6847 @@ -813,11 +1181,22 @@ static int nfs4_proc_readlink(struct ino
6851 - return nfs4_map_errors(rpc_call_sync(NFS_CLIENT(inode), &msg, 0));
6852 + return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
6856 -nfs4_proc_read(struct nfs_read_data *rdata, struct file *filp)
6857 +static int nfs4_proc_readlink(struct inode *inode, struct page *page)
6859 + struct nfs4_exception exception = { };
6862 + err = nfs4_handle_exception(NFS_SERVER(inode),
6863 + _nfs4_proc_readlink(inode, page),
6865 + } while (exception.retry);
6869 +static int _nfs4_proc_read(struct nfs_read_data *rdata)
6871 int flags = rdata->flags;
6872 struct inode *inode = rdata->inode;
6873 @@ -827,6 +1206,7 @@ nfs4_proc_read(struct nfs_read_data *rda
6874 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ],
6875 .rpc_argp = &rdata->args,
6876 .rpc_resp = &rdata->res,
6877 + .rpc_cred = rdata->cred,
6879 unsigned long timestamp = jiffies;
6881 @@ -834,29 +1214,27 @@ nfs4_proc_read(struct nfs_read_data *rda
6882 dprintk("NFS call read %d @ %Ld\n", rdata->args.count,
6883 (long long) rdata->args.offset);
6886 - * Try first to use O_RDONLY, then O_RDWR stateid.
6889 - struct nfs4_state *state;
6890 - state = (struct nfs4_state *)filp->private_data;
6891 - rdata->args.state = state;
6892 - msg.rpc_cred = state->owner->so_cred;
6894 - rdata->args.state = NULL;
6895 - msg.rpc_cred = NFS_I(inode)->mm_cred;
6899 status = rpc_call_sync(server->client, &msg, flags);
6901 renew_lease(server, timestamp);
6902 dprintk("NFS reply read: %d\n", status);
6903 - return nfs4_map_errors(status);
6908 -nfs4_proc_write(struct nfs_write_data *wdata, struct file *filp)
6909 +static int nfs4_proc_read(struct nfs_read_data *rdata)
6911 + struct nfs4_exception exception = { };
6914 + err = nfs4_handle_exception(NFS_SERVER(rdata->inode),
6915 + _nfs4_proc_read(rdata),
6917 + } while (exception.retry);
6921 +static int _nfs4_proc_write(struct nfs_write_data *wdata)
6923 int rpcflags = wdata->flags;
6924 struct inode *inode = wdata->inode;
6925 @@ -866,33 +1244,32 @@ nfs4_proc_write(struct nfs_write_data *w
6926 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE],
6927 .rpc_argp = &wdata->args,
6928 .rpc_resp = &wdata->res,
6929 + .rpc_cred = wdata->cred,
6933 dprintk("NFS call write %d @ %Ld\n", wdata->args.count,
6934 (long long) wdata->args.offset);
6937 - * Try first to use O_WRONLY, then O_RDWR stateid.
6940 - struct nfs4_state *state;
6941 - state = (struct nfs4_state *)filp->private_data;
6942 - wdata->args.state = state;
6943 - msg.rpc_cred = state->owner->so_cred;
6945 - wdata->args.state = NULL;
6946 - msg.rpc_cred = NFS_I(inode)->mm_cred;
6950 status = rpc_call_sync(server->client, &msg, rpcflags);
6951 dprintk("NFS reply write: %d\n", status);
6952 - return nfs4_map_errors(status);
6957 -nfs4_proc_commit(struct nfs_write_data *cdata, struct file *filp)
6958 +static int nfs4_proc_write(struct nfs_write_data *wdata)
6960 + struct nfs4_exception exception = { };
6963 + err = nfs4_handle_exception(NFS_SERVER(wdata->inode),
6964 + _nfs4_proc_write(wdata),
6966 + } while (exception.retry);
6970 +static int _nfs4_proc_commit(struct nfs_write_data *cdata)
6972 struct inode *inode = cdata->inode;
6973 struct nfs_fattr *fattr = cdata->res.fattr;
6974 @@ -901,24 +1278,29 @@ nfs4_proc_commit(struct nfs_write_data *
6975 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT],
6976 .rpc_argp = &cdata->args,
6977 .rpc_resp = &cdata->res,
6978 + .rpc_cred = cdata->cred,
6982 dprintk("NFS call commit %d @ %Ld\n", cdata->args.count,
6983 (long long) cdata->args.offset);
6986 - * Try first to use O_WRONLY, then O_RDWR stateid.
6989 - msg.rpc_cred = ((struct nfs4_state *)filp->private_data)->owner->so_cred;
6991 - msg.rpc_cred = NFS_I(inode)->mm_cred;
6994 status = rpc_call_sync(server->client, &msg, 0);
6995 dprintk("NFS reply commit: %d\n", status);
6996 - return nfs4_map_errors(status);
7000 +static int nfs4_proc_commit(struct nfs_write_data *cdata)
7002 + struct nfs4_exception exception = { };
7005 + err = nfs4_handle_exception(NFS_SERVER(cdata->inode),
7006 + _nfs4_proc_commit(cdata),
7008 + } while (exception.retry);
7013 @@ -965,7 +1347,7 @@ nfs4_proc_create(struct inode *dir, stru
7017 -static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
7018 +static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
7020 struct nfs4_remove_arg args = {
7022 @@ -982,7 +1364,19 @@ static int nfs4_proc_remove(struct inode
7023 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
7025 update_changeattr(dir, &res);
7026 - return nfs4_map_errors(status);
7030 +static int nfs4_proc_remove(struct inode *dir, struct qstr *name)
7032 + struct nfs4_exception exception = { };
7035 + err = nfs4_handle_exception(NFS_SERVER(dir),
7036 + _nfs4_proc_remove(dir, name),
7038 + } while (exception.retry);
7042 struct unlink_desc {
7043 @@ -1023,7 +1417,7 @@ static int nfs4_proc_unlink_done(struct
7047 -static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
7048 +static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
7049 struct inode *new_dir, struct qstr *new_name)
7051 struct nfs4_rename_arg arg = {
7052 @@ -1046,10 +1440,24 @@ static int nfs4_proc_rename(struct inode
7053 update_changeattr(old_dir, &res.old_cinfo);
7054 update_changeattr(new_dir, &res.new_cinfo);
7056 - return nfs4_map_errors(status);
7060 -static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
7061 +static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
7062 + struct inode *new_dir, struct qstr *new_name)
7064 + struct nfs4_exception exception = { };
7067 + err = nfs4_handle_exception(NFS_SERVER(old_dir),
7068 + _nfs4_proc_rename(old_dir, old_name,
7069 + new_dir, new_name),
7071 + } while (exception.retry);
7075 +static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
7077 struct nfs4_link_arg arg = {
7078 .fh = NFS_FH(inode),
7079 @@ -1068,10 +1476,22 @@ static int nfs4_proc_link(struct inode *
7081 update_changeattr(dir, &cinfo);
7083 - return nfs4_map_errors(status);
7087 +static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name)
7089 + struct nfs4_exception exception = { };
7092 + err = nfs4_handle_exception(NFS_SERVER(inode),
7093 + _nfs4_proc_link(inode, dir, name),
7095 + } while (exception.retry);
7099 -static int nfs4_proc_symlink(struct inode *dir, struct qstr *name,
7100 +static int _nfs4_proc_symlink(struct inode *dir, struct qstr *name,
7101 struct qstr *path, struct iattr *sattr, struct nfs_fh *fhandle,
7102 struct nfs_fattr *fattr)
7104 @@ -1090,22 +1510,39 @@ static int nfs4_proc_symlink(struct inod
7107 struct rpc_message msg = {
7108 - .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE],
7109 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SYMLINK],
7115 + if (path->len > NFS4_MAXPATHLEN)
7116 + return -ENAMETOOLONG;
7117 arg.u.symlink = path;
7120 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
7122 update_changeattr(dir, &res.dir_cinfo);
7123 - return nfs4_map_errors(status);
7127 -static int nfs4_proc_mkdir(struct inode *dir, struct qstr *name,
7128 +static int nfs4_proc_symlink(struct inode *dir, struct qstr *name,
7129 + struct qstr *path, struct iattr *sattr, struct nfs_fh *fhandle,
7130 + struct nfs_fattr *fattr)
7132 + struct nfs4_exception exception = { };
7135 + err = nfs4_handle_exception(NFS_SERVER(dir),
7136 + _nfs4_proc_symlink(dir, name, path, sattr,
7139 + } while (exception.retry);
7143 +static int _nfs4_proc_mkdir(struct inode *dir, struct qstr *name,
7144 struct iattr *sattr, struct nfs_fh *fhandle,
7145 struct nfs_fattr *fattr)
7147 @@ -1135,10 +1572,25 @@ static int nfs4_proc_mkdir(struct inode
7148 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
7150 update_changeattr(dir, &res.dir_cinfo);
7151 - return nfs4_map_errors(status);
7155 -static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
7156 +static int nfs4_proc_mkdir(struct inode *dir, struct qstr *name,
7157 + struct iattr *sattr, struct nfs_fh *fhandle,
7158 + struct nfs_fattr *fattr)
7160 + struct nfs4_exception exception = { };
7163 + err = nfs4_handle_exception(NFS_SERVER(dir),
7164 + _nfs4_proc_mkdir(dir, name, sattr,
7167 + } while (exception.retry);
7171 +static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
7172 u64 cookie, struct page *page, unsigned int count, int plus)
7174 struct inode *dir = dentry->d_inode;
7175 @@ -1164,10 +1616,24 @@ static int nfs4_proc_readdir(struct dent
7177 memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
7179 - return nfs4_map_errors(status);
7183 -static int nfs4_proc_mknod(struct inode *dir, struct qstr *name,
7184 +static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
7185 + u64 cookie, struct page *page, unsigned int count, int plus)
7187 + struct nfs4_exception exception = { };
7190 + err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode),
7191 + _nfs4_proc_readdir(dentry, cred, cookie,
7192 + page, count, plus),
7194 + } while (exception.retry);
7198 +static int _nfs4_proc_mknod(struct inode *dir, struct qstr *name,
7199 struct iattr *sattr, dev_t rdev, struct nfs_fh *fh,
7200 struct nfs_fattr *fattr)
7202 @@ -1214,10 +1680,25 @@ static int nfs4_proc_mknod(struct inode
7203 status = rpc_call_sync(NFS_CLIENT(dir), &msg, 0);
7205 update_changeattr(dir, &res.dir_cinfo);
7206 - return nfs4_map_errors(status);
7210 +static int nfs4_proc_mknod(struct inode *dir, struct qstr *name,
7211 + struct iattr *sattr, dev_t rdev, struct nfs_fh *fh,
7212 + struct nfs_fattr *fattr)
7214 + struct nfs4_exception exception = { };
7217 + err = nfs4_handle_exception(NFS_SERVER(dir),
7218 + _nfs4_proc_mknod(dir, name, sattr, rdev,
7221 + } while (exception.retry);
7225 -static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
7226 +static int _nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle,
7227 struct nfs_fsstat *fsstat)
7229 struct nfs4_statfs_arg args = {
7230 @@ -1231,10 +1712,22 @@ static int nfs4_proc_statfs(struct nfs_s
7233 fsstat->fattr->valid = 0;
7234 - return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0));
7235 + return rpc_call_sync(server->client, &msg, 0);
7238 -static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
7239 +static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat)
7241 + struct nfs4_exception exception = { };
7244 + err = nfs4_handle_exception(server,
7245 + _nfs4_proc_statfs(server, fhandle, fsstat),
7247 + } while (exception.retry);
7251 +static int _nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle,
7252 struct nfs_fsinfo *fsinfo)
7254 struct nfs4_fsinfo_arg args = {
7255 @@ -1247,16 +1740,29 @@ static int nfs4_do_fsinfo(struct nfs_ser
7259 - return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0));
7260 + return rpc_call_sync(server->client, &msg, 0);
7263 +static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
7265 + struct nfs4_exception exception = { };
7269 + err = nfs4_handle_exception(server,
7270 + _nfs4_do_fsinfo(server, fhandle, fsinfo),
7272 + } while (exception.retry);
7276 static int nfs4_proc_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo)
7278 fsinfo->fattr->valid = 0;
7279 - return nfs4_map_errors(nfs4_do_fsinfo(server, fhandle, fsinfo));
7280 + return nfs4_do_fsinfo(server, fhandle, fsinfo);
7283 -static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
7284 +static int _nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
7285 struct nfs_pathconf *pathconf)
7287 struct nfs4_pathconf_arg args = {
7288 @@ -1276,7 +1782,21 @@ static int nfs4_proc_pathconf(struct nfs
7291 pathconf->fattr->valid = 0;
7292 - return nfs4_map_errors(rpc_call_sync(server->client, &msg, 0));
7293 + return rpc_call_sync(server->client, &msg, 0);
7296 +static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
7297 + struct nfs_pathconf *pathconf)
7299 + struct nfs4_exception exception = { };
7303 + err = nfs4_handle_exception(server,
7304 + _nfs4_proc_pathconf(server, fhandle, pathconf),
7306 + } while (exception.retry);
7311 @@ -1467,8 +1987,10 @@ static int
7312 nfs4_proc_file_open(struct inode *inode, struct file *filp)
7314 struct dentry *dentry = filp->f_dentry;
7315 - struct nfs4_state *state;
7316 + struct nfs_open_context *ctx;
7317 + struct nfs4_state *state = NULL;
7318 struct rpc_cred *cred;
7319 + int status = -ENOMEM;
7321 dprintk("nfs4_proc_file_open: starting on (%.*s/%.*s)\n",
7322 (int)dentry->d_parent->d_name.len,
7323 @@ -1478,21 +2000,28 @@ nfs4_proc_file_open(struct inode *inode,
7325 /* Find our open stateid */
7326 cred = rpcauth_lookupcred(NFS_SERVER(inode)->client->cl_auth, 0);
7327 - state = nfs4_find_state(inode, cred, filp->f_mode);
7328 + if (unlikely(cred == NULL))
7330 + ctx = alloc_nfs_open_context(dentry, cred);
7332 - if (state == NULL) {
7333 - printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__);
7334 - return -EIO; /* ERACE actually */
7336 + if (unlikely(ctx == NULL))
7338 + status = -EIO; /* ERACE actually */
7339 + state = nfs4_find_state(inode, cred, filp->f_mode);
7340 + if (unlikely(state == NULL))
7342 + ctx->state = state;
7343 nfs4_close_state(state, filp->f_mode);
7344 - if (filp->f_mode & FMODE_WRITE) {
7346 - nfs_set_mmcred(inode, state->owner->so_cred);
7347 + ctx->mode = filp->f_mode;
7348 + nfs_file_set_open_context(filp, ctx);
7349 + put_nfs_open_context(ctx);
7350 + if (filp->f_mode & FMODE_WRITE)
7351 nfs_begin_data_update(inode);
7354 - filp->private_data = state;
7357 + printk(KERN_WARNING "NFS: v4 raced in function %s\n", __FUNCTION__);
7358 + put_nfs_open_context(ctx);
7363 @@ -1501,35 +2030,148 @@ nfs4_proc_file_open(struct inode *inode,
7365 nfs4_proc_file_release(struct inode *inode, struct file *filp)
7367 - struct nfs4_state *state = (struct nfs4_state *)filp->private_data;
7370 - nfs4_close_state(state, filp->f_mode);
7371 - if (filp->f_mode & FMODE_WRITE) {
7373 + if (filp->f_mode & FMODE_WRITE)
7374 nfs_end_data_update(inode);
7377 + nfs_file_clear_open_context(filp);
7382 - * Set up the nfspage struct with the right state info and credentials
7385 +nfs4_read_acl_attr(struct inode *inode, char *buf, ssize_t buflen)
7387 + struct nfs_inode *nfsi = NFS_I(inode);
7390 + spin_lock(&inode->i_lock);
7391 + if (buf == NULL && nfsi->acl_len)
7394 + if (nfsi->acl_len == 0)
7396 + ret = -ERANGE; /* see getxattr(2) man page */
7397 + if (nfsi->acl_len > buflen)
7399 + memcpy(buf, nfsi->acl, nfsi->acl_len);
7401 + ret = nfsi->acl_len;
7403 + spin_unlock(&inode->i_lock);
7408 -nfs4_request_init(struct nfs_page *req, struct file *filp)
7409 +nfs4_set_acl_attr(struct inode *inode, char *buf, ssize_t buflen)
7411 - struct nfs4_state *state;
7412 + struct nfs_inode *nfsi = NFS_I(inode);
7415 - req->wb_cred = get_rpccred(NFS_I(req->wb_inode)->mm_cred);
7416 - req->wb_state = NULL;
7418 + spin_lock(&inode->i_lock);
7421 + nfsi->acl_len = buflen;
7422 + spin_unlock(&inode->i_lock);
7426 +nfs4_write_acl_attr(struct inode *inode, const char *buf, ssize_t buflen)
7428 + void *abuf = NULL;
7430 + if (buflen > PAGE_SIZE)
7432 + abuf = kmalloc(buflen, GFP_KERNEL);
7435 + memcpy(abuf, buf, buflen);
7436 + nfs4_set_acl_attr(inode, abuf, buflen);
7439 + nfs4_set_acl_attr(inode, NULL, 0);
7444 +nfs4_zap_acl_attr(struct inode *inode)
7446 + nfs4_set_acl_attr(inode, NULL, 0);
7450 +nfs4_server_supports_acls(struct nfs_server *server)
7452 + return (server->caps & NFS_CAP_ACLS)
7453 + && (server->acl_bitmask & ACL4_SUPPORT_ALLOW_ACL)
7454 + && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
7458 +nfs4_proc_get_acl(struct inode *inode, void *buf, ssize_t buflen)
7460 + struct nfs_server *server = NFS_SERVER(inode);
7461 + struct nfs_getaclres res = {
7463 + .acl_len = buflen,
7466 + struct rpc_message msg = {
7467 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETACL],
7468 + .rpc_argp = NFS_FH(inode),
7473 + if (!nfs4_server_supports_acls(server))
7474 + return -EOPNOTSUPP;
7476 + ret = nfs_revalidate_inode(NFS_SERVER(inode), inode);
7479 + ret = nfs4_read_acl_attr(inode, buf, buflen);
7480 + if (ret == -ENOENT) {
7481 + ret = rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
7483 + nfs4_write_acl_attr(inode, res.acl, res.acl_len);
7484 + ret = res.acl_len;
7486 + if (res.acl != buf) {
7487 + /* xdr decode allocated the memory: */
7491 - state = (struct nfs4_state *)filp->private_data;
7492 - req->wb_state = state;
7493 - req->wb_cred = get_rpccred(state->owner->so_cred);
7494 - req->wb_lockowner = current->files;
7501 +nfs4_proc_set_acl(struct inode *inode, const void *buf, ssize_t buflen)
7503 + struct nfs_server *server = NFS_SERVER(inode);
7504 + struct nfs_setaclargs arg = {
7505 + .fh = NFS_FH(inode),
7508 + .acl_len = buflen,
7510 + struct rpc_message msg = {
7511 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETACL],
7517 + if (!nfs4_server_supports_acls(server))
7518 + return -EOPNOTSUPP;
7520 + /* XXX: should check for buflen too large? */
7523 + ret = rpc_call_sync(NFS_SERVER(inode)->client, &msg, 0);
7527 + nfs4_write_acl_attr(inode, buf, buflen);
7533 @@ -1545,11 +2187,13 @@ nfs4_async_handle_error(struct rpc_task
7534 case -NFS4ERR_EXPIRED:
7535 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL);
7536 nfs4_schedule_state_recovery(clp);
7537 + if (test_bit(NFS4CLNT_OK, &clp->cl_state))
7538 + rpc_wake_up_task(task);
7539 task->tk_status = 0;
7541 case -NFS4ERR_GRACE:
7542 case -NFS4ERR_DELAY:
7543 - rpc_delay(task, NFS4_POLL_RETRY_TIME);
7544 + rpc_delay(task, NFS4_POLL_RETRY_MAX);
7545 task->tk_status = 0;
7547 case -NFS4ERR_OLD_STATEID:
7548 @@ -1560,12 +2204,11 @@ nfs4_async_handle_error(struct rpc_task
7553 -nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp)
7554 +int nfs4_wait_clnt_recover(struct rpc_clnt *clnt, struct nfs4_client *clp)
7558 - int interruptible, res;
7559 + int interruptible, res = 0;
7563 @@ -1573,101 +2216,85 @@ nfs4_wait_clnt_recover(struct rpc_clnt *
7564 interruptible = TASK_UNINTERRUPTIBLE;
7566 interruptible = TASK_INTERRUPTIBLE;
7569 - prepare_to_wait(&clp->cl_waitq, &wait, interruptible);
7570 - nfs4_schedule_state_recovery(clp);
7571 - if (test_bit(NFS4CLNT_OK, &clp->cl_state) &&
7572 - !test_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state))
7574 - if (clnt->cl_intr && signalled()) {
7575 - res = -ERESTARTSYS;
7578 + prepare_to_wait(&clp->cl_waitq, &wait, interruptible);
7579 + nfs4_schedule_state_recovery(clp);
7580 + if (clnt->cl_intr && signalled())
7581 + res = -ERESTARTSYS;
7582 + else if (!test_bit(NFS4CLNT_OK, &clp->cl_state))
7584 - } while(!test_bit(NFS4CLNT_OK, &clp->cl_state));
7585 finish_wait(&clp->cl_waitq, &wait);
7586 rpc_clnt_sigunmask(clnt, &oldset);
7591 -nfs4_delay(struct rpc_clnt *clnt)
7592 +static int nfs4_delay(struct rpc_clnt *clnt, long *timeout)
7599 + if (*timeout <= 0)
7600 + *timeout = NFS4_POLL_RETRY_MIN;
7601 + if (*timeout > NFS4_POLL_RETRY_MAX)
7602 + *timeout = NFS4_POLL_RETRY_MAX;
7603 rpc_clnt_sigmask(clnt, &oldset);
7604 if (clnt->cl_intr) {
7605 set_current_state(TASK_INTERRUPTIBLE);
7606 - schedule_timeout(NFS4_POLL_RETRY_TIME);
7607 + schedule_timeout(*timeout);
7611 set_current_state(TASK_UNINTERRUPTIBLE);
7612 - schedule_timeout(NFS4_POLL_RETRY_TIME);
7613 + schedule_timeout(*timeout);
7615 rpc_clnt_sigunmask(clnt, &oldset);
7620 /* This is the error handling routine for processes that are allowed
7624 -nfs4_handle_error(struct nfs_server *server, int errorcode)
7625 +int nfs4_handle_exception(struct nfs_server *server, int errorcode, struct nfs4_exception *exception)
7627 struct nfs4_client *clp = server->nfs4_state;
7628 int ret = errorcode;
7630 + exception->retry = 0;
7634 case -NFS4ERR_STALE_CLIENTID:
7635 case -NFS4ERR_STALE_STATEID:
7636 case -NFS4ERR_EXPIRED:
7637 ret = nfs4_wait_clnt_recover(server->client, clp);
7639 + exception->retry = 1;
7641 case -NFS4ERR_GRACE:
7642 case -NFS4ERR_DELAY:
7643 - ret = nfs4_delay(server->client);
7644 + ret = nfs4_delay(server->client, &exception->timeout);
7646 + exception->retry = 1;
7648 case -NFS4ERR_OLD_STATEID:
7651 + exception->retry = 1;
7653 /* We failed to handle the error */
7654 return nfs4_map_errors(ret);
7659 -nfs4_request_compatible(struct nfs_page *req, struct file *filp, struct page *page)
7661 - struct nfs4_state *state = NULL;
7662 - struct rpc_cred *cred = NULL;
7664 - if (req->wb_file != filp)
7666 - if (req->wb_page != page)
7668 - state = (struct nfs4_state *)filp->private_data;
7669 - if (req->wb_state != state)
7671 - if (req->wb_lockowner != current->files)
7673 - cred = state->owner->so_cred;
7674 - if (req->wb_cred != cred)
7680 -nfs4_proc_setclientid(struct nfs4_client *clp,
7681 - u32 program, unsigned short port)
7682 +int nfs4_proc_setclientid(struct nfs4_client *clp, u32 program, unsigned short port)
7685 - struct nfs4_setclientid setclientid;
7686 - struct timespec tv;
7687 + static nfs4_verifier sc_verifier;
7688 + static int initialized;
7690 + struct nfs4_setclientid setclientid = {
7691 + .sc_verifier = &sc_verifier,
7692 + .sc_prog = program,
7694 struct rpc_message msg = {
7695 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETCLIENTID],
7696 .rpc_argp = &setclientid,
7697 @@ -1675,15 +2302,24 @@ nfs4_proc_setclientid(struct nfs4_client
7698 .rpc_cred = clp->cl_cred,
7701 - tv = CURRENT_TIME;
7702 - p = (u32*)setclientid.sc_verifier.data;
7703 - *p++ = (u32)tv.tv_sec;
7704 - *p = (u32)tv.tv_nsec;
7705 - setclientid.sc_name = clp->cl_ipaddr;
7706 - sprintf(setclientid.sc_netid, "tcp");
7707 - sprintf(setclientid.sc_uaddr, "%s.%d.%d", clp->cl_ipaddr, port >> 8, port & 255);
7708 - setclientid.sc_prog = htonl(program);
7709 - setclientid.sc_cb_ident = 0;
7710 + if (!initialized) {
7711 + struct timespec boot_time;
7715 + boot_time = CURRENT_TIME;
7716 + p = (u32*)sc_verifier.data;
7717 + *p++ = htonl((u32)boot_time.tv_sec);
7718 + *p = htonl((u32)boot_time.tv_nsec);
7720 + setclientid.sc_name_len = scnprintf(setclientid.sc_name,
7721 + sizeof(setclientid.sc_name), "%s/%u.%u.%u.%u",
7722 + clp->cl_ipaddr, NIPQUAD(clp->cl_addr.s_addr));
7723 + setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
7724 + sizeof(setclientid.sc_netid), "tcp");
7725 + setclientid.sc_uaddr_len = scnprintf(setclientid.sc_uaddr,
7726 + sizeof(setclientid.sc_uaddr), "%s.%d.%d",
7727 + clp->cl_ipaddr, port >> 8, port & 255);
7729 return rpc_call_sync(clp->cl_rpcclient, &msg, 0);
7731 @@ -1712,6 +2348,40 @@ nfs4_proc_setclientid_confirm(struct nfs
7735 +static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
7737 + struct nfs4_delegreturnargs args = {
7738 + .fhandle = NFS_FH(inode),
7739 + .stateid = stateid,
7741 + struct rpc_message msg = {
7742 + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DELEGRETURN],
7743 + .rpc_argp = &args,
7747 + return rpc_call_sync(NFS_CLIENT(inode), &msg, 0);
7750 +int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid)
7752 + struct nfs_server *server = NFS_SERVER(inode);
7753 + struct nfs4_exception exception = { };
7756 + err = _nfs4_proc_delegreturn(inode, cred, stateid);
7758 + case -NFS4ERR_STALE_STATEID:
7759 + case -NFS4ERR_EXPIRED:
7760 + nfs4_schedule_state_recovery(server->nfs4_state);
7764 + err = nfs4_handle_exception(server, err, &exception);
7765 + } while (exception.retry);
7769 #define NFS4_LOCK_MINTIMEOUT (1 * HZ)
7770 #define NFS4_LOCK_MAXTIMEOUT (30 * HZ)
7772 @@ -1753,8 +2423,7 @@ nfs4_lck_length(struct file_lock *reques
7773 return request->fl_end - request->fl_start + 1;
7777 -nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7778 +static int _nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7780 struct inode *inode = state->inode;
7781 struct nfs_server *server = NFS_SERVER(inode);
7782 @@ -1778,9 +2447,10 @@ nfs4_proc_getlk(struct nfs4_state *state
7783 struct nfs4_lock_state *lsp;
7786 + down_read(&clp->cl_sem);
7787 nlo.clientid = clp->cl_clientid;
7788 down(&state->lock_sema);
7789 - lsp = nfs4_find_lock_state(state, request->fl_owner);
7790 + lsp = nfs4_find_lock_state(state, request->fl_pid);
7792 nlo.id = lsp->ls_id;
7794 @@ -1811,14 +2481,28 @@ nfs4_proc_getlk(struct nfs4_state *state
7796 nfs4_put_lock_state(lsp);
7797 up(&state->lock_sema);
7798 - return nfs4_map_errors(status);
7799 + up_read(&clp->cl_sem);
7804 -nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
7805 +static int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7807 + struct nfs4_exception exception = { };
7811 + err = nfs4_handle_exception(NFS_SERVER(state->inode),
7812 + _nfs4_proc_getlk(state, cmd, request),
7814 + } while (exception.retry);
7818 +static int _nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
7820 struct inode *inode = state->inode;
7821 struct nfs_server *server = NFS_SERVER(inode);
7822 + struct nfs4_client *clp = server->nfs4_state;
7823 struct nfs_lockargs arg = {
7824 .fh = NFS_FH(inode),
7825 .type = nfs4_lck_type(cmd, request),
7826 @@ -1838,29 +2522,46 @@ nfs4_proc_unlck(struct nfs4_state *state
7827 struct nfs_locku_opargs luargs;
7830 + down_read(&clp->cl_sem);
7831 down(&state->lock_sema);
7832 - lsp = nfs4_find_lock_state(state, request->fl_owner);
7833 + lsp = nfs4_find_lock_state(state, request->fl_pid);
7836 - luargs.seqid = lsp->ls_seqid;
7837 - memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
7838 - arg.u.locku = &luargs;
7839 - status = rpc_call_sync(server->client, &msg, 0);
7840 - nfs4_increment_lock_seqid(status, lsp);
7841 + /* We might have lost the locks! */
7842 + if ((lsp->flags & NFS_LOCK_INITIALIZED) != 0) {
7843 + luargs.seqid = lsp->ls_seqid;
7844 + memcpy(&luargs.stateid, &lsp->ls_stateid, sizeof(luargs.stateid));
7845 + arg.u.locku = &luargs;
7846 + status = rpc_call_sync(server->client, &msg, 0);
7847 + nfs4_increment_lock_seqid(status, lsp);
7851 memcpy(&lsp->ls_stateid, &res.u.stateid,
7852 sizeof(lsp->ls_stateid));
7853 - nfs4_notify_unlck(inode, request, lsp);
7854 + nfs4_notify_unlck(state, request, lsp);
7856 nfs4_put_lock_state(lsp);
7858 up(&state->lock_sema);
7859 - return nfs4_map_errors(status);
7860 + up_read(&clp->cl_sem);
7865 -nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7866 +static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *request)
7868 + struct nfs4_exception exception = { };
7872 + err = nfs4_handle_exception(NFS_SERVER(state->inode),
7873 + _nfs4_proc_unlck(state, cmd, request),
7875 + } while (exception.retry);
7879 +static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *request, int reclaim)
7881 struct inode *inode = state->inode;
7882 struct nfs_server *server = NFS_SERVER(inode);
7883 @@ -1881,23 +2582,22 @@ nfs4_proc_setlk(struct nfs4_state *state
7884 .rpc_cred = state->owner->so_cred,
7886 struct nfs_lock_opargs largs = {
7887 + .reclaim = reclaim,
7888 .new_lock_owner = 0,
7892 - down(&state->lock_sema);
7893 - lsp = nfs4_find_lock_state(state, request->fl_owner);
7894 - if (lsp == NULL) {
7895 + lsp = nfs4_get_lock_state(state, request->fl_pid);
7898 + if (!(lsp->flags & NFS_LOCK_INITIALIZED)) {
7899 struct nfs4_state_owner *owner = state->owner;
7900 struct nfs_open_to_lock otl = {
7902 .clientid = server->nfs4_state->cl_clientid,
7906 - lsp = nfs4_alloc_lock_state(state, request->fl_owner);
7910 otl.lock_seqid = lsp->ls_seqid;
7911 otl.lock_owner.id = lsp->ls_id;
7912 memcpy(&otl.open_stateid, &state->stateid, sizeof(otl.open_stateid));
7913 @@ -1926,25 +2626,60 @@ nfs4_proc_setlk(struct nfs4_state *state
7914 /* save the returned stateid. */
7916 memcpy(&lsp->ls_stateid, &res.u.stateid, sizeof(nfs4_stateid));
7917 - nfs4_notify_setlk(inode, request, lsp);
7919 + nfs4_notify_setlk(state, request, lsp);
7920 } else if (status == -NFS4ERR_DENIED)
7922 nfs4_put_lock_state(lsp);
7927 +int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request)
7930 + return _nfs4_do_setlk(state, F_SETLK64, request, 1);
7932 + return _nfs4_do_setlk(state, F_SETLK, request, 1);
7936 +static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7938 + struct nfs4_client *clp = state->owner->so_client;
7941 + down_read(&clp->cl_sem);
7942 + down(&state->lock_sema);
7943 + status = _nfs4_do_setlk(state, cmd, request, 0);
7944 up(&state->lock_sema);
7945 - return nfs4_map_errors(status);
7946 + up_read(&clp->cl_sem);
7950 +static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
7952 + struct nfs4_exception exception = { };
7956 + err = nfs4_handle_exception(NFS_SERVER(state->inode),
7957 + _nfs4_proc_setlk(state, cmd, request),
7959 + } while (exception.retry);
7964 nfs4_proc_lock(struct file *filp, int cmd, struct file_lock *request)
7966 + struct nfs_open_context *ctx;
7967 struct nfs4_state *state;
7968 unsigned long timeout = NFS4_LOCK_MINTIMEOUT;
7971 /* verify open state */
7972 - state = (struct nfs4_state *)filp->private_data;
7974 + ctx = (struct nfs_open_context *)filp->private_data;
7975 + state = ctx->state;
7977 if (request->fl_start < 0 || request->fl_end < 0)
7979 @@ -1975,6 +2710,7 @@ struct nfs_rpc_ops nfs_v4_clientops = {
7980 .version = 4, /* protocol version */
7981 .dentry_ops = &nfs4_dentry_operations,
7982 .dir_inode_ops = &nfs4_dir_inode_operations,
7983 + .file_inode_ops = &nfs4_file_inode_operations,
7984 .getroot = nfs4_proc_get_root,
7985 .getattr = nfs4_proc_getattr,
7986 .setattr = nfs4_proc_setattr,
7987 @@ -2004,8 +2740,6 @@ struct nfs_rpc_ops nfs_v4_clientops = {
7988 .commit_setup = nfs4_proc_commit_setup,
7989 .file_open = nfs4_proc_file_open,
7990 .file_release = nfs4_proc_file_release,
7991 - .request_init = nfs4_request_init,
7992 - .request_compatible = nfs4_request_compatible,
7993 .lock = nfs4_proc_lock,
7996 --- linux-2.6.7/fs/nfs/callback.h.lsec 2005-03-23 14:28:22.484631512 -0700
7997 +++ linux-2.6.7/fs/nfs/callback.h 2005-03-23 14:28:22.484631512 -0700
8000 + * linux/fs/nfs/callback.h
8002 + * Copyright (C) 2004 Trond Myklebust
8004 + * NFSv4 callback definitions
8006 +#ifndef __LINUX_FS_NFS_CALLBACK_H
8007 +#define __LINUX_FS_NFS_CALLBACK_H
8009 +#define NFS4_CALLBACK 0x40000000
8010 +#define NFS4_CALLBACK_XDRSIZE 2048
8011 +#define NFS4_CALLBACK_BUFSIZE (1024 + NFS4_CALLBACK_XDRSIZE)
8013 +enum nfs4_callback_procnum {
8018 +enum nfs4_callback_opnum {
8019 + OP_CB_GETATTR = 3,
8021 + OP_CB_ILLEGAL = 10044,
8024 +struct cb_compound_hdr_arg {
8027 + unsigned int callback_ident;
8031 +struct cb_compound_hdr_res {
8038 +struct cb_getattrargs {
8039 + struct sockaddr_in *addr;
8041 + uint32_t bitmap[2];
8044 +struct cb_getattrres {
8046 + uint32_t bitmap[2];
8048 + uint64_t change_attr;
8049 + struct timespec ctime;
8050 + struct timespec mtime;
8053 +struct cb_recallargs {
8054 + struct sockaddr_in *addr;
8056 + nfs4_stateid stateid;
8057 + uint32_t truncate;
8060 +extern unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res);
8061 +extern unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
8063 +extern int nfs_callback_up(void);
8064 +extern int nfs_callback_down(void);
8066 +extern unsigned short nfs_callback_tcpport;
8068 +#endif /* __LINUX_FS_NFS_CALLBACK_H */
8069 --- linux-2.6.7/fs/nfs/direct.c.lsec 2004-06-15 23:19:53.000000000 -0600
8070 +++ linux-2.6.7/fs/nfs/direct.c 2005-03-23 14:28:22.702598376 -0700
8071 @@ -110,7 +110,7 @@ nfs_free_user_pages(struct page **pages,
8072 * nfs_direct_read_seg - Read in one iov segment. Generate separate
8073 * read RPCs for each "rsize" bytes.
8074 * @inode: target inode
8075 - * @file: target file (may be NULL)
8076 + * @ctx: target file open context
8077 * user_addr: starting address of this segment of user's buffer
8078 * count: size of this segment
8079 * file_offset: offset in file to begin the operation
8080 @@ -118,7 +118,7 @@ nfs_free_user_pages(struct page **pages,
8081 * nr_pages: size of pages array
8084 -nfs_direct_read_seg(struct inode *inode, struct file *file,
8085 +nfs_direct_read_seg(struct inode *inode, struct nfs_open_context *ctx,
8086 unsigned long user_addr, size_t count, loff_t file_offset,
8087 struct page **pages, int nr_pages)
8089 @@ -127,9 +127,10 @@ nfs_direct_read_seg(struct inode *inode,
8091 struct nfs_read_data rdata = {
8093 + .cred = ctx->cred,
8095 .fh = NFS_FH(inode),
8096 - .lockowner = current->files,
8100 .fattr = &rdata.fattr,
8101 @@ -151,7 +152,7 @@ nfs_direct_read_seg(struct inode *inode,
8102 user_addr + tot_bytes, rdata.args.pgbase, curpage);
8105 - result = NFS_PROTO(inode)->read(&rdata, file);
8106 + result = NFS_PROTO(inode)->read(&rdata);
8110 @@ -183,7 +184,7 @@ nfs_direct_read_seg(struct inode *inode,
8111 * nfs_direct_read - For each iov segment, map the user's buffer
8112 * then generate read RPCs.
8113 * @inode: target inode
8114 - * @file: target file (may be NULL)
8115 + * @ctx: target file open context
8116 * @iov: array of vectors that define I/O buffer
8117 * file_offset: offset in file to begin the operation
8118 * nr_segs: size of iovec array
8119 @@ -193,7 +194,7 @@ nfs_direct_read_seg(struct inode *inode,
8123 -nfs_direct_read(struct inode *inode, struct file *file,
8124 +nfs_direct_read(struct inode *inode, struct nfs_open_context *ctx,
8125 const struct iovec *iov, loff_t file_offset,
8126 unsigned long nr_segs)
8128 @@ -216,7 +217,7 @@ nfs_direct_read(struct inode *inode, str
8132 - result = nfs_direct_read_seg(inode, file, user_addr, size,
8133 + result = nfs_direct_read_seg(inode, ctx, user_addr, size,
8134 file_offset, pages, page_count);
8136 nfs_free_user_pages(pages, page_count, 1);
8137 @@ -239,7 +240,7 @@ nfs_direct_read(struct inode *inode, str
8138 * nfs_direct_write_seg - Write out one iov segment. Generate separate
8139 * write RPCs for each "wsize" bytes, then commit.
8140 * @inode: target inode
8141 - * @file: target file (may be NULL)
8142 + * @ctx: target file open context
8143 * user_addr: starting address of this segment of user's buffer
8144 * count: size of this segment
8145 * file_offset: offset in file to begin the operation
8146 @@ -247,7 +248,7 @@ nfs_direct_read(struct inode *inode, str
8147 * nr_pages: size of pages array
8150 -nfs_direct_write_seg(struct inode *inode, struct file *file,
8151 +nfs_direct_write_seg(struct inode *inode, struct nfs_open_context *ctx,
8152 unsigned long user_addr, size_t count, loff_t file_offset,
8153 struct page **pages, int nr_pages)
8155 @@ -257,9 +258,10 @@ nfs_direct_write_seg(struct inode *inode
8156 struct nfs_writeverf first_verf;
8157 struct nfs_write_data wdata = {
8159 + .cred = ctx->cred,
8161 .fh = NFS_FH(inode),
8162 - .lockowner = current->files,
8166 .fattr = &wdata.fattr,
8167 @@ -290,7 +292,7 @@ retry:
8168 user_addr + tot_bytes, wdata.args.pgbase, curpage);
8171 - result = NFS_PROTO(inode)->write(&wdata, file);
8172 + result = NFS_PROTO(inode)->write(&wdata);
8176 @@ -325,7 +327,7 @@ retry:
8177 wdata.args.offset = file_offset;
8180 - result = NFS_PROTO(inode)->commit(&wdata, file);
8181 + result = NFS_PROTO(inode)->commit(&wdata);
8184 if (result < 0 || memcmp(&first_verf.verifier,
8185 @@ -349,7 +351,7 @@ sync_retry:
8186 * nfs_direct_write - For each iov segment, map the user's buffer
8187 * then generate write and commit RPCs.
8188 * @inode: target inode
8189 - * @file: target file (may be NULL)
8190 + * @ctx: target file open context
8191 * @iov: array of vectors that define I/O buffer
8192 * file_offset: offset in file to begin the operation
8193 * nr_segs: size of iovec array
8194 @@ -358,8 +360,7 @@ sync_retry:
8195 * that non-direct readers might access, so they will pick up these
8196 * writes immediately.
8199 -nfs_direct_write(struct inode *inode, struct file *file,
8200 +static int nfs_direct_write(struct inode *inode, struct nfs_open_context *ctx,
8201 const struct iovec *iov, loff_t file_offset,
8202 unsigned long nr_segs)
8204 @@ -382,7 +383,7 @@ nfs_direct_write(struct inode *inode, st
8208 - result = nfs_direct_write_seg(inode, file, user_addr, size,
8209 + result = nfs_direct_write_seg(inode, ctx, user_addr, size,
8210 file_offset, pages, page_count);
8211 nfs_free_user_pages(pages, page_count, 0);
8213 @@ -414,6 +415,7 @@ nfs_direct_IO(int rw, struct kiocb *iocb
8215 ssize_t result = -EINVAL;
8216 struct file *file = iocb->ki_filp;
8217 + struct nfs_open_context *ctx;
8218 struct dentry *dentry = file->f_dentry;
8219 struct inode *inode = dentry->d_inode;
8221 @@ -423,19 +425,20 @@ nfs_direct_IO(int rw, struct kiocb *iocb
8222 if (!is_sync_kiocb(iocb))
8225 + ctx = (struct nfs_open_context *)file->private_data;
8228 dprintk("NFS: direct_IO(read) (%s) off/no(%Lu/%lu)\n",
8229 dentry->d_name.name, file_offset, nr_segs);
8231 - result = nfs_direct_read(inode, file, iov,
8232 + result = nfs_direct_read(inode, ctx, iov,
8233 file_offset, nr_segs);
8236 dprintk("NFS: direct_IO(write) (%s) off/no(%Lu/%lu)\n",
8237 dentry->d_name.name, file_offset, nr_segs);
8239 - result = nfs_direct_write(inode, file, iov,
8240 + result = nfs_direct_write(inode, ctx, iov,
8241 file_offset, nr_segs);
8244 @@ -471,6 +474,8 @@ nfs_file_direct_read(struct kiocb *iocb,
8245 ssize_t retval = -EINVAL;
8246 loff_t *ppos = &iocb->ki_pos;
8247 struct file *file = iocb->ki_filp;
8248 + struct nfs_open_context *ctx =
8249 + (struct nfs_open_context *) file->private_data;
8250 struct dentry *dentry = file->f_dentry;
8251 struct address_space *mapping = file->f_mapping;
8252 struct inode *inode = mapping->host;
8253 @@ -502,7 +507,7 @@ nfs_file_direct_read(struct kiocb *iocb,
8257 - retval = nfs_direct_read(inode, file, &iov, pos, 1);
8258 + retval = nfs_direct_read(inode, ctx, &iov, pos, 1);
8260 *ppos = pos + retval;
8262 @@ -542,6 +547,8 @@ nfs_file_direct_write(struct kiocb *iocb
8263 loff_t *ppos = &iocb->ki_pos;
8264 unsigned long limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
8265 struct file *file = iocb->ki_filp;
8266 + struct nfs_open_context *ctx =
8267 + (struct nfs_open_context *) file->private_data;
8268 struct dentry *dentry = file->f_dentry;
8269 struct address_space *mapping = file->f_mapping;
8270 struct inode *inode = mapping->host;
8271 @@ -589,7 +596,7 @@ nfs_file_direct_write(struct kiocb *iocb
8275 - retval = nfs_direct_write(inode, file, &iov, pos, 1);
8276 + retval = nfs_direct_write(inode, ctx, &iov, pos, 1);
8277 if (mapping->nrpages)
8278 invalidate_inode_pages2(mapping);
8280 --- linux-2.6.7/fs/nfs/nfs4state.c.lsec 2004-06-15 23:18:47.000000000 -0600
8281 +++ linux-2.6.7/fs/nfs/nfs4state.c 2005-03-23 14:28:22.939562352 -0700
8284 #include <linux/config.h>
8285 #include <linux/slab.h>
8286 +#include <linux/smp_lock.h>
8287 #include <linux/nfs_fs.h>
8288 #include <linux/nfs_idmap.h>
8289 #include <linux/workqueue.h>
8290 #include <linux/bitops.h>
8292 +#include "callback.h"
8293 +#include "delegation.h"
8295 #define OPENOWNER_POOL_SIZE 8
8297 static spinlock_t state_spinlock = SPIN_LOCK_UNLOCKED;
8298 @@ -93,21 +97,26 @@ nfs4_alloc_client(struct in_addr *addr)
8300 struct nfs4_client *clp;
8302 - if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL))) {
8303 - memset(clp, 0, sizeof(*clp));
8304 - memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
8305 - init_rwsem(&clp->cl_sem);
8306 - INIT_LIST_HEAD(&clp->cl_state_owners);
8307 - INIT_LIST_HEAD(&clp->cl_unused);
8308 - spin_lock_init(&clp->cl_lock);
8309 - atomic_set(&clp->cl_count, 1);
8310 - INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
8311 - INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
8312 - INIT_LIST_HEAD(&clp->cl_superblocks);
8313 - init_waitqueue_head(&clp->cl_waitq);
8314 - rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
8315 - clp->cl_state = 1 << NFS4CLNT_NEW;
8316 + if (nfs_callback_up() < 0)
8318 + if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
8319 + nfs_callback_down();
8322 + memset(clp, 0, sizeof(*clp));
8323 + memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
8324 + init_rwsem(&clp->cl_sem);
8325 + INIT_LIST_HEAD(&clp->cl_delegations);
8326 + INIT_LIST_HEAD(&clp->cl_state_owners);
8327 + INIT_LIST_HEAD(&clp->cl_unused);
8328 + spin_lock_init(&clp->cl_lock);
8329 + atomic_set(&clp->cl_count, 1);
8330 + INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
8331 + INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
8332 + INIT_LIST_HEAD(&clp->cl_superblocks);
8333 + init_waitqueue_head(&clp->cl_waitq);
8334 + rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
8335 + clp->cl_state = 1 << NFS4CLNT_OK;
8339 @@ -130,25 +139,52 @@ nfs4_free_client(struct nfs4_client *clp
8340 if (clp->cl_rpcclient)
8341 rpc_shutdown_client(clp->cl_rpcclient);
8343 + nfs_callback_down();
8346 +static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
8348 + struct nfs4_client *clp;
8349 + list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
8350 + if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
8351 + atomic_inc(&clp->cl_count);
8358 +struct nfs4_client *nfs4_find_client(struct in_addr *addr)
8360 + struct nfs4_client *clp;
8361 + spin_lock(&state_spinlock);
8362 + clp = __nfs4_find_client(addr);
8363 + spin_unlock(&state_spinlock);
8367 struct nfs4_client *
8368 nfs4_get_client(struct in_addr *addr)
8370 - struct nfs4_client *new, *clp = NULL;
8371 + struct nfs4_client *clp, *new = NULL;
8373 - new = nfs4_alloc_client(addr);
8374 spin_lock(&state_spinlock);
8375 - list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
8376 - if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0)
8379 + clp = __nfs4_find_client(addr);
8383 + if (clp != NULL) {
8384 + list_add(&clp->cl_servers, &nfs4_clientid_list);
8388 + spin_unlock(&state_spinlock);
8389 + new = nfs4_alloc_client(addr);
8390 + spin_lock(&state_spinlock);
8395 - list_add(&new->cl_servers, &nfs4_clientid_list);
8396 - spin_unlock(&state_spinlock);
8399 - atomic_inc(&clp->cl_count);
8400 spin_unlock(&state_spinlock);
8402 nfs4_free_client(new);
8403 @@ -169,6 +205,16 @@ nfs4_put_client(struct nfs4_client *clp)
8404 nfs4_free_client(clp);
8407 +int nfs4_init_client(struct nfs4_client *clp)
8409 + int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
8411 + status = nfs4_proc_setclientid_confirm(clp);
8413 + nfs4_schedule_state_renewal(clp);
8418 nfs4_alloc_lockowner_id(struct nfs4_client *clp)
8420 @@ -185,7 +231,6 @@ nfs4_client_grab_unused(struct nfs4_clie
8421 atomic_inc(&sp->so_count);
8423 list_move(&sp->so_list, &clp->cl_state_owners);
8424 - sp->so_generation = clp->cl_generation;
8428 @@ -224,6 +269,7 @@ nfs4_alloc_state_owner(void)
8429 init_MUTEX(&sp->so_sema);
8430 sp->so_seqid = 0; /* arbitrary */
8431 INIT_LIST_HEAD(&sp->so_states);
8432 + INIT_LIST_HEAD(&sp->so_delegations);
8433 atomic_set(&sp->so_count, 1);
8436 @@ -237,8 +283,11 @@ nfs4_unhash_state_owner(struct nfs4_stat
8437 spin_unlock(&clp->cl_lock);
8440 -struct nfs4_state_owner *
8441 -nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
8443 + * Note: must be called with clp->cl_sem held in order to prevent races
8444 + * with reboot recovery!
8446 +struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
8448 struct nfs4_client *clp = server->nfs4_state;
8449 struct nfs4_state_owner *sp, *new;
8450 @@ -254,23 +303,23 @@ nfs4_get_state_owner(struct nfs_server *
8451 new->so_client = clp;
8452 new->so_id = nfs4_alloc_lockowner_id(clp);
8453 new->so_cred = cred;
8454 - new->so_generation = clp->cl_generation;
8458 spin_unlock(&clp->cl_lock);
8462 - if (!test_bit(NFS4CLNT_OK, &clp->cl_state))
8463 - nfs4_wait_clnt_recover(server->client, clp);
8465 - put_rpccred(cred);
8469 + put_rpccred(cred);
8474 -nfs4_put_state_owner(struct nfs4_state_owner *sp)
8476 + * Must be called with clp->cl_sem held in order to avoid races
8477 + * with state recovery...
8479 +void nfs4_put_state_owner(struct nfs4_state_owner *sp)
8481 struct nfs4_client *clp = sp->so_client;
8482 struct rpc_cred *cred = sp->so_cred;
8483 @@ -330,8 +379,6 @@ __nfs4_find_state(struct inode *inode, s
8485 if ((state->state & mode) != mode)
8487 - /* Add the state to the head of the inode's list */
8488 - list_move(&state->inode_states, &nfsi->open_states);
8489 atomic_inc(&state->count);
8490 if (mode & FMODE_READ)
8492 @@ -353,8 +400,6 @@ __nfs4_find_state_byowner(struct inode *
8493 if (state->nreaders == 0 && state->nwriters == 0)
8495 if (state->owner == owner) {
8496 - /* Add the state to the head of the inode's list */
8497 - list_move(&state->inode_states, &nfsi->open_states);
8498 atomic_inc(&state->count);
8501 @@ -411,51 +456,40 @@ out:
8506 -__nfs4_put_open_state(struct nfs4_state *state)
8508 + * Beware! Caller must be holding exactly one
8509 + * reference to clp->cl_sem and owner->so_sema!
8511 +void nfs4_put_open_state(struct nfs4_state *state)
8513 struct inode *inode = state->inode;
8514 struct nfs4_state_owner *owner = state->owner;
8517 - if (!atomic_dec_and_lock(&state->count, &inode->i_lock)) {
8518 - up(&owner->so_sema);
8519 + if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
8522 if (!list_empty(&state->inode_states))
8523 list_del(&state->inode_states);
8524 spin_unlock(&inode->i_lock);
8525 list_del(&state->open_states);
8526 - if (state->state != 0) {
8528 - status = nfs4_do_close(inode, state);
8531 - up(&owner->so_sema);
8532 - status = nfs4_handle_error(NFS_SERVER(inode), status);
8533 - down(&owner->so_sema);
8534 - } while (!status);
8536 - up(&owner->so_sema);
8537 + BUG_ON (state->state != 0);
8538 nfs4_free_open_state(state);
8539 nfs4_put_state_owner(owner);
8543 -nfs4_put_open_state(struct nfs4_state *state)
8545 - down(&state->owner->so_sema);
8546 - __nfs4_put_open_state(state);
8550 -nfs4_close_state(struct nfs4_state *state, mode_t mode)
8552 + * Beware! Caller must be holding no references to clp->cl_sem!
8553 + * of owner->so_sema!
8555 +void nfs4_close_state(struct nfs4_state *state, mode_t mode)
8557 struct inode *inode = state->inode;
8558 struct nfs4_state_owner *owner = state->owner;
8559 + struct nfs4_client *clp = owner->so_client;
8563 + atomic_inc(&owner->so_count);
8564 + down_read(&clp->cl_sem);
8565 down(&owner->so_sema);
8566 /* Protect against nfs4_find_state() */
8567 spin_lock(&inode->i_lock);
8568 @@ -466,29 +500,24 @@ nfs4_close_state(struct nfs4_state *stat
8569 if (state->nwriters == 0 && state->nreaders == 0)
8570 list_del_init(&state->inode_states);
8571 spin_unlock(&inode->i_lock);
8574 - if (state->state == 0)
8577 + if (state->state != 0) {
8578 if (state->nreaders)
8579 newstate |= FMODE_READ;
8580 if (state->nwriters)
8581 newstate |= FMODE_WRITE;
8582 if (state->state == newstate)
8586 status = nfs4_do_downgrade(inode, state, newstate);
8588 status = nfs4_do_close(inode, state);
8590 - state->state = newstate;
8593 - up(&owner->so_sema);
8594 - status = nfs4_handle_error(NFS_SERVER(inode), status);
8595 - down(&owner->so_sema);
8596 - } while (!status);
8597 - __nfs4_put_open_state(state);
8600 + nfs4_put_open_state(state);
8601 + up(&owner->so_sema);
8602 + nfs4_put_state_owner(owner);
8603 + up_read(&clp->cl_sem);
8607 @@ -496,11 +525,11 @@ nfs4_close_state(struct nfs4_state *stat
8608 * that is compatible with current->files
8610 static struct nfs4_lock_state *
8611 -__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
8612 +__nfs4_find_lock_state(struct nfs4_state *state, unsigned int pid)
8614 struct nfs4_lock_state *pos;
8615 list_for_each_entry(pos, &state->lock_states, ls_locks) {
8616 - if (pos->ls_owner != fl_owner)
8617 + if (pos->ls_pid != pid)
8619 atomic_inc(&pos->ls_count);
8621 @@ -509,23 +538,16 @@ __nfs4_find_lock_state(struct nfs4_state
8624 struct nfs4_lock_state *
8625 -nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
8626 +nfs4_find_lock_state(struct nfs4_state *state, unsigned int pid)
8628 struct nfs4_lock_state *lsp;
8629 read_lock(&state->state_lock);
8630 - lsp = __nfs4_find_lock_state(state, fl_owner);
8631 + lsp = __nfs4_find_lock_state(state, pid);
8632 read_unlock(&state->state_lock);
8637 - * Return a compatible lock_state. If no initialized lock_state structure
8638 - * exists, return an uninitialized one.
8640 - * The caller must be holding state->lock_sema
8642 -struct nfs4_lock_state *
8643 -nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
8644 +static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, unsigned int pid)
8646 struct nfs4_lock_state *lsp;
8647 struct nfs4_client *clp = state->owner->so_client;
8648 @@ -533,12 +555,12 @@ nfs4_alloc_lock_state(struct nfs4_state
8649 lsp = kmalloc(sizeof(*lsp), GFP_KERNEL);
8653 lsp->ls_seqid = 0; /* arbitrary */
8655 memset(lsp->ls_stateid.data, 0, sizeof(lsp->ls_stateid.data));
8656 atomic_set(&lsp->ls_count, 1);
8657 - lsp->ls_owner = fl_owner;
8658 - lsp->ls_parent = state;
8659 + lsp->ls_pid = pid;
8660 INIT_LIST_HEAD(&lsp->ls_locks);
8661 spin_lock(&clp->cl_lock);
8662 lsp->ls_id = nfs4_alloc_lockowner_id(clp);
8663 @@ -547,16 +569,32 @@ nfs4_alloc_lock_state(struct nfs4_state
8667 + * Return a compatible lock_state. If no initialized lock_state structure
8668 + * exists, return an uninitialized one.
8670 + * The caller must be holding state->lock_sema and clp->cl_sem
8672 +struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, unsigned int pid)
8674 + struct nfs4_lock_state * lsp;
8676 + lsp = nfs4_find_lock_state(state, pid);
8678 + lsp = nfs4_alloc_lock_state(state, pid);
8683 * Byte-range lock aware utility to initialize the stateid of read/write
8687 -nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
8688 +nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, unsigned int pid)
8690 if (test_bit(LK_STATE_IN_USE, &state->flags)) {
8691 struct nfs4_lock_state *lsp;
8693 - lsp = nfs4_find_lock_state(state, fl_owner);
8694 + lsp = nfs4_find_lock_state(state, pid);
8696 memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
8697 nfs4_put_lock_state(lsp);
8698 @@ -567,13 +605,14 @@ nfs4_copy_stateid(nfs4_stateid *dst, str
8702 -* Called with state->lock_sema held.
8703 +* Called with state->lock_sema and clp->cl_sem held.
8706 -nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
8707 +void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *lsp)
8709 - if (status == NFS_OK || seqid_mutating_err(-status))
8710 + if (status == NFS_OK || seqid_mutating_err(-status)) {
8712 + lsp->flags |= NFS_LOCK_INITIALIZED;
8717 @@ -598,12 +637,11 @@ nfs4_check_unlock(struct file_lock *fl,
8718 * Post an initialized lock_state on the state->lock_states list.
8721 -nfs4_notify_setlk(struct inode *inode, struct file_lock *request, struct nfs4_lock_state *lsp)
8722 +nfs4_notify_setlk(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
8724 - struct nfs4_state *state = lsp->ls_parent;
8726 if (!list_empty(&lsp->ls_locks))
8728 + atomic_inc(&lsp->ls_count);
8729 write_lock(&state->state_lock);
8730 list_add(&lsp->ls_locks, &state->lock_states);
8731 set_bit(LK_STATE_IN_USE, &state->flags);
8732 @@ -620,15 +658,15 @@ nfs4_notify_setlk(struct inode *inode, s
8736 -nfs4_notify_unlck(struct inode *inode, struct file_lock *request, struct nfs4_lock_state *lsp)
8737 +nfs4_notify_unlck(struct nfs4_state *state, struct file_lock *request, struct nfs4_lock_state *lsp)
8739 - struct nfs4_state *state = lsp->ls_parent;
8740 + struct inode *inode = state->inode;
8741 struct file_lock *fl;
8743 for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
8744 if (!(fl->fl_flags & FL_POSIX))
8746 - if (fl->fl_owner != lsp->ls_owner)
8747 + if (fl->fl_pid != lsp->ls_pid)
8749 /* Exit if we find at least one lock which is not consumed */
8750 if (nfs4_check_unlock(fl,request) == 0)
8751 @@ -640,6 +678,7 @@ nfs4_notify_unlck(struct inode *inode, s
8752 if (list_empty(&state->lock_states))
8753 clear_bit(LK_STATE_IN_USE, &state->flags);
8754 write_unlock(&state->state_lock);
8755 + nfs4_put_lock_state(lsp);
8759 @@ -651,20 +690,18 @@ nfs4_put_lock_state(struct nfs4_lock_sta
8761 if (!atomic_dec_and_test(&lsp->ls_count))
8763 - if (!list_empty(&lsp->ls_locks))
8765 + BUG_ON (!list_empty(&lsp->ls_locks));
8770 -* Called with sp->so_sema held.
8771 +* Called with sp->so_sema and clp->cl_sem held.
8773 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
8774 * failed with a seqid incrementing error -
8775 * see comments nfs_fs.h:seqid_mutating_error()
8778 -nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
8779 +void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp)
8781 if (status == NFS_OK || seqid_mutating_err(-status))
8783 @@ -693,21 +730,14 @@ nfs4_recover_state(void *data)
8785 init_completion(&args.complete);
8787 - down_read(&clp->cl_sem);
8788 - if (test_and_set_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state))
8790 if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
8791 goto out_failed_clear;
8792 wait_for_completion(&args.complete);
8795 - smp_mb__before_clear_bit();
8796 - clear_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state);
8797 - smp_mb__after_clear_bit();
8798 + set_bit(NFS4CLNT_OK, &clp->cl_state);
8799 wake_up_all(&clp->cl_waitq);
8800 rpc_wake_up(&clp->cl_rpcwaitq);
8802 - up_read(&clp->cl_sem);
8806 @@ -718,24 +748,66 @@ nfs4_schedule_state_recovery(struct nfs4
8810 - smp_mb__before_clear_bit();
8811 - clear_bit(NFS4CLNT_OK, &clp->cl_state);
8812 - smp_mb__after_clear_bit();
8813 - schedule_work(&clp->cl_recoverd);
8814 + if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
8815 + schedule_work(&clp->cl_recoverd);
8819 -nfs4_reclaim_open_state(struct nfs4_state_owner *sp)
8820 +static int nfs4_reclaim_locks(struct nfs4_state *state)
8822 + struct inode *inode = state->inode;
8823 + struct file_lock *fl;
8826 + for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
8827 + if (!(fl->fl_flags & FL_POSIX))
8829 + if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
8831 + status = nfs4_lock_reclaim(state, fl);
8836 + printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
8837 + __FUNCTION__, status);
8838 + case -NFS4ERR_EXPIRED:
8839 + case -NFS4ERR_NO_GRACE:
8840 + case -NFS4ERR_RECLAIM_BAD:
8841 + case -NFS4ERR_RECLAIM_CONFLICT:
8842 + /* kill_proc(fl->fl_pid, SIGLOST, 1); */
8844 + case -NFS4ERR_STALE_CLIENTID:
8853 +static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp)
8855 struct nfs4_state *state;
8856 + struct nfs4_lock_state *lock;
8859 list_for_each_entry(state, &sp->so_states, open_states) {
8860 if (state->state == 0)
8862 status = nfs4_open_reclaim(sp, state);
8864 + list_for_each_entry(lock, &state->lock_states, ls_locks)
8865 + lock->flags &= ~NFS_LOCK_INITIALIZED;
8866 + if (status >= 0) {
8867 + status = nfs4_reclaim_locks(state);
8870 + list_for_each_entry(lock, &state->lock_states, ls_locks) {
8871 + if (!(lock->flags & NFS_LOCK_INITIALIZED))
8872 + printk("%s: Lock reclaim failed!\n",
8879 printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
8880 @@ -762,75 +834,55 @@ out_err:
8885 -reclaimer(void *ptr)
8886 +static int reclaimer(void *ptr)
8888 struct reclaimer_args *args = (struct reclaimer_args *)ptr;
8889 struct nfs4_client *clp = args->clp;
8890 struct nfs4_state_owner *sp;
8894 daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
8895 allow_signal(SIGKILL);
8897 + atomic_inc(&clp->cl_count);
8898 complete(&args->complete);
8900 + /* Ensure exclusive access to NFSv4 state */
8902 + down_write(&clp->cl_sem);
8903 /* Are there any NFS mounts out there? */
8904 if (list_empty(&clp->cl_superblocks))
8906 - if (!test_bit(NFS4CLNT_NEW, &clp->cl_state)) {
8907 - status = nfs4_proc_renew(clp);
8908 - if (status == 0) {
8909 - set_bit(NFS4CLNT_OK, &clp->cl_state);
8913 - status = nfs4_proc_setclientid(clp, 0, 0);
8916 - status = nfs4_proc_setclientid_confirm(clp);
8918 + status = nfs4_proc_renew(clp);
8921 + status = nfs4_init_client(clp);
8924 - generation = ++(clp->cl_generation);
8925 - clear_bit(NFS4CLNT_NEW, &clp->cl_state);
8926 - set_bit(NFS4CLNT_OK, &clp->cl_state);
8927 - up_read(&clp->cl_sem);
8928 - nfs4_schedule_state_renewal(clp);
8930 - spin_lock(&clp->cl_lock);
8931 + /* Mark all delegations for reclaim */
8932 + nfs_delegation_mark_reclaim(clp);
8933 + /* Note: list is protected by exclusive lock on clp->cl_sem */
8934 list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
8935 - if (sp->so_generation - generation >= 0)
8937 - atomic_inc(&sp->so_count);
8938 - spin_unlock(&clp->cl_lock);
8939 - down(&sp->so_sema);
8940 - if (sp->so_generation - generation < 0) {
8942 - sp->so_generation = clp->cl_generation;
8943 - status = nfs4_reclaim_open_state(sp);
8946 - nfs4_put_state_owner(sp);
8947 + status = nfs4_reclaim_open_state(sp);
8949 if (status == -NFS4ERR_STALE_CLIENTID)
8950 - nfs4_schedule_state_recovery(clp);
8952 + goto restart_loop;
8955 - goto restart_loop;
8957 - spin_unlock(&clp->cl_lock);
8958 + nfs_delegation_reap_unclaimed(clp);
8960 - smp_mb__before_clear_bit();
8961 - clear_bit(NFS4CLNT_SETUP_STATE, &clp->cl_state);
8962 - smp_mb__after_clear_bit();
8963 + set_bit(NFS4CLNT_OK, &clp->cl_state);
8964 + up_write(&clp->cl_sem);
8966 wake_up_all(&clp->cl_waitq);
8967 rpc_wake_up(&clp->cl_rpcwaitq);
8968 + nfs4_put_client(clp);
8971 - printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u\n",
8972 - NIPQUAD(clp->cl_addr.s_addr));
8973 - up_read(&clp->cl_sem);
8974 + printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
8975 + NIPQUAD(clp->cl_addr.s_addr), -status);
8979 --- linux-2.6.7/fs/nfs/inode.c.lsec 2004-06-15 23:19:44.000000000 -0600
8980 +++ linux-2.6.7/fs/nfs/inode.c 2005-03-23 14:28:22.818580744 -0700
8982 #include <asm/system.h>
8983 #include <asm/uaccess.h>
8985 +#include "delegation.h"
8987 #define NFSDBG_FACILITY NFSDBG_VFS
8988 #define NFS_PARANOIA 1
8990 @@ -123,8 +125,9 @@ nfs_delete_inode(struct inode * inode)
8992 dprintk("NFS: delete_inode(%s/%ld)\n", inode->i_sb->s_id, inode->i_ino);
8994 + nfs_wb_all(inode);
8996 - * The following can never actually happen...
8997 + * The following should never happen...
8999 if (nfs_have_writebacks(inode)) {
9000 printk(KERN_ERR "nfs_delete_inode: inode %ld has pending RPC requests\n", inode->i_ino);
9001 @@ -133,18 +136,15 @@ nfs_delete_inode(struct inode * inode)
9006 - * For the moment, the only task for the NFS clear_inode method is to
9007 - * release the mmap credential
9010 nfs_clear_inode(struct inode *inode)
9012 struct nfs_inode *nfsi = NFS_I(inode);
9013 - struct rpc_cred *cred = nfsi->mm_cred;
9014 + struct rpc_cred *cred;
9017 - put_rpccred(cred);
9018 + nfs4_zap_acl_attr(inode);
9019 + nfs_wb_all(inode);
9020 + BUG_ON (!list_empty(&nfsi->open_files));
9021 cred = nfsi->cache_access.cred;
9024 @@ -704,7 +704,7 @@ nfs_fhget(struct super_block *sb, struct
9025 /* Why so? Because we want revalidate for devices/FIFOs, and
9026 * that's precisely what we have in nfs_file_inode_operations.
9028 - inode->i_op = &nfs_file_inode_operations;
9029 + inode->i_op = NFS_SB(sb)->rpc_ops->file_inode_ops;
9030 if (S_ISREG(inode->i_mode)) {
9031 inode->i_fop = &nfs_file_operations;
9032 inode->i_data.a_ops = &nfs_file_aops;
9033 @@ -859,53 +859,114 @@ int nfs_getattr(struct vfsmount *mnt, st
9037 +struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred)
9039 + struct nfs_open_context *ctx;
9041 + ctx = (struct nfs_open_context *)kmalloc(sizeof(*ctx), GFP_KERNEL);
9042 + if (ctx != NULL) {
9043 + atomic_set(&ctx->count, 1);
9044 + ctx->dentry = dget(dentry);
9045 + ctx->cred = get_rpccred(cred);
9046 + ctx->state = NULL;
9047 + ctx->pid = current->tgid;
9049 + init_waitqueue_head(&ctx->waitq);
9054 +struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
9057 + atomic_inc(&ctx->count);
9061 +void put_nfs_open_context(struct nfs_open_context *ctx)
9063 + if (atomic_dec_and_test(&ctx->count)) {
9064 + if (ctx->state != NULL)
9065 + nfs4_close_state(ctx->state, ctx->mode);
9066 + if (ctx->cred != NULL)
9067 + put_rpccred(ctx->cred);
9068 + dput(ctx->dentry);
9074 * Ensure that mmap has a recent RPC credential for use when writing out
9078 -nfs_set_mmcred(struct inode *inode, struct rpc_cred *cred)
9079 +void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
9081 + struct inode *inode = filp->f_dentry->d_inode;
9082 + struct nfs_inode *nfsi = NFS_I(inode);
9084 + filp->private_data = get_nfs_open_context(ctx);
9085 + spin_lock(&inode->i_lock);
9086 + list_add(&ctx->list, &nfsi->open_files);
9087 + spin_unlock(&inode->i_lock);
9090 +struct nfs_open_context *nfs_find_open_context(struct inode *inode, int mode)
9092 + struct nfs_inode *nfsi = NFS_I(inode);
9093 + struct nfs_open_context *pos, *ctx = NULL;
9095 + spin_lock(&inode->i_lock);
9096 + list_for_each_entry(pos, &nfsi->open_files, list) {
9097 + if ((pos->mode & mode) == mode) {
9098 + ctx = get_nfs_open_context(pos);
9102 + spin_unlock(&inode->i_lock);
9106 +void nfs_file_clear_open_context(struct file *filp)
9108 - struct rpc_cred **p = &NFS_I(inode)->mm_cred,
9110 + struct inode *inode = filp->f_dentry->d_inode;
9111 + struct nfs_open_context *ctx = (struct nfs_open_context *)filp->private_data;
9113 - *p = get_rpccred(cred);
9115 - put_rpccred(oldcred);
9117 + filp->private_data = NULL;
9118 + spin_lock(&inode->i_lock);
9119 + list_del(&ctx->list);
9120 + spin_unlock(&inode->i_lock);
9121 + put_nfs_open_context(ctx);
9126 - * These are probably going to contain hooks for
9127 - * allocating and releasing RPC credentials for
9128 - * the file. I'll have to think about Tronds patch
9130 + * These allocate and release file read/write context information.
9132 int nfs_open(struct inode *inode, struct file *filp)
9134 - struct rpc_auth *auth;
9135 + struct nfs_open_context *ctx;
9136 struct rpc_cred *cred;
9138 - auth = NFS_CLIENT(inode)->cl_auth;
9139 - cred = rpcauth_lookupcred(auth, 0);
9140 - filp->private_data = cred;
9141 - if ((filp->f_mode & FMODE_WRITE) != 0) {
9142 - nfs_set_mmcred(inode, cred);
9143 + if ((cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0)) == NULL)
9145 + ctx = alloc_nfs_open_context(filp->f_dentry, cred);
9146 + put_rpccred(cred);
9149 + ctx->mode = filp->f_mode;
9150 + nfs_file_set_open_context(filp, ctx);
9151 + put_nfs_open_context(ctx);
9152 + if ((filp->f_mode & FMODE_WRITE) != 0)
9153 nfs_begin_data_update(inode);
9158 int nfs_release(struct inode *inode, struct file *filp)
9160 - struct rpc_cred *cred;
9163 if ((filp->f_mode & FMODE_WRITE) != 0)
9164 nfs_end_data_update(inode);
9165 - cred = nfs_file_cred(filp);
9167 - put_rpccred(cred);
9169 + nfs_file_clear_open_context(filp);
9173 @@ -1002,6 +1063,30 @@ out:
9177 +int nfs_attribute_timeout(struct inode *inode)
9179 + struct nfs_inode *nfsi = NFS_I(inode);
9181 + if (nfs_have_delegation(inode, FMODE_READ))
9183 + return time_after(jiffies, nfsi->read_cache_jiffies+nfsi->attrtimeo);
9187 + * nfs_revalidate_inode - Revalidate the inode attributes
9188 + * @server - pointer to nfs_server struct
9189 + * @inode - pointer to inode struct
9191 + * Updates inode attribute information by retrieving the data from the server.
9193 +int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
9195 + if (!(NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
9196 + && !nfs_attribute_timeout(inode))
9197 + return NFS_STALE(inode) ? -ESTALE : 0;
9198 + return __nfs_revalidate_inode(server, inode);
9202 * nfs_begin_data_update
9203 * @inode - pointer to inode
9204 @@ -1023,11 +1108,13 @@ void nfs_end_data_update(struct inode *i
9206 struct nfs_inode *nfsi = NFS_I(inode);
9208 - /* Mark the attribute cache for revalidation */
9209 - nfsi->flags |= NFS_INO_INVALID_ATTR;
9210 - /* Directories and symlinks: invalidate page cache too */
9211 - if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
9212 - nfsi->flags |= NFS_INO_INVALID_DATA;
9213 + if (!nfs_have_delegation(inode, FMODE_READ)) {
9214 + /* Mark the attribute cache for revalidation */
9215 + nfsi->flags |= NFS_INO_INVALID_ATTR;
9216 + /* Directories and symlinks: invalidate page cache too */
9217 + if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
9218 + nfsi->flags |= NFS_INO_INVALID_DATA;
9220 nfsi->cache_change_attribute ++;
9221 atomic_dec(&nfsi->data_updates);
9223 @@ -1068,6 +1155,10 @@ int nfs_refresh_inode(struct inode *inod
9224 loff_t cur_size, new_isize;
9227 + /* Do we hold a delegation? */
9228 + if (nfs_have_delegation(inode, FMODE_READ))
9231 /* Are we in the process of updating data on the server? */
9232 data_unstable = nfs_caches_unstable(inode);
9234 @@ -1240,6 +1331,7 @@ static int nfs_update_inode(struct inode
9235 inode->i_nlink = fattr->nlink;
9236 inode->i_uid = fattr->uid;
9237 inode->i_gid = fattr->gid;
9238 + nfs4_zap_acl_attr(inode);
9240 if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
9242 @@ -1265,7 +1357,8 @@ static int nfs_update_inode(struct inode
9243 if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
9244 || S_ISLNK(inode->i_mode)))
9245 invalid &= ~NFS_INO_INVALID_DATA;
9246 - nfsi->flags |= invalid;
9247 + if (!nfs_have_delegation(inode, FMODE_READ))
9248 + nfsi->flags |= invalid;
9252 @@ -1400,6 +1493,52 @@ static struct file_system_type nfs_fs_ty
9254 #ifdef CONFIG_NFS_V4
9256 +#define XATTR_NAME_NFSV4_ACL "system.nfs4_acl"
9259 +nfs_setxattr(struct dentry *dentry, const char *key, const void *buf,
9260 + size_t buflen, int flags)
9262 + struct inode *inode = dentry->d_inode;
9264 + if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
9267 + if (!S_ISREG(inode->i_mode) &&
9268 + (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
9271 + return nfs4_proc_set_acl(inode, buf, buflen);
9274 +/* The getxattr man page suggests returning -ENODATA for unknown attributes,
9275 + * and that's what we'll do for e.g. user attributes that haven't been set.
9276 + * But we'll follow ext2/ext3's lead by returning -EOPNOTSUPP for unsupported
9277 + * attributes in kernel-managed attribute namespaces. */
9279 +nfs_getxattr(struct dentry *dentry, const char *key, void *buf,
9282 + struct inode *inode = dentry->d_inode;
9284 + if (strcmp(key, XATTR_NAME_NFSV4_ACL) != 0)
9285 + return -EOPNOTSUPP;
9287 + return nfs4_proc_get_acl(inode, buf, buflen);
9291 +nfs_listxattr(struct dentry *dentry, char *buf, size_t buflen)
9293 + ssize_t len = strlen(XATTR_NAME_NFSV4_ACL) + 1;
9295 + if (buf && buflen < len)
9298 + memcpy(buf, XATTR_NAME_NFSV4_ACL, len);
9302 static void nfs4_clear_inode(struct inode *);
9304 static struct super_operations nfs4_sops = {
9305 @@ -1423,6 +1562,12 @@ static void nfs4_clear_inode(struct inod
9307 struct nfs_inode *nfsi = NFS_I(inode);
9309 + /* If we are holding a delegation, return it! */
9310 + if (nfsi->delegation != NULL)
9311 + nfs_inode_return_delegation(inode);
9312 + /* First call standard NFS clear_inode() code */
9313 + nfs_clear_inode(inode);
9314 + /* Now clear out any remaining state */
9315 while (!list_empty(&nfsi->open_states)) {
9316 struct nfs4_state *state;
9318 @@ -1437,8 +1582,6 @@ static void nfs4_clear_inode(struct inod
9319 BUG_ON(atomic_read(&state->count) != 1);
9320 nfs4_close_state(state, state->state);
9322 - /* Now call standard NFS clear_inode() code */
9323 - nfs_clear_inode(inode);
9327 @@ -1536,8 +1679,19 @@ static int nfs4_fill_super(struct super_
9328 memcpy(clp->cl_ipaddr, server->ip_addr, sizeof(clp->cl_ipaddr));
9331 - if (list_empty(&clp->cl_superblocks))
9332 - clear_bit(NFS4CLNT_OK, &clp->cl_state);
9333 + /* Fire up rpciod if not yet running */
9334 + if (rpciod_up() != 0) {
9335 + printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
9339 + if (list_empty(&clp->cl_superblocks)) {
9340 + err = nfs4_init_client(clp);
9342 + up_write(&clp->cl_sem);
9346 list_add_tail(&server->nfs4_siblings, &clp->cl_superblocks);
9347 clnt = rpc_clone_client(clp->cl_rpcclient);
9349 @@ -1567,17 +1721,10 @@ static int nfs4_fill_super(struct super_
9353 - /* Fire up rpciod if not yet running */
9354 - if (rpciod_up() != 0) {
9355 - printk(KERN_WARNING "NFS: couldn't start rpciod!\n");
9356 - goto out_shutdown;
9359 sb->s_op = &nfs4_sops;
9360 err = nfs_sb_init(sb, authflavour);
9365 rpc_shutdown_client(server->client);
9367 @@ -1585,6 +1732,8 @@ out_remove_list:
9368 list_del_init(&server->nfs4_siblings);
9369 up_write(&server->nfs4_state->cl_sem);
9370 destroy_nfsv4_state(server);
9375 nfs4_put_client(clp);
9376 @@ -1709,22 +1858,31 @@ out_free:
9380 +static void nfs4_kill_super(struct super_block *sb)
9382 + nfs_return_all_delegations(sb);
9383 + nfs_kill_super(sb);
9386 static struct file_system_type nfs4_fs_type = {
9387 .owner = THIS_MODULE,
9389 .get_sb = nfs4_get_sb,
9390 - .kill_sb = nfs_kill_super,
9391 + .kill_sb = nfs4_kill_super,
9392 .fs_flags = FS_ODD_RENAME|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
9395 -#define nfs4_zero_state(nfsi) \
9396 +#define nfs4_init_once(nfsi) \
9398 INIT_LIST_HEAD(&(nfsi)->open_states); \
9399 + nfsi->delegation = NULL; \
9400 + nfsi->delegation_state = 0; \
9401 + init_rwsem(&nfsi->rwsem); \
9403 #define register_nfs4fs() register_filesystem(&nfs4_fs_type)
9404 #define unregister_nfs4fs() unregister_filesystem(&nfs4_fs_type)
9406 -#define nfs4_zero_state(nfsi) \
9407 +#define nfs4_init_once(nfsi) \
9409 #define register_nfs4fs() (0)
9410 #define unregister_nfs4fs()
9411 @@ -1746,8 +1904,8 @@ static struct inode *nfs_alloc_inode(str
9415 - nfsi->mm_cred = NULL;
9416 - nfs4_zero_state(nfsi);
9417 + nfsi->acl_len = 0;
9419 return &nfsi->vfs_inode;
9422 @@ -1765,12 +1923,14 @@ static void init_once(void * foo, kmem_c
9423 inode_init_once(&nfsi->vfs_inode);
9424 INIT_LIST_HEAD(&nfsi->dirty);
9425 INIT_LIST_HEAD(&nfsi->commit);
9426 + INIT_LIST_HEAD(&nfsi->open_files);
9427 INIT_RADIX_TREE(&nfsi->nfs_page_tree, GFP_ATOMIC);
9428 atomic_set(&nfsi->data_updates, 0);
9432 init_waitqueue_head(&nfsi->nfs_i_wait);
9433 + nfs4_init_once(nfsi);
9437 --- linux-2.6.7/fs/nfs/dir.c.lsec 2004-06-15 23:19:23.000000000 -0600
9438 +++ linux-2.6.7/fs/nfs/dir.c 2005-03-23 14:28:22.701598528 -0700
9440 #include <linux/smp_lock.h>
9441 #include <linux/namei.h>
9443 +#include "delegation.h"
9445 #define NFS_PARANOIA 1
9446 /* #define NFS_DEBUG_VERBOSE 1 */
9448 @@ -88,6 +90,9 @@ struct inode_operations nfs4_dir_inode_o
9449 .permission = nfs_permission,
9450 .getattr = nfs_getattr,
9451 .setattr = nfs_setattr,
9452 + .getxattr = nfs_getxattr,
9453 + .setxattr = nfs_setxattr,
9454 + .listxattr = nfs_listxattr,
9457 #endif /* CONFIG_NFS_V4 */
9458 @@ -850,22 +855,22 @@ static int nfs_open_revalidate(struct de
9459 unsigned long verifier;
9460 int openflags, ret = 0;
9462 - /* NFS only supports OPEN for regular files */
9463 - if (inode && !S_ISREG(inode->i_mode))
9465 parent = dget_parent(dentry);
9466 dir = parent->d_inode;
9467 if (!is_atomic_open(dir, nd))
9469 + /* We can't create new files in nfs_open_revalidate(), so we
9470 + * optimize away revalidation of negative dentries.
9472 + if (inode == NULL)
9474 + /* NFS only supports OPEN on regular files */
9475 + if (!S_ISREG(inode->i_mode))
9477 openflags = nd->intent.open.flags;
9478 - if (openflags & O_CREAT) {
9479 - /* If this is a negative dentry, just drop it */
9482 - /* If this is exclusive open, just revalidate */
9483 - if (openflags & O_EXCL)
9486 + /* We cannot do exclusive creation on a positive dentry */
9487 + if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
9489 /* We can't create new files, or truncate existing ones here */
9490 openflags &= ~(O_CREAT|O_TRUNC);
9492 @@ -887,6 +892,8 @@ out:
9496 + if (inode != NULL && nfs_have_delegation(inode, FMODE_READ))
9498 return nfs_lookup_revalidate(dentry, nd);
9500 #endif /* CONFIG_NFSV4 */
9501 @@ -1299,19 +1306,6 @@ nfs_symlink(struct inode *dir, struct de
9502 dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
9503 dir->i_ino, dentry->d_name.name, symname);
9505 - error = -ENAMETOOLONG;
9506 - switch (NFS_PROTO(dir)->version) {
9508 - if (strlen(symname) > NFS2_MAXPATHLEN)
9512 - if (strlen(symname) > NFS3_MAXPATHLEN)
9519 if (dentry->d_inode)
9520 printk("nfs_proc_symlink: %s/%s not negative!\n",
9521 @@ -1341,8 +1335,6 @@ dentry->d_parent->d_name.name, dentry->d
9530 @@ -1498,10 +1490,56 @@ out:
9535 -nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
9536 +int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
9538 + struct nfs_access_entry *cache = &NFS_I(inode)->cache_access;
9540 + if (cache->cred != cred
9541 + || time_after(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))
9542 + || (NFS_FLAGS(inode) & NFS_INO_INVALID_ATTR))
9544 + memcpy(res, cache, sizeof(*res));
9548 +void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
9550 + struct nfs_access_entry *cache = &NFS_I(inode)->cache_access;
9552 + if (cache->cred != set->cred) {
9554 + put_rpccred(cache->cred);
9555 + cache->cred = get_rpccred(set->cred);
9557 + cache->jiffies = set->jiffies;
9558 + cache->mask = set->mask;
9561 +static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
9563 + struct nfs_access_entry cache;
9566 + status = nfs_access_get_cached(inode, cred, &cache);
9570 + /* Be clever: ask server to check for all possible rights */
9571 + cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ;
9572 + cache.cred = cred;
9573 + cache.jiffies = jiffies;
9574 + status = NFS_PROTO(inode)->access(inode, &cache);
9577 + nfs_access_add_cache(inode, &cache);
9579 + if ((cache.mask & mask) == mask)
9584 +int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
9586 - struct nfs_access_cache *cache = &NFS_I(inode)->cache_access;
9587 struct rpc_cred *cred;
9588 int mode = inode->i_mode;
9590 @@ -1542,24 +1580,7 @@ nfs_permission(struct inode *inode, int
9593 cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
9594 - if (cache->cred == cred
9595 - && time_before(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode))
9596 - && !(NFS_FLAGS(inode) & NFS_INO_INVALID_ATTR)) {
9597 - if (!(res = cache->err)) {
9598 - /* Is the mask a subset of an accepted mask? */
9599 - if ((cache->mask & mask) == mask)
9602 - /* ...or is it a superset of a rejected mask? */
9603 - if ((cache->mask & mask) == cache->mask)
9608 - res = NFS_PROTO(inode)->access(inode, cred, mask);
9609 - if (!res || res == -EACCES)
9612 + res = nfs_do_access(inode, cred, mask);
9616 @@ -1568,15 +1589,6 @@ out_notsup:
9617 res = vfs_permission(inode, mask);
9621 - cache->jiffies = jiffies;
9623 - put_rpccred(cache->cred);
9624 - cache->cred = cred;
9625 - cache->mask = mask;
9632 --- linux-2.6.7/fs/nfs/unlink.c.lsec 2004-06-15 23:20:04.000000000 -0600
9633 +++ linux-2.6.7/fs/nfs/unlink.c 2005-03-23 14:28:23.170527240 -0700
9634 @@ -215,7 +215,6 @@ nfs_complete_unlink(struct dentry *dentr
9635 spin_lock(&dentry->d_lock);
9636 dentry->d_flags &= ~DCACHE_NFSFS_RENAMED;
9637 spin_unlock(&dentry->d_lock);
9638 - if (data->task.tk_rpcwait == &nfs_delete_queue)
9639 - rpc_wake_up_task(&data->task);
9640 + rpc_wake_up_task(&data->task);
9641 nfs_put_unlinkdata(data);
9643 --- linux-2.6.7/fs/nfs/callback_xdr.c.lsec 2005-03-23 14:28:22.545622240 -0700
9644 +++ linux-2.6.7/fs/nfs/callback_xdr.c 2005-03-23 14:28:22.544622392 -0700
9647 + * linux/fs/nfs/callback_xdr.c
9649 + * Copyright (C) 2004 Trond Myklebust
9651 + * NFSv4 callback encode/decode procedures
9653 +#include <linux/config.h>
9654 +#include <linux/kernel.h>
9655 +#include <linux/sunrpc/svc.h>
9656 +#include <linux/nfs4.h>
9657 +#include <linux/nfs_fs.h>
9658 +#include "callback.h"
9660 +#define CB_OP_TAGLEN_MAXSZ (512)
9661 +#define CB_OP_HDR_RES_MAXSZ (2 + CB_OP_TAGLEN_MAXSZ)
9662 +#define CB_OP_GETATTR_BITMAP_MAXSZ (4)
9663 +#define CB_OP_GETATTR_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ + \
9664 + CB_OP_GETATTR_BITMAP_MAXSZ + \
9666 +#define CB_OP_RECALL_RES_MAXSZ (CB_OP_HDR_RES_MAXSZ)
9668 +#define NFSDBG_FACILITY NFSDBG_CALLBACK
9670 +typedef unsigned (*callback_process_op_t)(void *, void *);
9671 +typedef unsigned (*callback_decode_arg_t)(struct svc_rqst *, struct xdr_stream *, void *);
9672 +typedef unsigned (*callback_encode_res_t)(struct svc_rqst *, struct xdr_stream *, void *);
9675 +struct callback_op {
9676 + callback_process_op_t process_op;
9677 + callback_decode_arg_t decode_args;
9678 + callback_encode_res_t encode_res;
9682 +static struct callback_op callback_ops[];
9684 +static int nfs4_callback_null(struct svc_rqst *rqstp, void *argp, void *resp)
9686 + return htonl(NFS4_OK);
9689 +static int nfs4_decode_void(struct svc_rqst *rqstp, uint32_t *p, void *dummy)
9691 + return xdr_argsize_check(rqstp, p);
9694 +static int nfs4_encode_void(struct svc_rqst *rqstp, uint32_t *p, void *dummy)
9696 + return xdr_ressize_check(rqstp, p);
9699 +static uint32_t *read_buf(struct xdr_stream *xdr, int nbytes)
9703 + p = xdr_inline_decode(xdr, nbytes);
9704 + if (unlikely(p == NULL))
9705 + printk(KERN_WARNING "NFSv4 callback reply buffer overflowed!\n");
9709 +static unsigned decode_string(struct xdr_stream *xdr, unsigned int *len, const char **str)
9713 + p = read_buf(xdr, 4);
9714 + if (unlikely(p == NULL))
9715 + return htonl(NFS4ERR_RESOURCE);
9719 + p = read_buf(xdr, *len);
9720 + if (unlikely(p == NULL))
9721 + return htonl(NFS4ERR_RESOURCE);
9722 + *str = (const char *)p;
9729 +static unsigned decode_fh(struct xdr_stream *xdr, struct nfs_fh *fh)
9733 + p = read_buf(xdr, 4);
9734 + if (unlikely(p == NULL))
9735 + return htonl(NFS4ERR_RESOURCE);
9736 + fh->size = ntohl(*p);
9737 + if (fh->size > NFS4_FHSIZE)
9738 + return htonl(NFS4ERR_BADHANDLE);
9739 + p = read_buf(xdr, fh->size);
9740 + if (unlikely(p == NULL))
9741 + return htonl(NFS4ERR_RESOURCE);
9742 + memcpy(&fh->data[0], p, fh->size);
9743 + memset(&fh->data[fh->size], 0, sizeof(fh->data) - fh->size);
9747 +static unsigned decode_bitmap(struct xdr_stream *xdr, uint32_t *bitmap)
9750 + unsigned int attrlen;
9752 + p = read_buf(xdr, 4);
9753 + if (unlikely(p == NULL))
9754 + return htonl(NFS4ERR_RESOURCE);
9755 + attrlen = ntohl(*p);
9756 + p = read_buf(xdr, attrlen << 2);
9757 + if (unlikely(p == NULL))
9758 + return htonl(NFS4ERR_RESOURCE);
9759 + if (likely(attrlen > 0))
9760 + bitmap[0] = ntohl(*p++);
9762 + bitmap[1] = ntohl(*p);
9766 +static unsigned decode_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid)
9770 + p = read_buf(xdr, 16);
9771 + if (unlikely(p == NULL))
9772 + return htonl(NFS4ERR_RESOURCE);
9773 + memcpy(stateid->data, p, 16);
9777 +static unsigned decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound_hdr_arg *hdr)
9780 + unsigned int minor_version;
9783 + status = decode_string(xdr, &hdr->taglen, &hdr->tag);
9784 + if (unlikely(status != 0))
9786 + /* We do not like overly long tags! */
9787 + if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12 || hdr->taglen < 0) {
9788 + printk("NFSv4 CALLBACK %s: client sent tag of length %u\n",
9789 + __FUNCTION__, hdr->taglen);
9790 + return htonl(NFS4ERR_RESOURCE);
9792 + p = read_buf(xdr, 12);
9793 + if (unlikely(p == NULL))
9794 + return htonl(NFS4ERR_RESOURCE);
9795 + minor_version = ntohl(*p++);
9796 + /* Check minor version is zero. */
9797 + if (minor_version != 0) {
9798 + printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n",
9799 + __FUNCTION__, minor_version);
9800 + return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
9802 + hdr->callback_ident = ntohl(*p++);
9803 + hdr->nops = ntohl(*p);
9807 +static unsigned decode_op_hdr(struct xdr_stream *xdr, unsigned int *op)
9810 + p = read_buf(xdr, 4);
9811 + if (unlikely(p == NULL))
9812 + return htonl(NFS4ERR_RESOURCE);
9817 +static unsigned decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_getattrargs *args)
9821 + status = decode_fh(xdr, &args->fh);
9822 + if (unlikely(status != 0))
9824 + args->addr = &rqstp->rq_addr;
9825 + status = decode_bitmap(xdr, args->bitmap);
9827 + dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
9831 +static unsigned decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr, struct cb_recallargs *args)
9836 + args->addr = &rqstp->rq_addr;
9837 + status = decode_stateid(xdr, &args->stateid);
9838 + if (unlikely(status != 0))
9840 + p = read_buf(xdr, 4);
9841 + if (unlikely(p == NULL)) {
9842 + status = htonl(NFS4ERR_RESOURCE);
9845 + args->truncate = ntohl(*p);
9846 + status = decode_fh(xdr, &args->fh);
9848 + dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
9852 +static unsigned encode_string(struct xdr_stream *xdr, unsigned int len, const char *str)
9856 + p = xdr_reserve_space(xdr, 4 + len);
9857 + if (unlikely(p == NULL))
9858 + return htonl(NFS4ERR_RESOURCE);
9859 + xdr_encode_opaque(p, str, len);
9863 +#define CB_SUPPORTED_ATTR0 (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE)
9864 +#define CB_SUPPORTED_ATTR1 (FATTR4_WORD1_TIME_METADATA|FATTR4_WORD1_TIME_MODIFY)
9865 +static unsigned encode_attr_bitmap(struct xdr_stream *xdr, const uint32_t *bitmap, uint32_t **savep)
9870 + bm[0] = htonl(bitmap[0] & CB_SUPPORTED_ATTR0);
9871 + bm[1] = htonl(bitmap[1] & CB_SUPPORTED_ATTR1);
9873 + p = xdr_reserve_space(xdr, 16);
9874 + if (unlikely(p == NULL))
9875 + return htonl(NFS4ERR_RESOURCE);
9879 + } else if (bm[0] != 0) {
9880 + p = xdr_reserve_space(xdr, 12);
9881 + if (unlikely(p == NULL))
9882 + return htonl(NFS4ERR_RESOURCE);
9886 + p = xdr_reserve_space(xdr, 8);
9887 + if (unlikely(p == NULL))
9888 + return htonl(NFS4ERR_RESOURCE);
9895 +static unsigned encode_attr_change(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t change)
9899 + if (!(bitmap[0] & FATTR4_WORD0_CHANGE))
9901 + p = xdr_reserve_space(xdr, 8);
9902 + if (unlikely(p == 0))
9903 + return htonl(NFS4ERR_RESOURCE);
9904 + p = xdr_encode_hyper(p, change);
9908 +static unsigned encode_attr_size(struct xdr_stream *xdr, const uint32_t *bitmap, uint64_t size)
9912 + if (!(bitmap[0] & FATTR4_WORD0_SIZE))
9914 + p = xdr_reserve_space(xdr, 8);
9915 + if (unlikely(p == 0))
9916 + return htonl(NFS4ERR_RESOURCE);
9917 + p = xdr_encode_hyper(p, size);
9921 +static unsigned encode_attr_time(struct xdr_stream *xdr, const struct timespec *time)
9925 + p = xdr_reserve_space(xdr, 12);
9926 + if (unlikely(p == 0))
9927 + return htonl(NFS4ERR_RESOURCE);
9928 + p = xdr_encode_hyper(p, time->tv_sec);
9929 + *p = htonl(time->tv_nsec);
9933 +static unsigned encode_attr_ctime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time)
9935 + if (!(bitmap[1] & FATTR4_WORD1_TIME_METADATA))
9937 + return encode_attr_time(xdr,time);
9940 +static unsigned encode_attr_mtime(struct xdr_stream *xdr, const uint32_t *bitmap, const struct timespec *time)
9942 + if (!(bitmap[1] & FATTR4_WORD1_TIME_MODIFY))
9944 + return encode_attr_time(xdr,time);
9947 +static unsigned encode_compound_hdr_res(struct xdr_stream *xdr, struct cb_compound_hdr_res *hdr)
9951 + hdr->status = xdr_reserve_space(xdr, 4);
9952 + if (unlikely(hdr->status == NULL))
9953 + return htonl(NFS4ERR_RESOURCE);
9954 + status = encode_string(xdr, hdr->taglen, hdr->tag);
9955 + if (unlikely(status != 0))
9957 + hdr->nops = xdr_reserve_space(xdr, 4);
9958 + if (unlikely(hdr->nops == NULL))
9959 + return htonl(NFS4ERR_RESOURCE);
9963 +static unsigned encode_op_hdr(struct xdr_stream *xdr, uint32_t op, uint32_t res)
9967 + p = xdr_reserve_space(xdr, 8);
9968 + if (unlikely(p == NULL))
9969 + return htonl(NFS4ERR_RESOURCE);
9975 +static unsigned encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr, const struct cb_getattrres *res)
9978 + unsigned status = res->status;
9980 + if (unlikely(status != 0))
9982 + status = encode_attr_bitmap(xdr, res->bitmap, &savep);
9983 + if (unlikely(status != 0))
9985 + status = encode_attr_change(xdr, res->bitmap, res->change_attr);
9986 + if (unlikely(status != 0))
9988 + status = encode_attr_size(xdr, res->bitmap, res->size);
9989 + if (unlikely(status != 0))
9991 + status = encode_attr_ctime(xdr, res->bitmap, &res->ctime);
9992 + if (unlikely(status != 0))
9994 + status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
9995 + *savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
9997 + dprintk("%s: exit with status = %d\n", __FUNCTION__, status);
10001 +static unsigned process_op(struct svc_rqst *rqstp,
10002 + struct xdr_stream *xdr_in, void *argp,
10003 + struct xdr_stream *xdr_out, void *resp)
10005 + struct callback_op *op;
10006 + unsigned int op_nr;
10007 + unsigned int status = 0;
10011 + dprintk("%s: start\n", __FUNCTION__);
10012 + status = decode_op_hdr(xdr_in, &op_nr);
10013 + if (unlikely(status != 0)) {
10014 + op_nr = OP_CB_ILLEGAL;
10015 + op = &callback_ops[0];
10016 + } else if (unlikely(op_nr != OP_CB_GETATTR && op_nr != OP_CB_RECALL)) {
10017 + op_nr = OP_CB_ILLEGAL;
10018 + op = &callback_ops[0];
10019 + status = htonl(NFS4ERR_OP_ILLEGAL);
10021 + op = &callback_ops[op_nr];
10023 + maxlen = xdr_out->end - xdr_out->p;
10024 + if (maxlen > 0 && maxlen < PAGE_SIZE) {
10025 + if (likely(status == 0 && op->decode_args != NULL))
10026 + status = op->decode_args(rqstp, xdr_in, argp);
10027 + if (likely(status == 0 && op->process_op != NULL))
10028 + status = op->process_op(argp, resp);
10030 + status = htonl(NFS4ERR_RESOURCE);
10032 + res = encode_op_hdr(xdr_out, op_nr, status);
10035 + if (op->encode_res != NULL && status == 0)
10036 + status = op->encode_res(rqstp, xdr_out, resp);
10037 + dprintk("%s: done, status = %d\n", __FUNCTION__, status);
10042 + * Decode, process and encode a COMPOUND
10044 +static int nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp)
10046 + struct cb_compound_hdr_arg hdr_arg;
10047 + struct cb_compound_hdr_res hdr_res;
10048 + struct xdr_stream xdr_in, xdr_out;
10050 + unsigned int status;
10051 + unsigned int nops = 1;
10053 + dprintk("%s: start\n", __FUNCTION__);
10055 + xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
10057 + p = (uint32_t*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
10058 + rqstp->rq_res.head[0].iov_len = PAGE_SIZE;
10059 + xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
10061 + decode_compound_hdr_arg(&xdr_in, &hdr_arg);
10062 + hdr_res.taglen = hdr_arg.taglen;
10063 + hdr_res.tag = hdr_arg.tag;
10064 + encode_compound_hdr_res(&xdr_out, &hdr_res);
10067 + status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp);
10070 + if (nops == hdr_arg.nops)
10074 + *hdr_res.status = status;
10075 + *hdr_res.nops = htonl(nops);
10076 + dprintk("%s: done, status = %u\n", __FUNCTION__, status);
10077 + return rpc_success;
10081 + * Define NFS4 callback COMPOUND ops.
10083 +static struct callback_op callback_ops[] = {
10085 + .res_maxsize = CB_OP_HDR_RES_MAXSZ,
10087 + [OP_CB_GETATTR] = {
10088 + .process_op = (callback_process_op_t)nfs4_callback_getattr,
10089 + .decode_args = (callback_decode_arg_t)decode_getattr_args,
10090 + .encode_res = (callback_encode_res_t)encode_getattr_res,
10091 + .res_maxsize = CB_OP_GETATTR_RES_MAXSZ,
10093 + [OP_CB_RECALL] = {
10094 + .process_op = (callback_process_op_t)nfs4_callback_recall,
10095 + .decode_args = (callback_decode_arg_t)decode_recall_args,
10096 + .res_maxsize = CB_OP_RECALL_RES_MAXSZ,
10101 + * Define NFS4 callback procedures
10103 +static struct svc_procedure nfs4_callback_procedures1[] = {
10105 + .pc_func = nfs4_callback_null,
10106 + .pc_decode = (kxdrproc_t)nfs4_decode_void,
10107 + .pc_encode = (kxdrproc_t)nfs4_encode_void,
10108 + .pc_xdrressize = 1,
10110 + [CB_COMPOUND] = {
10111 + .pc_func = nfs4_callback_compound,
10112 + .pc_encode = (kxdrproc_t)nfs4_encode_void,
10113 + .pc_argsize = 256,
10114 + .pc_ressize = 256,
10115 + .pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
10119 +struct svc_version nfs4_callback_version1 = {
10121 + .vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
10122 + .vs_proc = nfs4_callback_procedures1,
10123 + .vs_xdrsize = NFS4_CALLBACK_XDRSIZE,
10124 + .vs_dispatch = NULL,
10127 --- linux-2.6.7/fs/nfs/callback.c.lsec 2005-03-23 14:28:22.484631512 -0700
10128 +++ linux-2.6.7/fs/nfs/callback.c 2005-03-23 14:28:22.483631664 -0700
10131 + * linux/fs/nfs/callback.c
10133 + * Copyright (C) 2004 Trond Myklebust
10135 + * NFSv4 callback handling
10138 +#include <linux/config.h>
10139 +#include <linux/completion.h>
10140 +#include <linux/ip.h>
10141 +#include <linux/module.h>
10142 +#include <linux/smp_lock.h>
10143 +#include <linux/sunrpc/svc.h>
10144 +#include <linux/sunrpc/svcsock.h>
10145 +#include <linux/nfs_fs.h>
10146 +#include "callback.h"
10148 +#define NFSDBG_FACILITY NFSDBG_CALLBACK
10150 +struct nfs_callback_data {
10151 + unsigned int users;
10152 + struct svc_serv *serv;
10154 + struct completion started;
10155 + struct completion stopped;
10158 +static struct nfs_callback_data nfs_callback_info;
10159 +static DECLARE_MUTEX(nfs_callback_sema);
10160 +static struct svc_program nfs4_callback_program;
10162 +unsigned short nfs_callback_tcpport;
10165 + * This is the callback kernel thread.
10167 +static void nfs_callback_svc(struct svc_rqst *rqstp)
10169 + struct svc_serv *serv = rqstp->rq_server;
10172 + __module_get(THIS_MODULE);
10175 + nfs_callback_info.pid = current->pid;
10176 + daemonize("nfsv4-svc");
10177 + /* Process request with signals blocked, but allow SIGKILL. */
10178 + allow_signal(SIGKILL);
10180 + complete(&nfs_callback_info.started);
10182 + while (nfs_callback_info.users != 0 || !signalled()) {
10184 + * Listen for a request on the socket
10186 + err = svc_recv(serv, rqstp, MAX_SCHEDULE_TIMEOUT);
10187 + if (err == -EAGAIN || err == -EINTR)
10190 + printk(KERN_WARNING
10191 + "%s: terminating on error %d\n",
10192 + __FUNCTION__, -err);
10195 + dprintk("%s: request from %u.%u.%u.%u\n", __FUNCTION__,
10196 + NIPQUAD(rqstp->rq_addr.sin_addr.s_addr));
10197 + svc_process(serv, rqstp);
10200 + nfs_callback_info.pid = 0;
10201 + complete(&nfs_callback_info.stopped);
10203 + module_put_and_exit(0);
10207 + * Bring up the server process if it is not already up.
10209 +int nfs_callback_up(void)
10211 + struct svc_serv *serv;
10212 + struct svc_sock *svsk;
10216 + down(&nfs_callback_sema);
10217 + if (nfs_callback_info.users++ || nfs_callback_info.pid != 0)
10219 + init_completion(&nfs_callback_info.started);
10220 + init_completion(&nfs_callback_info.stopped);
10221 + serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE);
10225 + /* FIXME: We don't want to register this socket with the portmapper */
10226 + ret = svc_makesock(serv, IPPROTO_TCP, 0);
10228 + goto out_destroy;
10229 + if (!list_empty(&serv->sv_permsocks)) {
10230 + svsk = list_entry(serv->sv_permsocks.next,
10231 + struct svc_sock, sk_list);
10232 + nfs_callback_tcpport = ntohs(inet_sk(svsk->sk_sk)->sport);
10233 + dprintk ("Callback port = 0x%x\n", nfs_callback_tcpport);
10236 + ret = svc_create_thread(nfs_callback_svc, serv);
10238 + goto out_destroy;
10239 + nfs_callback_info.serv = serv;
10240 + wait_for_completion(&nfs_callback_info.started);
10242 + up(&nfs_callback_sema);
10246 + svc_destroy(serv);
10248 + nfs_callback_info.users--;
10253 + * Kill the server process once the last user has gone away.
10255 +int nfs_callback_down(void)
10260 + down(&nfs_callback_sema);
10261 + if (--nfs_callback_info.users || nfs_callback_info.pid == 0)
10263 + kill_proc(nfs_callback_info.pid, SIGKILL, 1);
10264 + wait_for_completion(&nfs_callback_info.stopped);
10266 + up(&nfs_callback_sema);
10272 + * AUTH_NULL authentication
10274 +static int nfs_callback_null_accept(struct svc_rqst *rqstp, u32 *authp)
10276 + struct iovec *argv = &rqstp->rq_arg.head[0];
10277 + struct iovec *resv = &rqstp->rq_res.head[0];
10279 + if (argv->iov_len < 3*4)
10280 + return SVC_GARBAGE;
10282 + if (svc_getu32(argv) != 0) {
10283 + dprintk("svc: bad null cred\n");
10284 + *authp = rpc_autherr_badcred;
10285 + return SVC_DENIED;
10287 + if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) {
10288 + dprintk("svc: bad null verf\n");
10289 + *authp = rpc_autherr_badverf;
10290 + return SVC_DENIED;
10293 + /* Signal that mapping to nobody uid/gid is required */
10294 + rqstp->rq_cred.cr_uid = (uid_t) -1;
10295 + rqstp->rq_cred.cr_gid = (gid_t) -1;
10296 + rqstp->rq_cred.cr_group_info = groups_alloc(0);
10297 + if (rqstp->rq_cred.cr_group_info == NULL)
10298 + return SVC_DROP; /* kmalloc failure - client must retry */
10300 + /* Put NULL verifier */
10301 + svc_putu32(resv, RPC_AUTH_NULL);
10302 + svc_putu32(resv, 0);
10303 + dprintk("%s: success, returning %d!\n", __FUNCTION__, SVC_OK);
10307 +static int nfs_callback_null_release(struct svc_rqst *rqstp)
10309 + if (rqstp->rq_cred.cr_group_info)
10310 + put_group_info(rqstp->rq_cred.cr_group_info);
10311 + rqstp->rq_cred.cr_group_info = NULL;
10312 + return 0; /* don't drop */
10315 +static struct auth_ops nfs_callback_auth_null = {
10317 + .flavour = RPC_AUTH_NULL,
10318 + .accept = nfs_callback_null_accept,
10319 + .release = nfs_callback_null_release,
10323 + * AUTH_SYS authentication
10325 +static int nfs_callback_unix_accept(struct svc_rqst *rqstp, u32 *authp)
10327 + struct iovec *argv = &rqstp->rq_arg.head[0];
10328 + struct iovec *resv = &rqstp->rq_res.head[0];
10329 + struct svc_cred *cred = &rqstp->rq_cred;
10331 + int len = argv->iov_len;
10333 + dprintk("%s: start\n", __FUNCTION__);
10334 + cred->cr_group_info = NULL;
10335 + rqstp->rq_client = NULL;
10336 + if ((len -= 3*4) < 0)
10337 + return SVC_GARBAGE;
10339 + /* Get length, time stamp and machine name */
10340 + svc_getu32(argv);
10341 + svc_getu32(argv);
10342 + slen = XDR_QUADLEN(ntohl(svc_getu32(argv)));
10343 + if (slen > 64 || (len -= (slen + 3)*4) < 0)
10345 + argv->iov_base = (void*)((u32*)argv->iov_base + slen);
10346 + argv->iov_len -= slen*4;
10348 + cred->cr_uid = ntohl(svc_getu32(argv));
10349 + cred->cr_gid = ntohl(svc_getu32(argv));
10350 + slen = ntohl(svc_getu32(argv));
10351 + if (slen > 16 || (len -= (slen + 2)*4) < 0)
10353 + cred->cr_group_info = groups_alloc(slen);
10354 + if (cred->cr_group_info == NULL)
10356 + for (i = 0; i < slen; i++)
10357 + GROUP_AT(cred->cr_group_info, i) = ntohl(svc_getu32(argv));
10359 + if (svc_getu32(argv) != RPC_AUTH_NULL || svc_getu32(argv) != 0) {
10360 + *authp = rpc_autherr_badverf;
10361 + return SVC_DENIED;
10363 + /* Put NULL verifier */
10364 + svc_putu32(resv, RPC_AUTH_NULL);
10365 + svc_putu32(resv, 0);
10366 + dprintk("%s: success, returning %d!\n", __FUNCTION__, SVC_OK);
10369 + *authp = rpc_autherr_badcred;
10370 + return SVC_DENIED;
10373 +static int nfs_callback_unix_release(struct svc_rqst *rqstp)
10375 + if (rqstp->rq_cred.cr_group_info)
10376 + put_group_info(rqstp->rq_cred.cr_group_info);
10377 + rqstp->rq_cred.cr_group_info = NULL;
10381 +static struct auth_ops nfs_callback_auth_unix = {
10383 + .flavour = RPC_AUTH_UNIX,
10384 + .accept = nfs_callback_unix_accept,
10385 + .release = nfs_callback_unix_release,
10389 + * Hook the authentication protocol
10391 +static int nfs_callback_auth(struct svc_rqst *rqstp, u32 *authp)
10393 + struct in_addr *addr = &rqstp->rq_addr.sin_addr;
10394 + struct nfs4_client *clp;
10395 + struct iovec *argv = &rqstp->rq_arg.head[0];
10399 + /* Don't talk to strangers */
10400 + clp = nfs4_find_client(addr);
10403 + dprintk("%s: %u.%u.%u.%u NFSv4 callback!\n", __FUNCTION__, NIPQUAD(addr));
10404 + nfs4_put_client(clp);
10405 + flavour = ntohl(svc_getu32(argv));
10406 + switch(flavour) {
10407 + case RPC_AUTH_NULL:
10408 + if (rqstp->rq_proc != CB_NULL) {
10409 + *authp = rpc_autherr_tooweak;
10410 + retval = SVC_DENIED;
10413 + rqstp->rq_authop = &nfs_callback_auth_null;
10414 + retval = nfs_callback_null_accept(rqstp, authp);
10416 + case RPC_AUTH_UNIX:
10417 + /* Eat the authentication flavour */
10418 + rqstp->rq_authop = &nfs_callback_auth_unix;
10419 + retval = nfs_callback_unix_accept(rqstp, authp);
10422 + /* FIXME: need to add RPCSEC_GSS upcalls */
10424 + svc_ungetu32(argv);
10425 + retval = svc_authenticate(rqstp, authp);
10427 + *authp = rpc_autherr_rejectedcred;
10428 + retval = SVC_DENIED;
10431 + dprintk("%s: flavour %d returning error %d\n", __FUNCTION__, flavour, retval);
10436 + * Define NFS4 callback program
10438 +extern struct svc_version nfs4_callback_version1;
10440 +static struct svc_version *nfs4_callback_version[] = {
10441 + [1] = &nfs4_callback_version1,
10444 +static struct svc_stat nfs4_callback_stats;
10446 +static struct svc_program nfs4_callback_program = {
10447 + .pg_prog = NFS4_CALLBACK, /* RPC service number */
10448 + .pg_nvers = ARRAY_SIZE(nfs4_callback_version), /* Number of entries */
10449 + .pg_vers = nfs4_callback_version, /* version table */
10450 + .pg_name = "NFSv4 callback", /* service name */
10451 + .pg_class = "nfs", /* authentication class */
10452 + .pg_stats = &nfs4_callback_stats,
10453 + .pg_authenticate = nfs_callback_auth,
10455 --- linux-2.6.7/fs/nfs/read.c.lsec 2004-06-15 23:18:37.000000000 -0600
10456 +++ linux-2.6.7/fs/nfs/read.c 2005-03-23 14:28:23.114535752 -0700
10457 @@ -91,8 +91,8 @@ int nfs_return_empty_page(struct page *p
10459 * Read a page synchronously.
10462 -nfs_readpage_sync(struct file *file, struct inode *inode, struct page *page)
10463 +static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
10464 + struct page *page)
10466 unsigned int rsize = NFS_SERVER(inode)->rsize;
10467 unsigned int count = PAGE_CACHE_SIZE;
10468 @@ -105,10 +105,11 @@ nfs_readpage_sync(struct file *file, str
10470 memset(rdata, 0, sizeof(*rdata));
10471 rdata->flags = (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
10472 + rdata->cred = ctx->cred;
10473 rdata->inode = inode;
10474 INIT_LIST_HEAD(&rdata->pages);
10475 rdata->args.fh = NFS_FH(inode);
10476 - rdata->args.lockowner = current->files;
10477 + rdata->args.context = ctx;
10478 rdata->args.pages = &page;
10479 rdata->args.pgbase = 0UL;
10480 rdata->args.count = rsize;
10481 @@ -134,7 +135,7 @@ nfs_readpage_sync(struct file *file, str
10482 rdata->args.count);
10485 - result = NFS_PROTO(inode)->read(rdata, file);
10486 + result = NFS_PROTO(inode)->read(rdata);
10490 @@ -169,8 +170,8 @@ io_error:
10495 -nfs_readpage_async(struct file *file, struct inode *inode, struct page *page)
10496 +static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
10497 + struct page *page)
10499 LIST_HEAD(one_request);
10500 struct nfs_page *new;
10501 @@ -179,7 +180,7 @@ nfs_readpage_async(struct file *file, st
10502 len = nfs_page_length(inode, page);
10504 return nfs_return_empty_page(page);
10505 - new = nfs_create_request(file, inode, page, 0, len);
10506 + new = nfs_create_request(ctx, inode, page, 0, len);
10509 return PTR_ERR(new);
10510 @@ -202,8 +203,8 @@ static void nfs_readpage_release(struct
10511 nfs_unlock_request(req);
10513 dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
10514 - req->wb_inode->i_sb->s_id,
10515 - (long long)NFS_FILEID(req->wb_inode),
10516 + req->wb_context->dentry->d_inode->i_sb->s_id,
10517 + (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
10519 (long long)req_offset(req));
10521 @@ -217,16 +218,15 @@ static void nfs_read_rpcsetup(struct nfs
10522 struct inode *inode;
10525 - data->inode = inode = req->wb_inode;
10526 - data->cred = req->wb_cred;
10527 + data->inode = inode = req->wb_context->dentry->d_inode;
10528 + data->cred = req->wb_context->cred;
10530 data->args.fh = NFS_FH(inode);
10531 data->args.offset = req_offset(req) + offset;
10532 data->args.pgbase = req->wb_pgbase + offset;
10533 data->args.pages = data->pagevec;
10534 data->args.count = count;
10535 - data->args.lockowner = req->wb_lockowner;
10536 - data->args.state = req->wb_state;
10537 + data->args.context = req->wb_context;
10539 data->res.fattr = &data->fattr;
10540 data->res.count = count;
10541 @@ -396,7 +396,7 @@ nfs_pagein_list(struct list_head *head,
10542 while (!list_empty(head)) {
10543 pages += nfs_coalesce_requests(head, &one_request, rpages);
10544 req = nfs_list_entry(one_request.next);
10545 - error = nfs_pagein_one(&one_request, req->wb_inode);
10546 + error = nfs_pagein_one(&one_request, req->wb_context->dentry->d_inode);
10550 @@ -500,9 +500,9 @@ void nfs_readpage_result(struct rpc_task
10551 * - The error flag is set for this page. This happens only when a
10552 * previous async read operation failed.
10555 -nfs_readpage(struct file *file, struct page *page)
10556 +int nfs_readpage(struct file *file, struct page *page)
10558 + struct nfs_open_context *ctx;
10559 struct inode *inode = page->mapping->host;
10562 @@ -519,25 +519,33 @@ nfs_readpage(struct file *file, struct p
10566 + if (file == NULL) {
10567 + ctx = nfs_find_open_context(inode, FMODE_READ);
10571 + ctx = get_nfs_open_context((struct nfs_open_context *)
10572 + file->private_data);
10573 if (!IS_SYNC(inode)) {
10574 - error = nfs_readpage_async(file, inode, page);
10575 + error = nfs_readpage_async(ctx, inode, page);
10579 - error = nfs_readpage_sync(file, inode, page);
10580 + error = nfs_readpage_sync(ctx, inode, page);
10581 if (error < 0 && IS_SWAPFILE(inode))
10582 printk("Aiee.. nfs swap-in of page failed!\n");
10584 + put_nfs_open_context(ctx);
10593 struct nfs_readdesc {
10594 struct list_head *head;
10595 - struct file *filp;
10596 + struct nfs_open_context *ctx;
10600 @@ -552,7 +560,7 @@ readpage_async_filler(void *data, struct
10601 len = nfs_page_length(inode, page);
10603 return nfs_return_empty_page(page);
10604 - new = nfs_create_request(desc->filp, inode, page, 0, len);
10605 + new = nfs_create_request(desc->ctx, inode, page, 0, len);
10607 SetPageError(page);
10609 @@ -565,13 +573,11 @@ readpage_async_filler(void *data, struct
10614 -nfs_readpages(struct file *filp, struct address_space *mapping,
10615 +int nfs_readpages(struct file *filp, struct address_space *mapping,
10616 struct list_head *pages, unsigned nr_pages)
10619 struct nfs_readdesc desc = {
10623 struct inode *inode = mapping->host;
10624 @@ -583,12 +589,20 @@ nfs_readpages(struct file *filp, struct
10625 (long long)NFS_FILEID(inode),
10628 + if (filp == NULL) {
10629 + desc.ctx = nfs_find_open_context(inode, FMODE_READ);
10630 + if (desc.ctx == NULL)
10633 + desc.ctx = get_nfs_open_context((struct nfs_open_context *)
10634 + filp->private_data);
10635 ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
10636 if (!list_empty(&head)) {
10637 int err = nfs_pagein_list(&head, server->rpages);
10641 + put_nfs_open_context(desc.ctx);
10645 --- linux-2.6.7/fs/nfs/Makefile.lsec 2004-06-15 23:19:01.000000000 -0600
10646 +++ linux-2.6.7/fs/nfs/Makefile 2005-03-23 14:28:22.819580592 -0700
10647 @@ -9,6 +9,7 @@ nfs-y := dir.o file.o inode.o nfs2xdr
10648 nfs-$(CONFIG_ROOT_NFS) += nfsroot.o mount_clnt.o
10649 nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
10650 nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
10652 + delegation.o idmap.o \
10653 + callback.o callback_xdr.o callback_proc.o
10654 nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
10655 nfs-objs := $(nfs-y)
10656 --- linux-2.6.7/fs/Kconfig.lsec 2004-06-15 23:19:36.000000000 -0600
10657 +++ linux-2.6.7/fs/Kconfig 2005-03-23 14:28:23.871420688 -0700
10658 @@ -322,7 +322,7 @@ config FS_POSIX_ACL
10659 # Never use this symbol for ifdefs.
10662 - depends on EXT2_FS_POSIX_ACL || EXT3_FS_POSIX_ACL || JFS_POSIX_ACL || REISERFS_FS_POSIX_ACL
10663 + depends on EXT2_FS_POSIX_ACL || EXT3_FS_POSIX_ACL || JFS_POSIX_ACL || REISERFS_FS_POSIX_ACL || NFS_V4
10667 @@ -1443,6 +1443,7 @@ config NFSD_V3
10669 bool "Provide NFSv4 server support (EXPERIMENTAL)"
10670 depends on NFSD_V3 && EXPERIMENTAL
10673 If you would like to include the NFSv4 server as well as the NFSv2
10674 and NFSv3 servers, say Y here. This feature is experimental, and
10675 @@ -1450,11 +1451,13 @@ config NFSD_V4
10679 - bool "Provide NFS server over TCP support (EXPERIMENTAL)"
10680 - depends on NFSD && EXPERIMENTAL
10681 + bool "Provide NFS server over TCP support"
10685 - Enable NFS service over TCP connections. This the officially
10686 - still experimental, but seems to work well.
10687 + If you want your NFS server to support TCP connections, say Y here.
10688 + TCP connections usually perform better than the default UDP when
10689 + the network is lossy or congested. If unsure, say Y.
10692 bool "Root file system on NFS"
10693 @@ -1505,6 +1508,22 @@ config RPCSEC_GSS_KRB5
10697 +config RPCSEC_GSS_SPKM3
10698 + tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
10699 + depends on SUNRPC && EXPERIMENTAL
10700 + select SUNRPC_GSS
10702 + select CRYPTO_MD5
10703 + select CRYPTO_DES
10705 +	  Provides for secure RPC calls by means of a gss-api
10706 +	  mechanism based on the SPKM3 public-key scheme.
10708 + Note: Requires an auxiliary userspace daemon which may be found on
10709 + http://www.citi.umich.edu/projects/nfsv4/
10711 + If unsure, say N.
10714 tristate "SMB file system support (to mount Windows shares etc.)"
10716 --- linux-2.6.7/include/linux/fs.h.lsec 2005-03-23 14:26:03.300790672 -0700
10717 +++ linux-2.6.7/include/linux/fs.h 2005-03-23 14:28:23.280510520 -0700
10718 @@ -632,7 +632,7 @@ struct file_lock {
10719 struct file_lock *fl_next; /* singly linked list for this inode */
10720 struct list_head fl_link; /* doubly linked list of all locks */
10721 struct list_head fl_block; /* circular list of blocked processes */
10722 - fl_owner_t fl_owner;
10723 + fl_owner_t fl_owner; /* 0 if lock owned by a local process */
10724 unsigned int fl_pid;
10725 wait_queue_head_t fl_wait;
10726 struct file *fl_file;
10727 --- linux-2.6.7/include/linux/nfs4.h.lsec 2004-06-15 23:19:22.000000000 -0600
10728 +++ linux-2.6.7/include/linux/nfs4.h 2005-03-23 14:28:23.335502160 -0700
10730 #ifndef _LINUX_NFS4_H
10731 #define _LINUX_NFS4_H
10733 +#include <linux/types.h>
10734 +#include <linux/list.h>
10736 #define NFS4_VERIFIER_SIZE 8
10737 #define NFS4_FHSIZE 128
10738 +#define NFS4_MAXPATHLEN PATH_MAX
10739 #define NFS4_MAXNAMLEN NAME_MAX
10741 #define NFS4_ACCESS_READ 0x0001
10743 #define ACL4_SUPPORT_AUDIT_ACL 0x04
10744 #define ACL4_SUPPORT_ALARM_ACL 0x08
10746 +#define NFS4_ACE_FILE_INHERIT_ACE 0x00000001
10747 +#define NFS4_ACE_DIRECTORY_INHERIT_ACE 0x00000002
10748 +#define NFS4_ACE_NO_PROPAGATE_INHERIT_ACE 0x00000004
10749 +#define NFS4_ACE_INHERIT_ONLY_ACE 0x00000008
10750 +#define NFS4_ACE_SUCCESSFUL_ACCESS_ACE_FLAG 0x00000010
10751 +#define NFS4_ACE_FAILED_ACCESS_ACE_FLAG 0x00000020
10752 +#define NFS4_ACE_IDENTIFIER_GROUP 0x00000040
10753 +#define NFS4_ACE_OWNER 0x00000080
10754 +#define NFS4_ACE_GROUP 0x00000100
10755 +#define NFS4_ACE_EVERYONE 0x00000200
10757 +#define NFS4_ACE_READ_DATA 0x00000001
10758 +#define NFS4_ACE_LIST_DIRECTORY 0x00000001
10759 +#define NFS4_ACE_WRITE_DATA 0x00000002
10760 +#define NFS4_ACE_ADD_FILE 0x00000002
10761 +#define NFS4_ACE_APPEND_DATA 0x00000004
10762 +#define NFS4_ACE_ADD_SUBDIRECTORY 0x00000004
10763 +#define NFS4_ACE_READ_NAMED_ATTRS 0x00000008
10764 +#define NFS4_ACE_WRITE_NAMED_ATTRS 0x00000010
10765 +#define NFS4_ACE_EXECUTE 0x00000020
10766 +#define NFS4_ACE_DELETE_CHILD 0x00000040
10767 +#define NFS4_ACE_READ_ATTRIBUTES 0x00000080
10768 +#define NFS4_ACE_WRITE_ATTRIBUTES 0x00000100
10769 +#define NFS4_ACE_DELETE 0x00010000
10770 +#define NFS4_ACE_READ_ACL 0x00020000
10771 +#define NFS4_ACE_WRITE_ACL 0x00040000
10772 +#define NFS4_ACE_WRITE_OWNER 0x00080000
10773 +#define NFS4_ACE_SYNCHRONIZE 0x00100000
10774 +#define NFS4_ACE_GENERIC_READ 0x00120081
10775 +#define NFS4_ACE_GENERIC_WRITE 0x00160106
10776 +#define NFS4_ACE_GENERIC_EXECUTE 0x001200A0
10777 +#define NFS4_ACE_MASK_ALL 0x001F01FF
10779 +enum nfs4_acl_whotype {
10780 + NFS4_ACL_WHO_NAMED = 0,
10781 + NFS4_ACL_WHO_OWNER,
10782 + NFS4_ACL_WHO_GROUP,
10783 + NFS4_ACL_WHO_EVERYONE,
10789 + uint32_t access_mask;
10792 + struct list_head l_ace;
10797 + struct list_head ace_head;
10800 typedef struct { char data[NFS4_VERIFIER_SIZE]; } nfs4_verifier;
10801 typedef struct { char data[16]; } nfs4_stateid;
10803 @@ -297,7 +355,7 @@ enum {
10804 NFSPROC4_CLNT_COMMIT,
10805 NFSPROC4_CLNT_OPEN,
10806 NFSPROC4_CLNT_OPEN_CONFIRM,
10807 - NFSPROC4_CLNT_OPEN_RECLAIM,
10808 + NFSPROC4_CLNT_OPEN_NOATTR,
10809 NFSPROC4_CLNT_OPEN_DOWNGRADE,
10810 NFSPROC4_CLNT_CLOSE,
10811 NFSPROC4_CLNT_SETATTR,
10812 @@ -315,12 +373,16 @@ enum {
10813 NFSPROC4_CLNT_REMOVE,
10814 NFSPROC4_CLNT_RENAME,
10815 NFSPROC4_CLNT_LINK,
10816 + NFSPROC4_CLNT_SYMLINK,
10817 NFSPROC4_CLNT_CREATE,
10818 NFSPROC4_CLNT_PATHCONF,
10819 NFSPROC4_CLNT_STATFS,
10820 NFSPROC4_CLNT_READLINK,
10821 NFSPROC4_CLNT_READDIR,
10822 NFSPROC4_CLNT_SERVER_CAPS,
10823 + NFSPROC4_CLNT_DELEGRETURN,
10824 + NFSPROC4_CLNT_GETACL,
10825 + NFSPROC4_CLNT_SETACL,
10829 --- linux-2.6.7/include/linux/nfs_page.h.lsec 2004-06-15 23:18:57.000000000 -0600
10830 +++ linux-2.6.7/include/linux/nfs_page.h 2005-03-23 14:28:23.392493496 -0700
10833 struct list_head wb_list, /* Defines state of page: */
10834 *wb_list_head; /* read/write/commit */
10835 - struct file *wb_file;
10836 - fl_owner_t wb_lockowner;
10837 - struct inode *wb_inode;
10838 - struct rpc_cred *wb_cred;
10839 - struct nfs4_state *wb_state;
10840 struct page *wb_page; /* page to read in/write out */
10841 + struct nfs_open_context *wb_context; /* File state context info */
10842 atomic_t wb_complete; /* i/os we're waiting for */
10843 - wait_queue_head_t wb_wait; /* wait queue */
10844 unsigned long wb_index; /* Offset >> PAGE_CACHE_SHIFT */
10845 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
10846 wb_pgbase, /* Start of page data */
10847 @@ -50,9 +45,11 @@ struct nfs_page {
10848 #define NFS_NEED_COMMIT(req) (test_bit(PG_NEED_COMMIT,&(req)->wb_flags))
10849 #define NFS_NEED_RESCHED(req) (test_bit(PG_NEED_RESCHED,&(req)->wb_flags))
10851 -extern struct nfs_page *nfs_create_request(struct file *, struct inode *,
10853 - unsigned int, unsigned int);
10854 +extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
10855 + struct inode *inode,
10856 + struct page *page,
10857 + unsigned int offset,
10858 + unsigned int count);
10859 extern void nfs_clear_request(struct nfs_page *req);
10860 extern void nfs_release_request(struct nfs_page *req);
10862 @@ -64,6 +61,7 @@ extern int nfs_scan_list(struct list_hea
10863 extern int nfs_coalesce_requests(struct list_head *, struct list_head *,
10865 extern int nfs_wait_on_request(struct nfs_page *);
10866 +extern void nfs_unlock_request(struct nfs_page *req);
10868 extern spinlock_t nfs_wreq_lock;
10870 @@ -90,19 +88,6 @@ nfs_lock_request(struct nfs_page *req)
10874 -static inline void
10875 -nfs_unlock_request(struct nfs_page *req)
10877 - if (!NFS_WBACK_BUSY(req)) {
10878 - printk(KERN_ERR "NFS: Invalid unlock attempted\n");
10881 - smp_mb__before_clear_bit();
10882 - clear_bit(PG_BUSY, &req->wb_flags);
10883 - smp_mb__after_clear_bit();
10884 - wake_up_all(&req->wb_wait);
10885 - nfs_release_request(req);
10889 * nfs_list_remove_request - Remove a request from its wb_list
10890 --- linux-2.6.7/include/linux/sunrpc/svc.h.lsec 2004-06-15 23:19:35.000000000 -0600
10891 +++ linux-2.6.7/include/linux/sunrpc/svc.h 2005-03-23 14:28:23.541470848 -0700
10892 @@ -87,6 +87,14 @@ static inline u32 svc_getu32(struct iove
10893 iov->iov_len -= sizeof(u32);
10897 +static inline void svc_ungetu32(struct iovec *iov)
10899 + u32 *vp = (u32 *)iov->iov_base;
10900 + iov->iov_base = (void *)(vp - 1);
10901 + iov->iov_len += sizeof(*vp);
10904 static inline void svc_putu32(struct iovec *iov, u32 val)
10906 u32 *vp = iov->iov_base + iov->iov_len;
10907 @@ -243,6 +251,8 @@ struct svc_program {
10908 char * pg_name; /* service name */
10909 char * pg_class; /* class name: services sharing authentication */
10910 struct svc_stat * pg_stats; /* rpc statistics */
10911 + /* Override authentication. NULL means use default */
10912 + int (*pg_authenticate)(struct svc_rqst *, u32 *);
10916 --- linux-2.6.7/include/linux/sunrpc/gss_spkm3.h.lsec 2005-03-23 14:28:24.186372808 -0700
10917 +++ linux-2.6.7/include/linux/sunrpc/gss_spkm3.h 2005-03-23 14:28:24.185372960 -0700
10920 + * linux/include/linux/sunrpc/gss_spkm3.h
10922 + * Copyright (c) 2000 The Regents of the University of Michigan.
10923 + * All rights reserved.
10925 + * Andy Adamson <andros@umich.edu>
10928 +#include <linux/sunrpc/auth_gss.h>
10929 +#include <linux/sunrpc/gss_err.h>
10930 +#include <linux/sunrpc/gss_asn1.h>
10932 +struct spkm3_ctx {
10933 + struct xdr_netobj ctx_id; /* per message context id */
10934 + int qop; /* negotiated qop */
10935 + struct xdr_netobj mech_used;
10936 + unsigned int ret_flags ;
10937 + unsigned int req_flags ;
10938 + struct xdr_netobj share_key;
10940 + struct crypto_tfm* derived_conf_key;
10942 + struct crypto_tfm* derived_integ_key;
10943 + int keyestb_alg; /* alg used to get share_key */
10944 + int owf_alg; /* one way function */
10947 +/* from openssl/objects.h */
10948 +/* XXX need SEAL_ALG_NONE */
10950 +#define NID_dhKeyAgreement 28
10951 +#define NID_des_cbc 31
10952 +#define NID_sha1 64
10953 +#define NID_cast5_cbc 108
10955 +/* SPKM InnerContext Token types */
10957 +#define SPKM_ERROR_TOK 3
10958 +#define SPKM_MIC_TOK 4
10959 +#define SPKM_WRAP_TOK 5
10960 +#define SPKM_DEL_TOK 6
10962 +u32 spkm3_make_token(struct spkm3_ctx *ctx, int qop_req, struct xdr_buf * text, struct xdr_netobj * token, int toktype);
10964 +u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struct xdr_buf *message_buffer, int *qop_state, int toktype);
10966 +#define CKSUMTYPE_RSA_MD5 0x0007
10968 +s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
10969 + struct xdr_netobj *cksum);
10970 +void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits);
10971 +int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen,
10973 +void spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen,
10974 + unsigned char *ctxhdr, int elen, int zbit);
10975 +void spkm3_make_mic_token(unsigned char **tokp, int toklen,
10976 + struct xdr_netobj *mic_hdr,
10977 + struct xdr_netobj *md5cksum, int md5elen, int md5zbit);
10978 +u32 spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen,
10979 + unsigned char **cksum);
10980 --- linux-2.6.7/include/linux/sunrpc/sched.h.lsec 2004-06-15 23:19:42.000000000 -0600
10981 +++ linux-2.6.7/include/linux/sunrpc/sched.h 2005-03-23 14:28:23.540471000 -0700
10984 #include <linux/timer.h>
10985 #include <linux/sunrpc/types.h>
10986 +#include <linux/spinlock.h>
10987 #include <linux/wait.h>
10988 +#include <linux/workqueue.h>
10989 #include <linux/sunrpc/xdr.h>
10992 @@ -25,11 +27,18 @@ struct rpc_message {
10993 struct rpc_cred * rpc_cred; /* Credentials */
10996 +struct rpc_wait_queue;
10998 + struct list_head list; /* wait queue links */
10999 + struct list_head links; /* Links to related tasks */
11000 + wait_queue_head_t waitq; /* sync: sleep on this q */
11001 + struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */
11005 * This is the RPC task struct
11008 - struct list_head tk_list; /* wait queue links */
11010 unsigned long tk_magic; /* 0xf00baa */
11012 @@ -37,7 +46,6 @@ struct rpc_task {
11013 struct rpc_clnt * tk_client; /* RPC client */
11014 struct rpc_rqst * tk_rqstp; /* RPC request */
11015 int tk_status; /* result of last operation */
11016 - struct rpc_wait_queue * tk_rpcwait; /* RPC wait queue we're on */
11020 @@ -70,13 +78,18 @@ struct rpc_task {
11021 * you have a pathological interest in kernel oopses.
11023 struct timer_list tk_timer; /* kernel timer */
11024 - wait_queue_head_t tk_wait; /* sync: sleep on this q */
11025 unsigned long tk_timeout; /* timeout for rpc_sleep() */
11026 unsigned short tk_flags; /* misc flags */
11027 unsigned char tk_active : 1;/* Task has been activated */
11028 unsigned char tk_priority : 2;/* Task priority */
11029 unsigned long tk_runstate; /* Task run status */
11030 - struct list_head tk_links; /* links to related tasks */
11031 + struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
11032 + * be any workqueue
11035 + struct work_struct tk_work; /* Async task work queue */
11036 + struct rpc_wait tk_wait; /* RPC wait */
11039 unsigned short tk_pid; /* debugging aid */
11041 @@ -87,11 +100,11 @@ struct rpc_task {
11042 /* support walking a list of tasks on a wait queue */
11043 #define task_for_each(task, pos, head) \
11044 list_for_each(pos, head) \
11045 - if ((task=list_entry(pos, struct rpc_task, tk_list)),1)
11046 + if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)
11048 #define task_for_first(task, head) \
11049 if (!list_empty(head) && \
11050 - ((task=list_entry((head)->next, struct rpc_task, tk_list)),1))
11051 + ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))
11053 /* .. and walking list of all tasks */
11054 #define alltask_for_each(task, pos, head) \
11055 @@ -124,22 +137,24 @@ typedef void (*rpc_action)(struct rpc_
11056 #define RPC_DO_CALLBACK(t) ((t)->tk_callback != NULL)
11057 #define RPC_IS_SOFT(t) ((t)->tk_flags & RPC_TASK_SOFT)
11059 -#define RPC_TASK_SLEEPING 0
11060 -#define RPC_TASK_RUNNING 1
11061 -#define RPC_IS_SLEEPING(t) (test_bit(RPC_TASK_SLEEPING, &(t)->tk_runstate))
11062 -#define RPC_IS_RUNNING(t) (test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
11063 +#define RPC_TASK_RUNNING 0
11064 +#define RPC_TASK_QUEUED 1
11066 +#define RPC_IS_RUNNING(t) (test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
11067 #define rpc_set_running(t) (set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
11068 -#define rpc_clear_running(t) (clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
11070 -#define rpc_set_sleeping(t) (set_bit(RPC_TASK_SLEEPING, &(t)->tk_runstate))
11072 -#define rpc_clear_sleeping(t) \
11073 +#define rpc_test_and_set_running(t) \
11074 + (test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
11075 +#define rpc_clear_running(t) \
11077 smp_mb__before_clear_bit(); \
11078 - clear_bit(RPC_TASK_SLEEPING, &(t)->tk_runstate); \
11079 + clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
11080 smp_mb__after_clear_bit(); \
11084 +#define RPC_IS_QUEUED(t) (test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
11085 +#define rpc_set_queued(t) (set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
11086 +#define rpc_test_and_clear_queued(t) \
11087 + (test_and_clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
11091 @@ -155,6 +170,7 @@ typedef void (*rpc_action)(struct rpc_
11092 * RPC synchronization objects
11094 struct rpc_wait_queue {
11096 struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
11097 unsigned long cookie; /* cookie of last task serviced */
11098 unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
11099 @@ -175,6 +191,7 @@ struct rpc_wait_queue {
11102 # define RPC_WAITQ_INIT(var,qname) { \
11103 + .lock = SPIN_LOCK_UNLOCKED, \
11105 [0] = LIST_HEAD_INIT(var.tasks[0]), \
11106 [1] = LIST_HEAD_INIT(var.tasks[1]), \
11107 @@ -183,6 +200,7 @@ struct rpc_wait_queue {
11110 # define RPC_WAITQ_INIT(var,qname) { \
11111 + .lock = SPIN_LOCK_UNLOCKED, \
11113 [0] = LIST_HEAD_INIT(var.tasks[0]), \
11114 [1] = LIST_HEAD_INIT(var.tasks[1]), \
11115 @@ -207,13 +225,10 @@ void rpc_killall_tasks(struct rpc_clnt
11116 int rpc_execute(struct rpc_task *);
11117 void rpc_run_child(struct rpc_task *parent, struct rpc_task *child,
11118 rpc_action action);
11119 -int rpc_add_wait_queue(struct rpc_wait_queue *, struct rpc_task *);
11120 -void rpc_remove_wait_queue(struct rpc_task *);
11121 void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
11122 void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
11123 void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
11124 rpc_action action, rpc_action timer);
11125 -void rpc_add_timer(struct rpc_task *, rpc_action);
11126 void rpc_wake_up_task(struct rpc_task *);
11127 void rpc_wake_up(struct rpc_wait_queue *);
11128 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
11129 --- linux-2.6.7/include/linux/sunrpc/gss_api.h.lsec 2004-06-15 23:20:03.000000000 -0600
11130 +++ linux-2.6.7/include/linux/sunrpc/gss_api.h 2005-03-23 14:28:24.688296504 -0700
11131 @@ -47,6 +47,18 @@ u32 gss_verify_mic(
11132 struct xdr_buf *message,
11133 struct xdr_netobj *mic_token,
11136 + struct gss_ctx *ctx_id,
11139 + struct xdr_buf *outbuf,
11140 + struct page **inpages);
11142 + struct gss_ctx *ctx_id,
11145 + struct xdr_buf *inbuf,
11146 + int *out_offset);
11147 u32 gss_delete_sec_context(
11148 struct gss_ctx **ctx_id);
11150 @@ -93,6 +105,18 @@ struct gss_api_ops {
11151 struct xdr_buf *message,
11152 struct xdr_netobj *mic_token,
11155 + struct gss_ctx *ctx_id,
11158 + struct xdr_buf *outbuf,
11159 + struct page **inpages);
11160 + u32 (*gss_unwrap)(
11161 + struct gss_ctx *ctx_id,
11164 + struct xdr_buf *buf,
11165 + int *out_offset);
11166 void (*gss_delete_sec_context)(
11167 void *internal_ctx_id);
11169 --- linux-2.6.7/include/linux/sunrpc/xprt.h.lsec 2004-06-15 23:19:43.000000000 -0600
11170 +++ linux-2.6.7/include/linux/sunrpc/xprt.h 2005-03-23 14:28:24.783282064 -0700
11171 @@ -95,7 +95,10 @@ struct rpc_rqst {
11172 int rq_cong; /* has incremented xprt->cong */
11173 int rq_received; /* receive completed */
11174 u32 rq_seqno; /* gss seq no. used on req. */
11176 + int rq_enc_pages_num;
11177 + struct page **rq_enc_pages; /* scratch pages for use by
11178 + gss privacy code */
11179 + void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
11180 struct list_head rq_list;
11182 struct xdr_buf rq_private_buf; /* The receive buffer
11183 --- linux-2.6.7/include/linux/sunrpc/gss_krb5.h.lsec 2004-06-15 23:19:29.000000000 -0600
11184 +++ linux-2.6.7/include/linux/sunrpc/gss_krb5.h 2005-03-23 14:28:24.840273400 -0700
11185 @@ -53,6 +53,8 @@ struct krb5_ctx {
11186 struct xdr_netobj mech_used;
11189 +extern spinlock_t krb5_seq_lock;
11191 #define KG_TOK_MIC_MSG 0x0101
11192 #define KG_TOK_WRAP_MSG 0x0201
11194 @@ -116,18 +118,25 @@ enum seal_alg {
11197 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
11198 - struct xdr_netobj *cksum);
11199 + int body_offset, struct xdr_netobj *cksum);
11202 krb5_make_token(struct krb5_ctx *context_handle, int qop_req,
11203 struct xdr_buf *input_message_buffer,
11204 - struct xdr_netobj *output_message_buffer, int toktype);
11205 + struct xdr_netobj *output_message_buffer);
11208 krb5_read_token(struct krb5_ctx *context_handle,
11209 struct xdr_netobj *input_token_buffer,
11210 - struct xdr_buf *message_buffer,
11211 - int *qop_state, int toktype);
11212 + struct xdr_buf *message_buffer, int *qop_state);
11215 +gss_wrap_kerberos(struct gss_ctx *ctx_id, u32 qop, int offset,
11216 + struct xdr_buf *outbuf, struct page **pages);
11219 +gss_unwrap_kerberos(struct gss_ctx *ctx_id, u32 *qop, int offset,
11220 + struct xdr_buf *buf, int *out_offset);
11223 krb5_encrypt(struct crypto_tfm * key,
11224 @@ -137,6 +146,13 @@ u32
11225 krb5_decrypt(struct crypto_tfm * key,
11226 void *iv, void *in, void *out, int length);
11229 +gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *outbuf, int offset,
11230 + struct page **pages);
11233 +gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *inbuf, int offset);
11236 krb5_make_seq_num(struct crypto_tfm * key,
11238 --- linux-2.6.7/include/linux/sunrpc/gss_asn1.h.lsec 2004-06-15 23:20:04.000000000 -0600
11239 +++ linux-2.6.7/include/linux/sunrpc/gss_asn1.h 2005-03-23 14:28:23.706445768 -0700
11240 @@ -69,7 +69,6 @@ u32 g_verify_token_header(
11241 struct xdr_netobj *mech,
11243 unsigned char **buf_in,
11247 u32 g_get_mech_oid(struct xdr_netobj *mech, struct xdr_netobj * in_buf);
11248 --- linux-2.6.7/include/linux/sunrpc/cache.h.lsec 2004-06-15 23:19:28.000000000 -0600
11249 +++ linux-2.6.7/include/linux/sunrpc/cache.h 2005-03-23 14:28:24.349348032 -0700
11250 @@ -128,20 +128,17 @@ struct cache_deferred_req {
11251 * just like a template in C++, this macro does cache lookup
11253 * The function is passed some sort of HANDLE from which a cache_detail
11254 - * structure can be determined (via SETUP, DETAIL), a template
11255 + * structure can be determined (via DETAIL), a template
11256 * cache entry (type RTN*), and a "set" flag. Using the HASHFN and the
11257 * TEST, the function will try to find a matching cache entry in the cache.
11259 * If an entry is found, it is returned
11260 * If no entry is found, a new non-VALID entry is created.
11261 - * If "set" == 1 and INPLACE == 0 :
11263 * If no entry is found a new one is inserted with data from "template"
11264 * If a non-CACHE_VALID entry is found, it is updated from template using UPDATE
11265 * If a CACHE_VALID entry is found, a new entry is swapped in with data
11267 - * If set == 1, and INPLACE == 1 :
11268 - * As above, except that if a CACHE_VALID entry is found, we UPDATE in place
11269 - * instead of swapping in a new entry.
11271 * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not
11272 * run but insteead CACHE_NEGATIVE is set in any new item.
11273 @@ -153,21 +150,18 @@ struct cache_deferred_req {
11274 * MEMBER is the member of the cache which is cache_head, which must be first
11275 * FNAME is the name for the function
11276 * ARGS are arguments to function and must contain RTN *item, int set. May
11277 - * also contain something to be usedby SETUP or DETAIL to find cache_detail.
11278 - * SETUP locates the cache detail and makes it available as...
11279 - * DETAIL identifies the cache detail, possibly set up by SETUP
11280 + * also contain something to be used by DETAIL to find cache_detail.
11281 + * DETAIL identifies the cache detail
11282 * HASHFN returns a hash value of the cache entry "item"
11283 * TEST tests if "tmp" matches "item"
11284 * INIT copies key information from "item" to "new"
11285 * UPDATE copies content information from "item" to "tmp"
11286 - * INPLACE is true if updates can happen inplace rather than allocating a new structure
11288 -#define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE) \
11289 +#define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,DETAIL,HASHFN,TEST,INIT,UPDATE) \
11292 RTN *tmp, *new=NULL; \
11293 struct cache_head **hp, **head; \
11295 head = &(DETAIL)->hash_table[HASHFN]; \
11297 if (set||new) write_lock(&(DETAIL)->hash_lock); \
11298 @@ -176,14 +170,14 @@ RTN *FNAME ARGS \
11299 tmp = container_of(*hp, RTN, MEMBER); \
11300 if (TEST) { /* found a match */ \
11302 - if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
11303 + if (set && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \
11308 cache_get(&tmp->MEMBER); \
11310 - if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
11311 + if (test_bit(CACHE_VALID, &tmp->MEMBER.flags))\
11312 { /* need to swap in new */ \
11315 @@ -205,7 +199,7 @@ RTN *FNAME ARGS \
11316 else read_unlock(&(DETAIL)->hash_lock); \
11318 cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \
11319 - if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
11320 + if (set && new) cache_fresh(DETAIL, &new->MEMBER, 0); \
11321 if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \
11324 @@ -233,16 +227,15 @@ RTN *FNAME ARGS \
11325 new = kmalloc(sizeof(*new), GFP_KERNEL); \
11327 cache_init(&new->MEMBER); \
11328 - cache_get(&new->MEMBER); \
11334 -#define DefineSimpleCacheLookup(STRUCT,INPLACE) \
11335 - DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /*no setup */, \
11336 +#define DefineSimpleCacheLookup(STRUCT) \
11337 + DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), \
11338 & STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),\
11339 - STRUCT##_init(new, item), STRUCT##_update(tmp, item),INPLACE)
11340 + STRUCT##_init(new, item), STRUCT##_update(tmp, item))
11342 #define cache_for_each(pos, detail, index, member) \
11343 for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \
11344 --- linux-2.6.7/include/linux/sunrpc/xdr.h.lsec 2004-06-15 23:20:26.000000000 -0600
11345 +++ linux-2.6.7/include/linux/sunrpc/xdr.h 2005-03-23 14:28:24.783282064 -0700
11346 @@ -192,6 +192,7 @@ extern void xdr_write_pages(struct xdr_s
11347 extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, uint32_t *p);
11348 extern uint32_t *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
11349 extern void xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
11350 +extern void truncate_xdr_buf(struct xdr_buf *xdr, int len);
11352 #endif /* __KERNEL__ */
11354 --- linux-2.6.7/include/linux/nfsd/state.h.lsec 2004-06-15 23:18:56.000000000 -0600
11355 +++ linux-2.6.7/include/linux/nfsd/state.h 2005-03-23 14:28:24.081388768 -0700
11357 #define _NFSD4_STATE_H
11359 #include <linux/list.h>
11360 +#include <linux/sunrpc/clnt.h>
11362 #define NFS4_OPAQUE_LIMIT 1024
11364 @@ -65,6 +66,22 @@ extern stateid_t onestateid;
11365 #define ZERO_STATEID(stateid) (!memcmp((stateid), &zerostateid, sizeof(stateid_t)))
11366 #define ONE_STATEID(stateid) (!memcmp((stateid), &onestateid, sizeof(stateid_t)))
11368 +/* client delegation callback info */
11369 +struct nfs4_callback {
11370 + /* SETCLIENTID info */
11371 + u32 cb_parsed; /* addr parsed */
11373 + unsigned short cb_port;
11376 + struct xdr_netobj cb_netid;
11377 + /* RPC client info */
11378 + u32 cb_set; /* successful CB_NULL call */
11379 + struct rpc_program cb_program;
11380 + struct rpc_stat cb_stat;
11381 + struct rpc_clnt * cb_client;
11385 * struct nfs4_client - one per client. Clientids live here.
11386 * o Each nfs4_client is hashed by clientid.
11387 @@ -87,6 +104,21 @@ struct nfs4_client {
11388 struct svc_cred cl_cred; /* setclientid principal */
11389 clientid_t cl_clientid; /* generated by server */
11390 nfs4_verifier cl_confirm; /* generated by server */
11391 + struct nfs4_callback cl_callback; /* callback info */
11392 + time_t cl_first_state; /* first state acquisition*/
11393 + atomic_t cl_count; /* ref count */
11396 +/* struct nfs4_client_reset
11397 + * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl
11398 + * upon lease reset, or from upcall to state_daemon (to read in state
11399 + * from non-volatile storage) upon reboot.
11401 +struct nfs4_client_reclaim {
11402 + struct list_head cr_strhash; /* hash by cr_name */
11403 + struct xdr_netobj cr_name; /* id generated by client */
11404 + time_t cr_first_state; /* first state acquisition */
11405 + u32 cr_expired; /* boolean: lease expired? */
11409 @@ -216,5 +248,8 @@ extern int nfs4_share_conflict(struct sv
11410 extern void nfs4_lock_state(void);
11411 extern void nfs4_unlock_state(void);
11412 extern int nfs4_in_grace(void);
11413 -extern int nfs4_in_no_grace(void);
11414 +extern int nfs4_check_open_reclaim(clientid_t *clid);
11415 +extern void nfsd4_probe_callback(struct nfs4_client *clp);
11416 +extern void expire_client(struct nfs4_client *clp);
11417 +extern void put_nfs4_client(struct nfs4_client *clp);
11418 #endif /* NFSD4_STATE_H */
11419 --- linux-2.6.7/include/linux/nfsd/nfsd.h.lsec 2004-06-15 23:20:04.000000000 -0600
11420 +++ linux-2.6.7/include/linux/nfsd/nfsd.h 2005-03-23 14:28:24.133380864 -0700
11421 @@ -76,6 +76,11 @@ int nfsd_lookup(struct svc_rqst *, stru
11422 const char *, int, struct svc_fh *);
11423 int nfsd_setattr(struct svc_rqst *, struct svc_fh *,
11424 struct iattr *, int, time_t);
11425 +#ifdef CONFIG_NFSD_V4
11426 +int nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
11427 + struct nfs4_acl *);
11428 +int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
11429 +#endif /* CONFIG_NFSD_V4 */
11430 int nfsd_create(struct svc_rqst *, struct svc_fh *,
11431 char *name, int len, struct iattr *attrs,
11432 int type, dev_t rdev, struct svc_fh *res);
11433 @@ -126,9 +131,13 @@ int nfsd_permission(struct svc_export *
11434 #ifdef CONFIG_NFSD_V4
11435 void nfs4_state_init(void);
11436 void nfs4_state_shutdown(void);
11437 +time_t nfs4_lease_time(void);
11438 +void nfs4_reset_lease(time_t leasetime);
11440 void static inline nfs4_state_init(void){}
11441 void static inline nfs4_state_shutdown(void){}
11442 +time_t static inline nfs4_lease_time(void){return 0;}
11443 +void static inline nfs4_reset_lease(time_t leasetime){}
11447 @@ -249,12 +258,11 @@ static inline int is_fsid(struct svc_fh
11448 #define COMPOUND_SLACK_SPACE 140 /* OP_GETFH */
11449 #define COMPOUND_ERR_SLACK_SPACE 12 /* OP_SETATTR */
11451 -#define NFSD_LEASE_TIME 60 /* seconds */
11452 +#define NFSD_LEASE_TIME (nfs4_lease_time())
11453 #define NFSD_LAUNDROMAT_MINTIMEOUT 10 /* seconds */
11456 * The following attributes are currently not supported by the NFSv4 server:
11457 - * ACL (will be supported in a forthcoming patch)
11458 * ARCHIVE (deprecated anyway)
11459 * FS_LOCATIONS (will be supported eventually)
11460 * HIDDEN (unlikely to be supported any time soon)
11461 @@ -274,7 +282,7 @@ static inline int is_fsid(struct svc_fh
11462 | FATTR4_WORD0_FILEHANDLE | FATTR4_WORD0_FILEID | FATTR4_WORD0_FILES_AVAIL \
11463 | FATTR4_WORD0_FILES_FREE | FATTR4_WORD0_FILES_TOTAL | FATTR4_WORD0_HOMOGENEOUS \
11464 | FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD0_MAXLINK | FATTR4_WORD0_MAXNAME \
11465 - | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE)
11466 + | FATTR4_WORD0_MAXREAD | FATTR4_WORD0_MAXWRITE | FATTR4_WORD0_ACL)
11468 #define NFSD_SUPPORTED_ATTRS_WORD1 \
11469 (FATTR4_WORD1_MODE | FATTR4_WORD1_NO_TRUNC | FATTR4_WORD1_NUMLINKS \
11470 @@ -289,7 +297,8 @@ static inline int is_fsid(struct svc_fh
11471 (FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_MODIFY_SET)
11473 /* These are the only attrs allowed in CREATE/OPEN/SETATTR. */
11474 -#define NFSD_WRITEABLE_ATTRS_WORD0 FATTR4_WORD0_SIZE
11475 +#define NFSD_WRITEABLE_ATTRS_WORD0 \
11476 +(FATTR4_WORD0_SIZE | FATTR4_WORD0_ACL )
11477 #define NFSD_WRITEABLE_ATTRS_WORD1 \
11478 (FATTR4_WORD1_MODE | FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP \
11479 | FATTR4_WORD1_TIME_ACCESS_SET | FATTR4_WORD1_TIME_METADATA | FATTR4_WORD1_TIME_MODIFY_SET)
11480 --- linux-2.6.7/include/linux/nfsd/xdr4.h.lsec 2004-06-15 23:18:59.000000000 -0600
11481 +++ linux-2.6.7/include/linux/nfsd/xdr4.h 2005-03-23 14:28:24.082388616 -0700
11483 #ifndef _LINUX_NFSD_XDR4_H
11484 #define _LINUX_NFSD_XDR4_H
11486 +#include <linux/nfs4.h>
11488 #define NFSD4_MAX_TAGLEN 128
11489 #define XDR_LEN(n) (((n) + 3) & ~3)
11491 @@ -95,6 +97,7 @@ struct nfsd4_create {
11492 u32 cr_bmval[2]; /* request */
11493 struct iattr cr_iattr; /* request */
11494 struct nfsd4_change_info cr_cinfo; /* response */
11495 + struct nfs4_acl *cr_acl;
11497 #define cr_linklen u.link.namelen
11498 #define cr_linkname u.link.name
11499 @@ -216,7 +219,7 @@ struct nfsd4_open {
11500 u32 op_rflags; /* response */
11501 int op_truncate; /* used during processing */
11502 struct nfs4_stateowner *op_stateowner; /* used during processing */
11504 + struct nfs4_acl *op_acl;
11506 #define op_iattr u.iattr
11507 #define op_verf u.verf
11508 @@ -291,6 +294,7 @@ struct nfsd4_setattr {
11509 stateid_t sa_stateid; /* request */
11510 u32 sa_bmval[2]; /* request */
11511 struct iattr sa_iattr; /* request */
11512 + struct nfs4_acl *sa_acl;
11515 struct nfsd4_setclientid {
11516 @@ -378,6 +382,7 @@ struct nfsd4_compoundargs {
11519 struct tmpbuf *next;
11520 + void (*release)(const void *);
11524 @@ -449,6 +454,7 @@ extern int nfsd4_locku(struct svc_rqst *
11526 nfsd4_release_lockowner(struct svc_rqst *rqstp,
11527 struct nfsd4_release_lockowner *rlockowner);
11528 +extern void nfsd4_release_compoundargs(struct nfsd4_compoundargs *);
11532 --- linux-2.6.7/include/linux/nfs_fs.h.lsec 2004-06-15 23:19:13.000000000 -0600
11533 +++ linux-2.6.7/include/linux/nfs_fs.h 2005-03-23 14:28:23.338501704 -0700
11535 #include <linux/nfs3.h>
11536 #include <linux/nfs4.h>
11537 #include <linux/nfs_xdr.h>
11538 +#include <linux/rwsem.h>
11539 #include <linux/workqueue.h>
11542 @@ -75,15 +76,33 @@
11546 - * NFSv3 Access mode cache
11547 + * NFSv3/v4 Access mode cache entry
11549 -struct nfs_access_cache {
11550 +struct nfs_access_entry {
11551 unsigned long jiffies;
11552 struct rpc_cred * cred;
11557 +struct nfs4_state;
11558 +struct nfs_open_context {
11560 + struct dentry *dentry;
11561 + struct rpc_cred *cred;
11562 + struct nfs4_state *state;
11563 + unsigned int pid;
11567 + struct list_head list;
11568 + wait_queue_head_t waitq;
11572 + * NFSv4 delegation
11574 +struct nfs_delegation;
11577 * nfs fs inode data in memory
11579 @@ -137,7 +156,7 @@ struct nfs_inode {
11581 atomic_t data_updates;
11583 - struct nfs_access_cache cache_access;
11584 + struct nfs_access_entry cache_access;
11587 * This is the cookie verifier used for NFSv3 readdir
11588 @@ -156,16 +175,20 @@ struct nfs_inode {
11592 - /* Credentials for shared mmap */
11593 - struct rpc_cred *mm_cred;
11594 + /* Open contexts for shared mmap writes */
11595 + struct list_head open_files;
11597 wait_queue_head_t nfs_i_wait;
11599 #ifdef CONFIG_NFS_V4
11601 struct list_head open_states;
11602 + struct nfs_delegation *delegation;
11603 + int delegation_state;
11604 + struct rw_semaphore rwsem;
11605 #endif /* CONFIG_NFS_V4*/
11609 struct inode vfs_inode;
11612 @@ -259,6 +282,18 @@ static inline int nfs_verify_change_attr
11613 && chattr == NFS_I(inode)->cache_change_attribute;
11617 + * nfs_compare_fh - compare two filehandles for equality
11618 + * @fh1 - pointer to first filehandle
11619 + * @fh2 - pointer to second filehandle
11621 +static inline int nfs_compare_fh(const struct nfs_fh *fh1, const struct nfs_fh *fh2)
11623 + if (fh1->size == fh2->size)
11624 + return memcmp(fh1->data, fh2->data, fh1->size);
11625 + return (fh1->size > fh2->size) ? 1 : -1;
11629 * linux/fs/nfs/inode.c
11631 @@ -268,9 +303,12 @@ extern struct inode *nfs_fhget(struct su
11632 extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
11633 extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
11634 extern int nfs_permission(struct inode *, int, struct nameidata *);
11635 -extern void nfs_set_mmcred(struct inode *, struct rpc_cred *);
11636 +extern int nfs_access_get_cached(struct inode *, struct rpc_cred *, struct nfs_access_entry *);
11637 +extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
11638 extern int nfs_open(struct inode *, struct file *);
11639 extern int nfs_release(struct inode *, struct file *);
11640 +extern int nfs_attribute_timeout(struct inode *inode);
11641 +extern int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode);
11642 extern int __nfs_revalidate_inode(struct nfs_server *, struct inode *);
11643 extern int nfs_setattr(struct dentry *, struct iattr *);
11644 extern void nfs_begin_attr_update(struct inode *);
11645 @@ -278,6 +316,12 @@ extern void nfs_end_attr_update(struct i
11646 extern void nfs_begin_data_update(struct inode *);
11647 extern void nfs_end_data_update(struct inode *);
11648 extern void nfs_end_data_update_defer(struct inode *);
11649 +extern struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, struct rpc_cred *cred);
11650 +extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx);
11651 +extern void put_nfs_open_context(struct nfs_open_context *ctx);
11652 +extern void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx);
11653 +extern struct nfs_open_context *nfs_find_open_context(struct inode *inode, int mode);
11654 +extern void nfs_file_clear_open_context(struct file *filp);
11656 /* linux/net/ipv4/ipconfig.c: trims ip addr off front of name, too. */
11657 extern u32 root_nfs_parse_addr(char *name); /*__init*/
11658 @@ -289,16 +333,15 @@ extern struct inode_operations nfs_file_
11659 extern struct file_operations nfs_file_operations;
11660 extern struct address_space_operations nfs_file_aops;
11662 -static __inline__ struct rpc_cred *
11663 -nfs_file_cred(struct file *file)
11664 +static inline struct rpc_cred *nfs_file_cred(struct file *file)
11666 - struct rpc_cred *cred = NULL;
11668 - cred = (struct rpc_cred *)file->private_data;
11670 - BUG_ON(cred && cred->cr_magic != RPCAUTH_CRED_MAGIC);
11673 + if (file != NULL) {
11674 + struct nfs_open_context *ctx;
11676 + ctx = (struct nfs_open_context*)file->private_data;
11677 + return ctx->cred;
11683 @@ -418,28 +461,6 @@ extern int nfsroot_mount(struct sockadd
11687 -static inline int nfs_attribute_timeout(struct inode *inode)
11689 - struct nfs_inode *nfsi = NFS_I(inode);
11691 - return time_after(jiffies, nfsi->read_cache_jiffies+nfsi->attrtimeo);
11695 - * nfs_revalidate_inode - Revalidate the inode attributes
11696 - * @server - pointer to nfs_server struct
11697 - * @inode - pointer to inode struct
11699 - * Updates inode attribute information by retrieving the data from the server.
11701 -static inline int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
11703 - if (!(NFS_FLAGS(inode) & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA))
11704 - && !nfs_attribute_timeout(inode))
11705 - return NFS_STALE(inode) ? -ESTALE : 0;
11706 - return __nfs_revalidate_inode(server, inode);
11709 static inline loff_t
11710 nfs_size_to_loff_t(__u64 size)
11712 @@ -507,8 +528,6 @@ struct idmap;
11714 enum nfs4_client_state {
11717 - NFS4CLNT_SETUP_STATE,
11721 @@ -520,7 +539,6 @@ struct nfs4_client {
11722 u64 cl_clientid; /* constant */
11723 nfs4_verifier cl_confirm;
11724 unsigned long cl_state;
11725 - long cl_generation;
11727 u32 cl_lockowner_id;
11729 @@ -530,6 +548,7 @@ struct nfs4_client {
11731 struct rw_semaphore cl_sem;
11733 + struct list_head cl_delegations;
11734 struct list_head cl_state_owners;
11735 struct list_head cl_unused;
11737 @@ -573,12 +592,11 @@ struct nfs4_state_owner {
11738 u32 so_id; /* 32-bit identifier, unique */
11739 struct semaphore so_sema;
11740 u32 so_seqid; /* protected by so_sema */
11741 - unsigned int so_flags; /* protected by so_sema */
11743 - long so_generation;
11745 struct rpc_cred *so_cred; /* Associated cred */
11746 struct list_head so_states;
11747 + struct list_head so_delegations;
11751 @@ -593,10 +611,13 @@ struct nfs4_state_owner {
11752 * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
11755 +/* bits for nfs4_lock_state->flags */
11757 struct nfs4_lock_state {
11758 struct list_head ls_locks; /* Other lock stateids */
11759 - fl_owner_t ls_owner; /* POSIX lock owner */
11760 - struct nfs4_state * ls_parent; /* Parent nfs4_state */
11761 + unsigned int ls_pid; /* pid of owner process */
11762 +#define NFS_LOCK_INITIALIZED 1
11766 nfs4_stateid ls_stateid;
11767 @@ -606,6 +627,7 @@ struct nfs4_lock_state {
11768 /* bits for nfs4_state->flags */
11771 + NFS_DELEGATED_STATE,
11774 struct nfs4_state {
11775 @@ -629,8 +651,19 @@ struct nfs4_state {
11779 +struct nfs4_exception {
11784 extern struct dentry_operations nfs4_dentry_operations;
11785 extern struct inode_operations nfs4_dir_inode_operations;
11786 +extern struct inode_operations nfs4_file_inode_operations;
11789 +extern ssize_t nfs_getxattr(struct dentry *, const char *, void *, size_t);
11790 +extern int nfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
11791 +extern ssize_t nfs_listxattr(struct dentry *, char *, size_t);
11794 extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short);
11795 @@ -639,10 +672,15 @@ extern int nfs4_open_reclaim(struct nfs4
11796 extern int nfs4_proc_async_renew(struct nfs4_client *);
11797 extern int nfs4_proc_renew(struct nfs4_client *);
11798 extern int nfs4_do_close(struct inode *, struct nfs4_state *);
11799 -int nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode);
11800 +extern int nfs4_do_downgrade(struct inode *inode, struct nfs4_state *state, mode_t mode);
11801 extern int nfs4_wait_clnt_recover(struct rpc_clnt *, struct nfs4_client *);
11802 extern struct inode *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
11803 extern int nfs4_open_revalidate(struct inode *, struct dentry *, int);
11804 +extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *);
11805 +extern int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request);
11806 +extern ssize_t nfs4_proc_get_acl(struct inode *, void *buf, ssize_t buflen);
11807 +extern int nfs4_proc_set_acl(struct inode *, const void *buf, ssize_t buflen);
11808 +extern void nfs4_zap_acl_attr(struct inode *inode);
11811 extern void nfs4_schedule_state_renewal(struct nfs4_client *);
11812 @@ -654,6 +692,8 @@ extern void init_nfsv4_state(struct nfs_
11813 extern void destroy_nfsv4_state(struct nfs_server *);
11814 extern struct nfs4_client *nfs4_get_client(struct in_addr *);
11815 extern void nfs4_put_client(struct nfs4_client *clp);
11816 +extern int nfs4_init_client(struct nfs4_client *clp);
11817 +extern struct nfs4_client *nfs4_find_client(struct in_addr *);
11818 extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);
11820 extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
11821 @@ -663,15 +703,14 @@ extern void nfs4_put_open_state(struct n
11822 extern void nfs4_close_state(struct nfs4_state *, mode_t);
11823 extern struct nfs4_state *nfs4_find_state(struct inode *, struct rpc_cred *, mode_t mode);
11824 extern void nfs4_increment_seqid(int status, struct nfs4_state_owner *sp);
11825 -extern int nfs4_handle_error(struct nfs_server *, int);
11826 extern void nfs4_schedule_state_recovery(struct nfs4_client *);
11827 -extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t);
11828 -extern struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t);
11829 +extern struct nfs4_lock_state *nfs4_find_lock_state(struct nfs4_state *state, unsigned int pid);
11830 +extern struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, unsigned int pid);
11831 extern void nfs4_put_lock_state(struct nfs4_lock_state *state);
11832 extern void nfs4_increment_lock_seqid(int status, struct nfs4_lock_state *ls);
11833 -extern void nfs4_notify_setlk(struct inode *, struct file_lock *, struct nfs4_lock_state *);
11834 -extern void nfs4_notify_unlck(struct inode *, struct file_lock *, struct nfs4_lock_state *);
11835 -extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t);
11836 +extern void nfs4_notify_setlk(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
11837 +extern void nfs4_notify_unlck(struct nfs4_state *, struct file_lock *, struct nfs4_lock_state *);
11838 +extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, unsigned int pid);
11842 @@ -681,6 +720,7 @@ struct nfs4_mount_data;
11843 #define destroy_nfsv4_state(server) do { } while (0)
11844 #define nfs4_put_state_owner(inode, owner) do { } while (0)
11845 #define nfs4_put_open_state(state) do { } while (0)
11846 +#define nfs4_close_state(a, b) do { } while (0)
11847 #define nfs4_renewd_prepare_shutdown(server) do { } while (0)
11850 @@ -697,6 +737,7 @@ struct nfs4_mount_data;
11851 #define NFSDBG_XDR 0x0020
11852 #define NFSDBG_FILE 0x0040
11853 #define NFSDBG_ROOT 0x0080
11854 +#define NFSDBG_CALLBACK 0x0100
11855 #define NFSDBG_ALL 0xFFFF
11858 --- linux-2.6.7/include/linux/nfs4_acl.h.lsec 2005-03-23 14:28:24.519322192 -0700
11859 +++ linux-2.6.7/include/linux/nfs4_acl.h 2005-03-23 14:28:24.518322344 -0700
11862 + * include/linux/nfs4_acl.h
11864 + * Common NFSv4 ACL handling definitions.
11866 + * Copyright (c) 2002 The Regents of the University of Michigan.
11867 + * All rights reserved.
11869 + * Marius Aamodt Eriksen <marius@umich.edu>
11871 + * Redistribution and use in source and binary forms, with or without
11872 + * modification, are permitted provided that the following conditions
11875 + * 1. Redistributions of source code must retain the above copyright
11876 + * notice, this list of conditions and the following disclaimer.
11877 + * 2. Redistributions in binary form must reproduce the above copyright
11878 + * notice, this list of conditions and the following disclaimer in the
11879 + * documentation and/or other materials provided with the distribution.
11880 + * 3. Neither the name of the University nor the names of its
11881 + * contributors may be used to endorse or promote products derived
11882 + * from this software without specific prior written permission.
11884 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
11885 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
11886 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
11887 + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
11888 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
11889 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
11890 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
11891 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
11892 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
11893 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
11894 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
11897 +#ifndef LINUX_NFS4_ACL_H
11898 +#define LINUX_NFS4_ACL_H
11900 +#include <linux/posix_acl.h>
11902 +struct nfs4_acl *nfs4_acl_new(void);
11903 +void nfs4_acl_free(struct nfs4_acl *);
11904 +int nfs4_acl_add_ace(struct nfs4_acl *, u32, u32, u32, int, uid_t);
11905 +int nfs4_acl_get_whotype(char *, u32);
11906 +int nfs4_acl_write_who(int who, char *p);
11907 +int nfs4_acl_permission(struct nfs4_acl *acl, uid_t owner, gid_t group,
11908 + uid_t who, u32 mask);
11910 +#define NFS4_ACL_TYPE_DEFAULT 0x01
11911 +#define NFS4_ACL_DIR 0x02
11912 +#define NFS4_ACL_OWNER 0x04
11914 +struct nfs4_acl *nfs4_acl_posix_to_nfsv4(struct posix_acl *,
11915 + struct posix_acl *, unsigned int flags);
11916 +int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *, struct posix_acl **,
11917 + struct posix_acl **, unsigned int flags);
11919 +#endif /* LINUX_NFS4_ACL_H */
11920 --- linux-2.6.7/include/linux/nfs_xdr.h.lsec 2004-06-15 23:19:52.000000000 -0600
11921 +++ linux-2.6.7/include/linux/nfs_xdr.h 2005-03-23 14:28:23.539471152 -0700
11922 @@ -99,20 +99,21 @@ struct nfs4_change_info {
11923 * Arguments to the open call.
11925 struct nfs_openargs {
11926 - struct nfs_fh * fh;
11927 + const struct nfs_fh * fh;
11929 - __u32 share_access;
11934 - __u32 createmode;
11936 struct iattr * attrs; /* UNCHECKED, GUARDED */
11937 nfs4_verifier verifier; /* EXCLUSIVE */
11938 + nfs4_stateid delegation; /* CLAIM_DELEGATE_CUR */
11939 + int delegation_type; /* CLAIM_PREVIOUS */
11941 const struct qstr * name;
11942 const struct nfs_server *server; /* Needed for ID mapping */
11943 const u32 * bitmask;
11947 struct nfs_openres {
11948 @@ -122,13 +123,17 @@ struct nfs_openres {
11950 struct nfs_fattr * f_attr;
11951 const struct nfs_server *server;
11952 + int delegation_type;
11953 + nfs4_stateid delegation;
11959 * Arguments to the open_confirm call.
11961 struct nfs_open_confirmargs {
11962 - struct nfs_fh * fh;
11963 + const struct nfs_fh * fh;
11964 nfs4_stateid stateid;
11967 @@ -138,26 +143,13 @@ struct nfs_open_confirmres {
11971 - * Arguments to the open_reclaim call.
11973 -struct nfs_open_reclaimargs {
11974 - struct nfs_fh * fh;
11978 - __u32 share_access;
11980 - const __u32 * bitmask;
11984 * Arguments to the close call.
11986 struct nfs_closeargs {
11987 struct nfs_fh * fh;
11988 nfs4_stateid stateid;
11990 - __u32 share_access;
11994 struct nfs_closeres {
11995 @@ -224,6 +216,11 @@ struct nfs_lockres {
11996 const struct nfs_server * server;
11999 +struct nfs4_delegreturnargs {
12000 + const struct nfs_fh *fhandle;
12001 + const nfs4_stateid *stateid;
12005 * Arguments to the read call.
12007 @@ -235,8 +232,7 @@ struct nfs_lockres {
12009 struct nfs_readargs {
12010 struct nfs_fh * fh;
12011 - fl_owner_t lockowner;
12012 - struct nfs4_state * state;
12013 + struct nfs_open_context *context;
12016 unsigned int pgbase;
12017 @@ -259,8 +255,7 @@ struct nfs_readres {
12019 struct nfs_writeargs {
12020 struct nfs_fh * fh;
12021 - fl_owner_t lockowner;
12022 - struct nfs4_state * state;
12023 + struct nfs_open_context *context;
12026 enum nfs3_stable_how stable;
12027 @@ -331,6 +326,19 @@ struct nfs_setattrargs {
12028 const u32 * bitmask;
12031 +struct nfs_setaclargs {
12032 + struct nfs_fh * fh;
12033 + const char * acl;
12035 + const struct nfs_server * server; /* Needed for name mapping */
12038 +struct nfs_getaclres {
12041 + const struct nfs_server * server; /* Needed for name mapping */
12044 struct nfs_setattrres {
12045 struct nfs_fattr * fattr;
12046 const struct nfs_server * server;
12047 @@ -597,13 +605,15 @@ struct nfs4_rename_res {
12050 struct nfs4_setclientid {
12051 - nfs4_verifier sc_verifier; /* request */
12052 - char * sc_name; /* request */
12053 + const nfs4_verifier * sc_verifier; /* request */
12054 + unsigned int sc_name_len;
12055 + char sc_name[32]; /* request */
12056 u32 sc_prog; /* request */
12057 + unsigned int sc_netid_len;
12058 char sc_netid[4]; /* request */
12059 + unsigned int sc_uaddr_len;
12060 char sc_uaddr[24]; /* request */
12061 u32 sc_cb_ident; /* request */
12062 - struct nfs4_client * sc_state; /* response */
12065 struct nfs4_statfs_arg {
12066 @@ -657,6 +667,8 @@ struct nfs_write_data {
12067 void (*complete) (struct nfs_write_data *, int);
12070 +struct nfs_access_entry;
12073 * RPC procedure vector for NFSv2/NFSv3 demuxing
12075 @@ -664,6 +676,7 @@ struct nfs_rpc_ops {
12076 int version; /* Protocol version */
12077 struct dentry_operations *dentry_ops;
12078 struct inode_operations *dir_inode_ops;
12079 + struct inode_operations *file_inode_ops;
12081 int (*getroot) (struct nfs_server *, struct nfs_fh *,
12082 struct nfs_fsinfo *);
12083 @@ -672,11 +685,11 @@ struct nfs_rpc_ops {
12085 int (*lookup) (struct inode *, struct qstr *,
12086 struct nfs_fh *, struct nfs_fattr *);
12087 - int (*access) (struct inode *, struct rpc_cred *, int);
12088 + int (*access) (struct inode *, struct nfs_access_entry *);
12089 int (*readlink)(struct inode *, struct page *);
12090 - int (*read) (struct nfs_read_data *, struct file *);
12091 - int (*write) (struct nfs_write_data *, struct file *);
12092 - int (*commit) (struct nfs_write_data *, struct file *);
12093 + int (*read) (struct nfs_read_data *);
12094 + int (*write) (struct nfs_write_data *);
12095 + int (*commit) (struct nfs_write_data *);
12096 struct inode * (*create) (struct inode *, struct qstr *,
12097 struct iattr *, int);
12098 int (*remove) (struct inode *, struct qstr *);
12099 @@ -708,8 +721,6 @@ struct nfs_rpc_ops {
12100 void (*commit_setup) (struct nfs_write_data *, int how);
12101 int (*file_open) (struct inode *, struct file *);
12102 int (*file_release) (struct inode *, struct file *);
12103 - void (*request_init)(struct nfs_page *, struct file *);
12104 - int (*request_compatible)(struct nfs_page *, struct file *, struct page *);
12105 int (*lock)(struct file *, int, struct file_lock *);
12108 --- linux-2.6.7/arch/s390/defconfig.lsec 2004-06-15 23:19:52.000000000 -0600
12109 +++ linux-2.6.7/arch/s390/defconfig 2005-03-23 14:28:23.869420992 -0700
12110 @@ -422,7 +422,7 @@ CONFIG_NFS_V3=y
12113 # CONFIG_NFSD_V4 is not set
12114 -# CONFIG_NFSD_TCP is not set
12119 --- linux-2.6.7/arch/ia64/defconfig.lsec 2004-06-15 23:18:57.000000000 -0600
12120 +++ linux-2.6.7/arch/ia64/defconfig 2005-03-23 14:28:23.816429048 -0700
12121 @@ -987,7 +987,7 @@ CONFIG_NFS_DIRECTIO=y
12124 # CONFIG_NFSD_V4 is not set
12125 -# CONFIG_NFSD_TCP is not set
12130 --- linux-2.6.7/arch/ppc/defconfig.lsec 2004-06-15 23:19:52.000000000 -0600
12131 +++ linux-2.6.7/arch/ppc/defconfig 2005-03-23 14:28:23.817428896 -0700
12132 @@ -1230,7 +1230,7 @@ CONFIG_NFS_V3=y
12135 # CONFIG_NFSD_V4 is not set
12136 -# CONFIG_NFSD_TCP is not set
12141 --- linux-2.6.7/arch/i386/defconfig.lsec 2004-06-15 23:19:42.000000000 -0600
12142 +++ linux-2.6.7/arch/i386/defconfig 2005-03-23 14:28:23.763437104 -0700
12143 @@ -1148,7 +1148,7 @@ CONFIG_NFS_FS=y
12144 # CONFIG_NFS_DIRECTIO is not set
12146 # CONFIG_NFSD_V3 is not set
12147 -# CONFIG_NFSD_TCP is not set
12152 --- linux-2.6.7/arch/alpha/defconfig.lsec 2004-06-15 23:19:23.000000000 -0600
12153 +++ linux-2.6.7/arch/alpha/defconfig 2005-03-23 14:28:23.762437256 -0700
12154 @@ -791,7 +791,7 @@ CONFIG_NFS_V3=y
12157 # CONFIG_NFSD_V4 is not set
12158 -# CONFIG_NFSD_TCP is not set
12163 --- linux-2.6.7/net/sunrpc/svcauth_unix.c.lsec 2004-06-15 23:19:37.000000000 -0600
12164 +++ linux-2.6.7/net/sunrpc/svcauth_unix.c 2005-03-23 14:28:24.295356240 -0700
12165 @@ -55,12 +55,10 @@ struct auth_domain *unix_domain_find(cha
12168 cache_init(&new->h.h);
12169 - atomic_inc(&new->h.h.refcnt);
12170 new->h.name = strdup(name);
12171 new->h.flavour = RPC_AUTH_UNIX;
12172 new->addr_changes = 0;
12173 new->h.h.expiry_time = NEVER;
12174 - new->h.h.flags = 0;
12176 rv = auth_domain_lookup(&new->h, 2);
12177 if (rv == &new->h) {
12178 @@ -262,7 +260,7 @@ struct cache_detail ip_map_cache = {
12179 .cache_show = ip_map_show,
12182 -static DefineSimpleCacheLookup(ip_map, 0)
12183 +static DefineSimpleCacheLookup(ip_map)
12186 int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
12187 @@ -318,7 +316,8 @@ struct auth_domain *auth_unix_lookup(str
12190 if ((ipm->m_client->addr_changes - ipm->m_add_change) >0) {
12191 - set_bit(CACHE_NEGATIVE, &ipm->h.flags);
12192 + if (test_and_set_bit(CACHE_NEGATIVE, &ipm->h.flags) == 0)
12193 + auth_domain_put(&ipm->m_client->h);
12196 rv = &ipm->m_client->h;
12197 @@ -405,6 +404,9 @@ svcauth_null_release(struct svc_rqst *rq
12198 if (rqstp->rq_client)
12199 auth_domain_put(rqstp->rq_client);
12200 rqstp->rq_client = NULL;
12201 + if (rqstp->rq_cred.cr_group_info)
12202 + put_group_info(rqstp->rq_cred.cr_group_info);
12203 + rqstp->rq_cred.cr_group_info = NULL;
12205 return 0; /* don't drop */
12207 --- linux-2.6.7/net/sunrpc/xprt.c.lsec 2004-06-15 23:19:42.000000000 -0600
12208 +++ linux-2.6.7/net/sunrpc/xprt.c 2005-03-23 14:28:23.706445768 -0700
12209 @@ -1099,7 +1099,7 @@ xprt_write_space(struct sock *sk)
12212 spin_lock_bh(&xprt->sock_lock);
12213 - if (xprt->snd_task && xprt->snd_task->tk_rpcwait == &xprt->pending)
12214 + if (xprt->snd_task)
12215 rpc_wake_up_task(xprt->snd_task);
12216 spin_unlock_bh(&xprt->sock_lock);
12218 @@ -1357,6 +1357,7 @@ xprt_request_init(struct rpc_task *task,
12219 req->rq_task = task;
12220 req->rq_xprt = xprt;
12221 req->rq_xid = xprt_alloc_xid(xprt);
12222 + req->rq_release_snd_buf = NULL;
12223 dprintk("RPC: %4d reserved req %p xid %08x\n", task->tk_pid,
12226 @@ -1382,6 +1383,8 @@ xprt_release(struct rpc_task *task)
12227 mod_timer(&xprt->timer, xprt->last_used + XPRT_IDLE_TIMEOUT);
12228 spin_unlock_bh(&xprt->sock_lock);
12229 task->tk_rqstp = NULL;
12230 + if (req->rq_release_snd_buf)
12231 + req->rq_release_snd_buf(req);
12232 memset(req, 0, sizeof(*req)); /* mark unused */
12234 dprintk("RPC: %4d release request %p\n", task->tk_pid, req);
12235 --- linux-2.6.7/net/sunrpc/sched.c.lsec 2004-06-15 23:19:35.000000000 -0600
12236 +++ linux-2.6.7/net/sunrpc/sched.c 2005-03-23 14:28:23.651454128 -0700
12237 @@ -41,13 +41,7 @@ static mempool_t *rpc_buffer_mempool;
12239 static void __rpc_default_timer(struct rpc_task *task);
12240 static void rpciod_killall(void);
12243 - * When an asynchronous RPC task is activated within a bottom half
12244 - * handler, or while executing another RPC task, it is put on
12245 - * schedq, and rpciod is woken up.
12247 -static RPC_WAITQ(schedq, "schedq");
12248 +static void rpc_async_schedule(void *);
12251 * RPC tasks that create another task (e.g. for contacting the portmapper)
12252 @@ -68,26 +62,18 @@ static LIST_HEAD(all_tasks);
12254 * rpciod-related stuff
12256 -static DECLARE_WAIT_QUEUE_HEAD(rpciod_idle);
12257 -static DECLARE_COMPLETION(rpciod_killer);
12258 static DECLARE_MUTEX(rpciod_sema);
12259 static unsigned int rpciod_users;
12260 -static pid_t rpciod_pid;
12261 -static int rpc_inhibit;
12262 +static struct workqueue_struct *rpciod_workqueue;
12265 - * Spinlock for wait queues. Access to the latter also has to be
12266 - * interrupt-safe in order to allow timers to wake up sleeping tasks.
12268 -static spinlock_t rpc_queue_lock = SPIN_LOCK_UNLOCKED;
12270 * Spinlock for other critical sections of code.
12272 static spinlock_t rpc_sched_lock = SPIN_LOCK_UNLOCKED;
12275 * Disable the timer for a given RPC task. Should be called with
12276 - * rpc_queue_lock and bh_disabled in order to avoid races within
12277 + * queue->lock and bh_disabled in order to avoid races within
12281 @@ -105,16 +91,13 @@ __rpc_disable_timer(struct rpc_task *tas
12282 * without calling del_timer_sync(). The latter could cause a
12283 * deadlock if called while we're holding spinlocks...
12286 -rpc_run_timer(struct rpc_task *task)
12287 +static void rpc_run_timer(struct rpc_task *task)
12289 void (*callback)(struct rpc_task *);
12291 - spin_lock_bh(&rpc_queue_lock);
12292 callback = task->tk_timeout_fn;
12293 task->tk_timeout_fn = NULL;
12294 - spin_unlock_bh(&rpc_queue_lock);
12296 + if (callback && RPC_IS_QUEUED(task)) {
12297 dprintk("RPC: %4d running timer\n", task->tk_pid);
12300 @@ -140,19 +123,8 @@ __rpc_add_timer(struct rpc_task *task, r
12304 - * Set up a timer for an already sleeping task.
12306 -void rpc_add_timer(struct rpc_task *task, rpc_action timer)
12308 - spin_lock_bh(&rpc_queue_lock);
12309 - if (!RPC_IS_RUNNING(task))
12310 - __rpc_add_timer(task, timer);
12311 - spin_unlock_bh(&rpc_queue_lock);
12315 * Delete any timer for the current task. Because we use del_timer_sync(),
12316 - * this function should never be called while holding rpc_queue_lock.
12317 + * this function should never be called while holding queue->lock.
12320 rpc_delete_timer(struct rpc_task *task)
12321 @@ -169,16 +141,17 @@ static void __rpc_add_wait_queue_priorit
12322 struct list_head *q;
12323 struct rpc_task *t;
12325 + INIT_LIST_HEAD(&task->u.tk_wait.links);
12326 q = &queue->tasks[task->tk_priority];
12327 if (unlikely(task->tk_priority > queue->maxpriority))
12328 q = &queue->tasks[queue->maxpriority];
12329 - list_for_each_entry(t, q, tk_list) {
12330 + list_for_each_entry(t, q, u.tk_wait.list) {
12331 if (t->tk_cookie == task->tk_cookie) {
12332 - list_add_tail(&task->tk_list, &t->tk_links);
12333 + list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
12337 - list_add_tail(&task->tk_list, q);
12338 + list_add_tail(&task->u.tk_wait.list, q);
12342 @@ -189,37 +162,21 @@ static void __rpc_add_wait_queue_priorit
12343 * improve overall performance.
12344 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
12346 -static int __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
12347 +static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
12349 - if (task->tk_rpcwait == queue)
12351 + BUG_ON (RPC_IS_QUEUED(task));
12353 - if (task->tk_rpcwait) {
12354 - printk(KERN_WARNING "RPC: doubly enqueued task!\n");
12355 - return -EWOULDBLOCK;
12357 if (RPC_IS_PRIORITY(queue))
12358 __rpc_add_wait_queue_priority(queue, task);
12359 else if (RPC_IS_SWAPPER(task))
12360 - list_add(&task->tk_list, &queue->tasks[0]);
12361 + list_add(&task->u.tk_wait.list, &queue->tasks[0]);
12363 - list_add_tail(&task->tk_list, &queue->tasks[0]);
12364 - task->tk_rpcwait = queue;
12365 + list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
12366 + task->u.tk_wait.rpc_waitq = queue;
12367 + rpc_set_queued(task);
12369 dprintk("RPC: %4d added to queue %p \"%s\"\n",
12370 task->tk_pid, queue, rpc_qname(queue));
12375 -int rpc_add_wait_queue(struct rpc_wait_queue *q, struct rpc_task *task)
12379 - spin_lock_bh(&rpc_queue_lock);
12380 - result = __rpc_add_wait_queue(q, task);
12381 - spin_unlock_bh(&rpc_queue_lock);
12386 @@ -229,12 +186,12 @@ static void __rpc_remove_wait_queue_prio
12388 struct rpc_task *t;
12390 - if (!list_empty(&task->tk_links)) {
12391 - t = list_entry(task->tk_links.next, struct rpc_task, tk_list);
12392 - list_move(&t->tk_list, &task->tk_list);
12393 - list_splice_init(&task->tk_links, &t->tk_links);
12394 + if (!list_empty(&task->u.tk_wait.links)) {
12395 + t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
12396 + list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
12397 + list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
12399 - list_del(&task->tk_list);
12400 + list_del(&task->u.tk_wait.list);
12404 @@ -243,31 +200,17 @@ static void __rpc_remove_wait_queue_prio
12406 static void __rpc_remove_wait_queue(struct rpc_task *task)
12408 - struct rpc_wait_queue *queue = task->tk_rpcwait;
12412 + struct rpc_wait_queue *queue;
12413 + queue = task->u.tk_wait.rpc_waitq;
12415 if (RPC_IS_PRIORITY(queue))
12416 __rpc_remove_wait_queue_priority(task);
12418 - list_del(&task->tk_list);
12419 - task->tk_rpcwait = NULL;
12421 + list_del(&task->u.tk_wait.list);
12422 dprintk("RPC: %4d removed from queue %p \"%s\"\n",
12423 task->tk_pid, queue, rpc_qname(queue));
12427 -rpc_remove_wait_queue(struct rpc_task *task)
12429 - if (!task->tk_rpcwait)
12431 - spin_lock_bh(&rpc_queue_lock);
12432 - __rpc_remove_wait_queue(task);
12433 - spin_unlock_bh(&rpc_queue_lock);
12436 static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
12438 queue->priority = priority;
12439 @@ -290,6 +233,7 @@ static void __rpc_init_priority_wait_que
12443 + spin_lock_init(&queue->lock);
12444 for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
12445 INIT_LIST_HEAD(&queue->tasks[i]);
12446 queue->maxpriority = maxprio;
12447 @@ -316,34 +260,27 @@ EXPORT_SYMBOL(rpc_init_wait_queue);
12448 * Note: If the task is ASYNC, this must be called with
12449 * the spinlock held to protect the wait queue operation.
12451 -static inline void
12452 -rpc_make_runnable(struct rpc_task *task)
12453 +static void rpc_make_runnable(struct rpc_task *task)
12455 - if (task->tk_timeout_fn) {
12456 - printk(KERN_ERR "RPC: task w/ running timer in rpc_make_runnable!!\n");
12457 + if (rpc_test_and_set_running(task))
12460 - rpc_set_running(task);
12461 + BUG_ON(task->tk_timeout_fn);
12462 if (RPC_IS_ASYNC(task)) {
12463 - if (RPC_IS_SLEEPING(task)) {
12465 - status = __rpc_add_wait_queue(&schedq, task);
12466 - if (status < 0) {
12467 - printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
12468 - task->tk_status = status;
12471 - rpc_clear_sleeping(task);
12472 - wake_up(&rpciod_idle);
12475 + INIT_WORK(&task->u.tk_work, rpc_async_schedule, (void *)task);
12476 + status = queue_work(task->tk_workqueue, &task->u.tk_work);
12477 + if (status < 0) {
12478 + printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
12479 + task->tk_status = status;
12483 - rpc_clear_sleeping(task);
12484 - wake_up(&task->tk_wait);
12487 + wake_up(&task->u.tk_wait.waitq);
12491 - * Place a newly initialized task on the schedq.
12492 + * Place a newly initialized task on the workqueue.
12495 rpc_schedule_run(struct rpc_task *task)
12496 @@ -352,33 +289,18 @@ rpc_schedule_run(struct rpc_task *task)
12497 if (RPC_IS_ACTIVATED(task))
12499 task->tk_active = 1;
12500 - rpc_set_sleeping(task);
12501 rpc_make_runnable(task);
12505 - * For other people who may need to wake the I/O daemon
12506 - * but should (for now) know nothing about its innards
12508 -void rpciod_wake_up(void)
12510 - if(rpciod_pid==0)
12511 - printk(KERN_ERR "rpciod: wot no daemon?\n");
12512 - wake_up(&rpciod_idle);
12516 * Prepare for sleeping on a wait queue.
12517 * By always appending tasks to the list we ensure FIFO behavior.
12518 * NB: An RPC task will only receive interrupt-driven events as long
12519 * as it's on a wait queue.
12522 -__rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
12523 +static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
12524 rpc_action action, rpc_action timer)
12528 dprintk("RPC: %4d sleep_on(queue \"%s\" time %ld)\n", task->tk_pid,
12529 rpc_qname(q), jiffies);
12531 @@ -388,49 +310,36 @@ __rpc_sleep_on(struct rpc_wait_queue *q,
12534 /* Mark the task as being activated if so needed */
12535 - if (!RPC_IS_ACTIVATED(task)) {
12536 + if (!RPC_IS_ACTIVATED(task))
12537 task->tk_active = 1;
12538 - rpc_set_sleeping(task);
12541 - status = __rpc_add_wait_queue(q, task);
12543 - printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
12544 - task->tk_status = status;
12546 - rpc_clear_running(task);
12547 - if (task->tk_callback) {
12548 - dprintk(KERN_ERR "RPC: %4d overwrites an active callback\n", task->tk_pid);
12551 - task->tk_callback = action;
12552 - __rpc_add_timer(task, timer);
12554 + __rpc_add_wait_queue(q, task);
12556 + BUG_ON(task->tk_callback != NULL);
12557 + task->tk_callback = action;
12558 + __rpc_add_timer(task, timer);
12562 -rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
12563 +void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
12564 rpc_action action, rpc_action timer)
12567 * Protect the queue operations.
12569 - spin_lock_bh(&rpc_queue_lock);
12570 + spin_lock_bh(&q->lock);
12571 __rpc_sleep_on(q, task, action, timer);
12572 - spin_unlock_bh(&rpc_queue_lock);
12573 + spin_unlock_bh(&q->lock);
12577 - * __rpc_wake_up_task - wake up a single rpc_task
12578 + * __rpc_do_wake_up_task - wake up a single rpc_task
12579 * @task: task to be woken up
12581 - * Caller must hold rpc_queue_lock
12582 + * Caller must hold queue->lock, and have cleared the task queued flag.
12585 -__rpc_wake_up_task(struct rpc_task *task)
12586 +static void __rpc_do_wake_up_task(struct rpc_task *task)
12588 - dprintk("RPC: %4d __rpc_wake_up_task (now %ld inh %d)\n",
12589 - task->tk_pid, jiffies, rpc_inhibit);
12590 + dprintk("RPC: %4d __rpc_wake_up_task (now %ld)\n", task->tk_pid, jiffies);
12593 if (task->tk_magic != 0xf00baa) {
12594 @@ -445,12 +354,9 @@ __rpc_wake_up_task(struct rpc_task *task
12595 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
12598 - if (RPC_IS_RUNNING(task))
12601 __rpc_disable_timer(task);
12602 - if (task->tk_rpcwait != &schedq)
12603 - __rpc_remove_wait_queue(task);
12604 + __rpc_remove_wait_queue(task);
12606 rpc_make_runnable(task);
12608 @@ -458,6 +364,15 @@ __rpc_wake_up_task(struct rpc_task *task
12612 + * Wake up the specified task
12614 +static void __rpc_wake_up_task(struct rpc_task *task)
12616 + if (rpc_test_and_clear_queued(task))
12617 + __rpc_do_wake_up_task(task);
12621 * Default timeout handler if none specified by user
12624 @@ -471,14 +386,15 @@ __rpc_default_timer(struct rpc_task *tas
12626 * Wake up the specified task
12629 -rpc_wake_up_task(struct rpc_task *task)
12630 +void rpc_wake_up_task(struct rpc_task *task)
12632 - if (RPC_IS_RUNNING(task))
12634 - spin_lock_bh(&rpc_queue_lock);
12635 - __rpc_wake_up_task(task);
12636 - spin_unlock_bh(&rpc_queue_lock);
12637 + if (rpc_test_and_clear_queued(task)) {
12638 + struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
12640 + spin_lock_bh(&queue->lock);
12641 + __rpc_do_wake_up_task(task);
12642 + spin_unlock_bh(&queue->lock);
12647 @@ -494,11 +410,11 @@ static struct rpc_task * __rpc_wake_up_n
12649 q = &queue->tasks[queue->priority];
12650 if (!list_empty(q)) {
12651 - task = list_entry(q->next, struct rpc_task, tk_list);
12652 + task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
12653 if (queue->cookie == task->tk_cookie) {
12656 - list_move_tail(&task->tk_list, q);
12657 + list_move_tail(&task->u.tk_wait.list, q);
12660 * Check if we need to switch queues.
12661 @@ -516,7 +432,7 @@ static struct rpc_task * __rpc_wake_up_n
12664 if (!list_empty(q)) {
12665 - task = list_entry(q->next, struct rpc_task, tk_list);
12666 + task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
12669 } while (q != &queue->tasks[queue->priority]);
12670 @@ -541,14 +457,14 @@ struct rpc_task * rpc_wake_up_next(struc
12671 struct rpc_task *task = NULL;
12673 dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
12674 - spin_lock_bh(&rpc_queue_lock);
12675 + spin_lock_bh(&queue->lock);
12676 if (RPC_IS_PRIORITY(queue))
12677 task = __rpc_wake_up_next_priority(queue);
12679 task_for_first(task, &queue->tasks[0])
12680 __rpc_wake_up_task(task);
12682 - spin_unlock_bh(&rpc_queue_lock);
12683 + spin_unlock_bh(&queue->lock);
12687 @@ -557,25 +473,25 @@ struct rpc_task * rpc_wake_up_next(struc
12688 * rpc_wake_up - wake up all rpc_tasks
12689 * @queue: rpc_wait_queue on which the tasks are sleeping
12691 - * Grabs rpc_queue_lock
12692 + * Grabs queue->lock
12694 void rpc_wake_up(struct rpc_wait_queue *queue)
12696 struct rpc_task *task;
12698 struct list_head *head;
12699 - spin_lock_bh(&rpc_queue_lock);
12700 + spin_lock_bh(&queue->lock);
12701 head = &queue->tasks[queue->maxpriority];
12703 while (!list_empty(head)) {
12704 - task = list_entry(head->next, struct rpc_task, tk_list);
12705 + task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
12706 __rpc_wake_up_task(task);
12708 if (head == &queue->tasks[0])
12712 - spin_unlock_bh(&rpc_queue_lock);
12713 + spin_unlock_bh(&queue->lock);
12717 @@ -583,18 +499,18 @@ void rpc_wake_up(struct rpc_wait_queue *
12718 * @queue: rpc_wait_queue on which the tasks are sleeping
12719 * @status: status value to set
12721 - * Grabs rpc_queue_lock
12722 + * Grabs queue->lock
12724 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
12726 struct list_head *head;
12727 struct rpc_task *task;
12729 - spin_lock_bh(&rpc_queue_lock);
12730 + spin_lock_bh(&queue->lock);
12731 head = &queue->tasks[queue->maxpriority];
12733 while (!list_empty(head)) {
12734 - task = list_entry(head->next, struct rpc_task, tk_list);
12735 + task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
12736 task->tk_status = status;
12737 __rpc_wake_up_task(task);
12739 @@ -602,7 +518,7 @@ void rpc_wake_up_status(struct rpc_wait_
12743 - spin_unlock_bh(&rpc_queue_lock);
12744 + spin_unlock_bh(&queue->lock);
12748 @@ -626,18 +542,14 @@ __rpc_atrun(struct rpc_task *task)
12750 * This is the RPC `scheduler' (or rather, the finite state machine).
12753 -__rpc_execute(struct rpc_task *task)
12754 +static int __rpc_execute(struct rpc_task *task)
12758 dprintk("RPC: %4d rpc_execute flgs %x\n",
12759 task->tk_pid, task->tk_flags);
12761 - if (!RPC_IS_RUNNING(task)) {
12762 - printk(KERN_WARNING "RPC: rpc_execute called for sleeping task!!\n");
12765 + BUG_ON(RPC_IS_QUEUED(task));
12769 @@ -657,7 +569,9 @@ __rpc_execute(struct rpc_task *task)
12771 save_callback=task->tk_callback;
12772 task->tk_callback=NULL;
12774 save_callback(task);
12779 @@ -665,43 +579,41 @@ __rpc_execute(struct rpc_task *task)
12780 * tk_action may be NULL when the task has been killed
12783 - if (RPC_IS_RUNNING(task)) {
12784 + if (!RPC_IS_QUEUED(task)) {
12786 * Garbage collection of pending timers...
12788 rpc_delete_timer(task);
12789 if (!task->tk_action)
12792 task->tk_action(task);
12793 - /* micro-optimization to avoid spinlock */
12794 - if (RPC_IS_RUNNING(task))
12800 - * Check whether task is sleeping.
12801 + * Lockless check for whether task is sleeping or not.
12803 - spin_lock_bh(&rpc_queue_lock);
12804 - if (!RPC_IS_RUNNING(task)) {
12805 - rpc_set_sleeping(task);
12806 - if (RPC_IS_ASYNC(task)) {
12807 - spin_unlock_bh(&rpc_queue_lock);
12808 + if (!RPC_IS_QUEUED(task))
12810 + if (RPC_IS_ASYNC(task)) {
12811 + rpc_clear_running(task);
12812 + /* Careful! we may have raced... */
12813 + if (RPC_IS_QUEUED(task))
12816 + if (rpc_test_and_set_running(task))
12820 - spin_unlock_bh(&rpc_queue_lock);
12822 - if (!RPC_IS_SLEEPING(task))
12824 + init_waitqueue_head(&task->u.tk_wait.waitq);
12825 + rpc_clear_running(task);
12826 /* sync task: sleep here */
12827 dprintk("RPC: %4d sync task going to sleep\n", task->tk_pid);
12828 - if (current->pid == rpciod_pid)
12829 - printk(KERN_ERR "RPC: rpciod waiting on sync task!\n");
12831 if (!task->tk_client->cl_intr) {
12832 - __wait_event(task->tk_wait, !RPC_IS_SLEEPING(task));
12833 + __wait_event(task->u.tk_wait.waitq, RPC_IS_RUNNING(task));
12835 - __wait_event_interruptible(task->tk_wait, !RPC_IS_SLEEPING(task), status);
12836 + __wait_event_interruptible(task->u.tk_wait.waitq, RPC_IS_RUNNING(task), status);
12838 * When a sync task receives a signal, it exits with
12839 * -ERESTARTSYS. In order to catch any callbacks that
12840 @@ -719,7 +631,9 @@ __rpc_execute(struct rpc_task *task)
12843 if (task->tk_exit) {
12845 task->tk_exit(task);
12847 /* If tk_action is non-null, the user wants us to restart */
12848 if (task->tk_action) {
12849 if (!RPC_ASSASSINATED(task)) {
12850 @@ -738,7 +652,6 @@ __rpc_execute(struct rpc_task *task)
12852 /* Release all resources associated with the task */
12853 rpc_release_task(task);
12858 @@ -754,57 +667,16 @@ __rpc_execute(struct rpc_task *task)
12860 rpc_execute(struct rpc_task *task)
12862 - int status = -EIO;
12863 - if (rpc_inhibit) {
12864 - printk(KERN_INFO "RPC: execution inhibited!\n");
12865 - goto out_release;
12868 - status = -EWOULDBLOCK;
12869 - if (task->tk_active) {
12870 - printk(KERN_ERR "RPC: active task was run twice!\n");
12873 + BUG_ON(task->tk_active);
12875 task->tk_active = 1;
12876 rpc_set_running(task);
12877 return __rpc_execute(task);
12879 - rpc_release_task(task);
12885 - * This is our own little scheduler for async RPC tasks.
12888 -__rpc_schedule(void)
12889 +static void rpc_async_schedule(void *arg)
12891 - struct rpc_task *task;
12894 - dprintk("RPC: rpc_schedule enter\n");
12897 - task_for_first(task, &schedq.tasks[0]) {
12898 - __rpc_remove_wait_queue(task);
12899 - spin_unlock_bh(&rpc_queue_lock);
12901 - __rpc_execute(task);
12902 - spin_lock_bh(&rpc_queue_lock);
12907 - if (++count >= 200 || need_resched()) {
12909 - spin_unlock_bh(&rpc_queue_lock);
12911 - spin_lock_bh(&rpc_queue_lock);
12914 - dprintk("RPC: rpc_schedule leave\n");
12915 + __rpc_execute((struct rpc_task *)arg);
12919 @@ -862,7 +734,6 @@ void rpc_init_task(struct rpc_task *task
12920 task->tk_client = clnt;
12921 task->tk_flags = flags;
12922 task->tk_exit = callback;
12923 - init_waitqueue_head(&task->tk_wait);
12924 if (current->uid != current->fsuid || current->gid != current->fsgid)
12925 task->tk_flags |= RPC_TASK_SETUID;
12927 @@ -873,7 +744,9 @@ void rpc_init_task(struct rpc_task *task
12929 task->tk_priority = RPC_PRIORITY_NORMAL;
12930 task->tk_cookie = (unsigned long)current;
12931 - INIT_LIST_HEAD(&task->tk_links);
12933 + /* Initialize workqueue for async tasks */
12934 + task->tk_workqueue = rpciod_workqueue;
12936 /* Add to global list of all tasks */
12937 spin_lock(&rpc_sched_lock);
12938 @@ -942,8 +815,7 @@ cleanup:
12943 -rpc_release_task(struct rpc_task *task)
12944 +void rpc_release_task(struct rpc_task *task)
12946 dprintk("RPC: %4d release task\n", task->tk_pid);
12948 @@ -961,19 +833,9 @@ rpc_release_task(struct rpc_task *task)
12949 list_del(&task->tk_task);
12950 spin_unlock(&rpc_sched_lock);
12952 - /* Protect the execution below. */
12953 - spin_lock_bh(&rpc_queue_lock);
12955 - /* Disable timer to prevent zombie wakeup */
12956 - __rpc_disable_timer(task);
12958 - /* Remove from any wait queue we're still on */
12959 - __rpc_remove_wait_queue(task);
12961 + BUG_ON (rpc_test_and_clear_queued(task));
12962 task->tk_active = 0;
12964 - spin_unlock_bh(&rpc_queue_lock);
12966 /* Synchronously delete any running timer */
12967 rpc_delete_timer(task);
12969 @@ -1003,10 +865,9 @@ rpc_release_task(struct rpc_task *task)
12970 * queue 'childq'. If so returns a pointer to the parent.
12971 * Upon failure returns NULL.
12973 - * Caller must hold rpc_queue_lock
12974 + * Caller must hold childq.lock
12976 -static inline struct rpc_task *
12977 -rpc_find_parent(struct rpc_task *child)
12978 +static inline struct rpc_task *rpc_find_parent(struct rpc_task *child)
12980 struct rpc_task *task, *parent;
12981 struct list_head *le;
12982 @@ -1019,17 +880,16 @@ rpc_find_parent(struct rpc_task *child)
12987 -rpc_child_exit(struct rpc_task *child)
12988 +static void rpc_child_exit(struct rpc_task *child)
12990 struct rpc_task *parent;
12992 - spin_lock_bh(&rpc_queue_lock);
12993 + spin_lock_bh(&childq.lock);
12994 if ((parent = rpc_find_parent(child)) != NULL) {
12995 parent->tk_status = child->tk_status;
12996 __rpc_wake_up_task(parent);
12998 - spin_unlock_bh(&rpc_queue_lock);
12999 + spin_unlock_bh(&childq.lock);
13003 @@ -1052,22 +912,20 @@ fail:
13008 -rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
13009 +void rpc_run_child(struct rpc_task *task, struct rpc_task *child, rpc_action func)
13011 - spin_lock_bh(&rpc_queue_lock);
13012 + spin_lock_bh(&childq.lock);
13013 /* N.B. Is it possible for the child to have already finished? */
13014 __rpc_sleep_on(&childq, task, func, NULL);
13015 rpc_schedule_run(child);
13016 - spin_unlock_bh(&rpc_queue_lock);
13017 + spin_unlock_bh(&childq.lock);
13021 * Kill all tasks for the given client.
13022 * XXX: kill their descendants as well?
13025 -rpc_killall_tasks(struct rpc_clnt *clnt)
13026 +void rpc_killall_tasks(struct rpc_clnt *clnt)
13028 struct rpc_task *rovr;
13029 struct list_head *le;
13030 @@ -1089,93 +947,14 @@ rpc_killall_tasks(struct rpc_clnt *clnt)
13032 static DECLARE_MUTEX_LOCKED(rpciod_running);
13035 -rpciod_task_pending(void)
13037 - return !list_empty(&schedq.tasks[0]);
13042 - * This is the rpciod kernel thread
13051 - * Let our maker know we're running ...
13053 - rpciod_pid = current->pid;
13054 - up(&rpciod_running);
13056 - daemonize("rpciod");
13057 - allow_signal(SIGKILL);
13059 - dprintk("RPC: rpciod starting (pid %d)\n", rpciod_pid);
13060 - spin_lock_bh(&rpc_queue_lock);
13061 - while (rpciod_users) {
13062 - DEFINE_WAIT(wait);
13063 - if (signalled()) {
13064 - spin_unlock_bh(&rpc_queue_lock);
13065 - rpciod_killall();
13066 - flush_signals(current);
13067 - spin_lock_bh(&rpc_queue_lock);
13069 - __rpc_schedule();
13070 - if (current->flags & PF_FREEZE) {
13071 - spin_unlock_bh(&rpc_queue_lock);
13072 - refrigerator(PF_FREEZE);
13073 - spin_lock_bh(&rpc_queue_lock);
13076 - if (++rounds >= 64) { /* safeguard */
13077 - spin_unlock_bh(&rpc_queue_lock);
13080 - spin_lock_bh(&rpc_queue_lock);
13083 - dprintk("RPC: rpciod back to sleep\n");
13084 - prepare_to_wait(&rpciod_idle, &wait, TASK_INTERRUPTIBLE);
13085 - if (!rpciod_task_pending() && !signalled()) {
13086 - spin_unlock_bh(&rpc_queue_lock);
13089 - spin_lock_bh(&rpc_queue_lock);
13091 - finish_wait(&rpciod_idle, &wait);
13092 - dprintk("RPC: switch to rpciod\n");
13094 - spin_unlock_bh(&rpc_queue_lock);
13096 - dprintk("RPC: rpciod shutdown commences\n");
13097 - if (!list_empty(&all_tasks)) {
13098 - printk(KERN_ERR "rpciod: active tasks at shutdown?!\n");
13099 - rpciod_killall();
13102 - dprintk("RPC: rpciod exiting\n");
13106 - complete_and_exit(&rpciod_killer, 0);
13111 -rpciod_killall(void)
13112 +static void rpciod_killall(void)
13114 unsigned long flags;
13116 while (!list_empty(&all_tasks)) {
13117 clear_thread_flag(TIF_SIGPENDING);
13118 rpc_killall_tasks(NULL);
13119 - spin_lock_bh(&rpc_queue_lock);
13120 - __rpc_schedule();
13121 - spin_unlock_bh(&rpc_queue_lock);
13122 + flush_workqueue(rpciod_workqueue);
13123 if (!list_empty(&all_tasks)) {
13124 dprintk("rpciod_killall: waiting for tasks to exit\n");
13126 @@ -1193,28 +972,30 @@ rpciod_killall(void)
13130 + struct workqueue_struct *wq;
13133 down(&rpciod_sema);
13134 - dprintk("rpciod_up: pid %d, users %d\n", rpciod_pid, rpciod_users);
13135 + dprintk("rpciod_up: users %d\n", rpciod_users);
13138 + if (rpciod_workqueue)
13141 * If there's no pid, we should be the first user.
13143 if (rpciod_users > 1)
13144 - printk(KERN_WARNING "rpciod_up: no pid, %d users??\n", rpciod_users);
13145 + printk(KERN_WARNING "rpciod_up: no workqueue, %d users??\n", rpciod_users);
13147 * Create the rpciod thread and wait for it to start.
13149 - error = kernel_thread(rpciod, NULL, 0);
13151 - printk(KERN_WARNING "rpciod_up: create thread failed, error=%d\n", error);
13153 + wq = create_workqueue("rpciod");
13154 + if (wq == NULL) {
13155 + printk(KERN_WARNING "rpciod_up: create workqueue failed, error=%d\n", error);
13159 - down(&rpciod_running);
13160 + rpciod_workqueue = wq;
13164 @@ -1225,20 +1006,21 @@ void
13167 down(&rpciod_sema);
13168 - dprintk("rpciod_down pid %d sema %d\n", rpciod_pid, rpciod_users);
13169 + dprintk("rpciod_down sema %d\n", rpciod_users);
13170 if (rpciod_users) {
13171 if (--rpciod_users)
13174 - printk(KERN_WARNING "rpciod_down: pid=%d, no users??\n", rpciod_pid);
13175 + printk(KERN_WARNING "rpciod_down: no users??\n");
13177 - if (!rpciod_pid) {
13178 + if (!rpciod_workqueue) {
13179 dprintk("rpciod_down: Nothing to do!\n");
13182 + rpciod_killall();
13184 - kill_proc(rpciod_pid, SIGKILL, 1);
13185 - wait_for_completion(&rpciod_killer);
13186 + destroy_workqueue(rpciod_workqueue);
13187 + rpciod_workqueue = NULL;
13191 @@ -1256,7 +1038,12 @@ void rpc_show_tasks(void)
13193 printk("-pid- proc flgs status -client- -prog- --rqstp- -timeout "
13194 "-rpcwait -action- --exit--\n");
13195 - alltask_for_each(t, le, &all_tasks)
13196 + alltask_for_each(t, le, &all_tasks) {
13197 + const char *rpc_waitq = "none";
13199 + if (RPC_IS_QUEUED(t))
13200 + rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
13202 printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
13204 (t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
13205 @@ -1264,8 +1051,9 @@ void rpc_show_tasks(void)
13207 (t->tk_client ? t->tk_client->cl_prog : 0),
13208 t->tk_rqstp, t->tk_timeout,
13209 - rpc_qname(t->tk_rpcwait),
13211 t->tk_action, t->tk_exit);
13213 spin_unlock(&rpc_sched_lock);
13216 --- linux-2.6.7/net/sunrpc/svcsock.c.lsec 2004-06-15 23:18:57.000000000 -0600
13217 +++ linux-2.6.7/net/sunrpc/svcsock.c 2005-03-23 14:28:24.029396672 -0700
13218 @@ -414,7 +414,6 @@ svc_sendto(struct svc_rqst *rqstp, struc
13221 if (xdr->tail[0].iov_len) {
13222 - /* The tail *will* be in respages[0]; */
13223 result = sock->ops->sendpage(sock, rqstp->rq_respages[rqstp->rq_restailpage],
13224 ((unsigned long)xdr->tail[0].iov_base)& (PAGE_SIZE-1),
13225 xdr->tail[0].iov_len, 0);
13226 --- linux-2.6.7/net/sunrpc/clnt.c.lsec 2004-06-15 23:19:13.000000000 -0600
13227 +++ linux-2.6.7/net/sunrpc/clnt.c 2005-03-23 14:28:23.595462640 -0700
13228 @@ -351,7 +351,9 @@ int rpc_call_sync(struct rpc_clnt *clnt,
13229 rpc_clnt_sigmask(clnt, &oldset);
13231 /* Create/initialize a new RPC task */
13232 - rpc_init_task(task, clnt, NULL, flags);
13233 + task = rpc_new_task(clnt, NULL, flags);
13234 + if (task == NULL)
13236 rpc_call_setup(task, msg, 0);
13238 /* Set up the call info struct and execute the task */
13239 @@ -620,8 +622,14 @@ call_encode(struct rpc_task *task)
13240 rpc_exit(task, -EIO);
13243 - if (encode && (status = rpcauth_wrap_req(task, encode, req, p,
13244 - task->tk_msg.rpc_argp)) < 0) {
13245 + if (encode == NULL)
13248 + status = rpcauth_wrap_req(task, encode, req, p, task->tk_msg.rpc_argp);
13249 + if (status == -EAGAIN) {
13250 + printk("XXXJBF: out of memeory? Should retry here!!!\n");
13252 + if (status < 0) {
13253 printk(KERN_WARNING "%s: can't encode arguments: %d\n",
13254 clnt->cl_protname, -status);
13255 rpc_exit(task, status);
13256 --- linux-2.6.7/net/sunrpc/sunrpc_syms.c.lsec 2004-06-15 23:19:52.000000000 -0600
13257 +++ linux-2.6.7/net/sunrpc/sunrpc_syms.c 2005-03-23 14:32:35.589153776 -0700
13258 @@ -58,6 +58,8 @@ EXPORT_SYMBOL(rpc_unlink);
13259 EXPORT_SYMBOL(rpc_wake_up);
13260 EXPORT_SYMBOL(rpc_queue_upcall);
13261 EXPORT_SYMBOL(rpc_mkpipe);
13262 +EXPORT_SYMBOL(rpc_mkdir);
13263 +EXPORT_SYMBOL(rpc_rmdir);
13265 /* Client transport */
13266 EXPORT_SYMBOL(xprt_create_proto);
13267 @@ -89,6 +91,7 @@ EXPORT_SYMBOL(svc_makesock);
13268 EXPORT_SYMBOL(svc_reserve);
13269 EXPORT_SYMBOL(svc_auth_register);
13270 EXPORT_SYMBOL(auth_domain_lookup);
13271 +EXPORT_SYMBOL(svc_authenticate);
13273 /* RPC statistics */
13274 #ifdef CONFIG_PROC_FS
13275 --- linux-2.6.7/net/sunrpc/pmap_clnt.c.lsec 2004-06-15 23:19:23.000000000 -0600
13276 +++ linux-2.6.7/net/sunrpc/pmap_clnt.c 2005-03-23 14:28:24.134380712 -0700
13277 @@ -183,8 +183,10 @@ rpc_register(u32 prog, u32 vers, int pro
13278 map.pm_prot = prot;
13279 map.pm_port = port;
13282 error = rpc_call(pmap_clnt, port? PMAP_SET : PMAP_UNSET,
13287 printk(KERN_WARNING
13288 --- linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_unseal.c.lsec 2004-06-15 23:19:44.000000000 -0600
13289 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_unseal.c 2005-03-23 14:28:23.761437408 -0700
13290 @@ -68,20 +68,13 @@
13294 -/* message_buffer is an input if toktype is MIC and an output if it is WRAP:
13295 - * If toktype is MIC: read_token is a mic token, and message_buffer is the
13296 - * data that the mic was supposedly taken over.
13297 - * If toktype is WRAP: read_token is a wrap token, and message_buffer is used
13298 - * to return the decrypted data.
13300 +/* read_token is a mic token, and message_buffer is the data that the mic was
13301 + * supposedly taken over. */
13303 -/* XXX will need to change prototype and/or just split into a separate function
13304 - * when we add privacy (because read_token will be in pages too). */
13306 krb5_read_token(struct krb5_ctx *ctx,
13307 struct xdr_netobj *read_token,
13308 - struct xdr_buf *message_buffer,
13309 - int *qop_state, int toktype)
13310 + struct xdr_buf *message_buffer, int *qop_state)
13314 @@ -96,20 +89,16 @@ krb5_read_token(struct krb5_ctx *ctx,
13316 dprintk("RPC: krb5_read_token\n");
13318 - if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, toktype,
13319 + if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,
13323 - if ((*ptr++ != ((toktype>>8)&0xff)) || (*ptr++ != (toktype&0xff)))
13324 + if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
13325 + (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) )
13328 /* XXX sanity-check bodysize?? */
13330 - if (toktype == KG_TOK_WRAP_MSG) {
13335 /* get the sign and seal algorithms */
13337 signalg = ptr[0] + (ptr[1] << 8);
13338 @@ -120,14 +109,7 @@ krb5_read_token(struct krb5_ctx *ctx,
13339 if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
13342 - if (((toktype != KG_TOK_WRAP_MSG) && (sealalg != 0xffff)) ||
13343 - ((toktype == KG_TOK_WRAP_MSG) && (sealalg == 0xffff)))
13346 - /* in the current spec, there is only one valid seal algorithm per
13347 - key type, so a simple comparison is ok */
13349 - if ((toktype == KG_TOK_WRAP_MSG) && !(sealalg == ctx->sealalg))
13350 + if (sealalg != 0xffff)
13353 /* there are several mappings of seal algorithms to sign algorithms,
13354 @@ -154,7 +136,7 @@ krb5_read_token(struct krb5_ctx *ctx,
13356 case SGN_ALG_DES_MAC_MD5:
13357 ret = make_checksum(checksum_type, ptr - 2, 8,
13358 - message_buffer, &md5cksum);
13359 + message_buffer, 0, &md5cksum);
13363 --- linux-2.6.7/net/sunrpc/auth_gss/auth_gss.c.lsec 2004-06-15 23:19:22.000000000 -0600
13364 +++ linux-2.6.7/net/sunrpc/auth_gss/auth_gss.c 2005-03-23 14:28:24.185372960 -0700
13366 #include <linux/socket.h>
13367 #include <linux/in.h>
13368 #include <linux/sched.h>
13369 +#include <linux/pagemap.h>
13370 #include <linux/sunrpc/clnt.h>
13371 #include <linux/sunrpc/auth.h>
13372 #include <linux/sunrpc/auth_gss.h>
13373 @@ -397,7 +398,7 @@ retry:
13374 spin_unlock(&gss_auth->lock);
13376 gss_release_msg(gss_msg);
13377 - dprintk("RPC: %4u gss_upcall for uid %u result %d", task->tk_pid,
13378 + dprintk("RPC: %4u gss_upcall for uid %u result %d\n", task->tk_pid,
13382 @@ -740,6 +741,8 @@ gss_marshal(struct rpc_task *task, u32 *
13383 maj_stat = gss_get_mic(ctx->gc_gss_ctx,
13386 + if (maj_stat == GSS_S_CONTEXT_EXPIRED)
13387 + cred->cr_flags |= RPCAUTH_CRED_DEAD;
13389 printk("gss_marshal: gss_get_mic FAILED (%d)\n", maj_stat);
13391 @@ -779,6 +782,7 @@ gss_validate(struct rpc_task *task, u32
13392 struct xdr_netobj mic;
13397 dprintk("RPC: %4u gss_validate\n", task->tk_pid);
13399 @@ -794,8 +798,11 @@ gss_validate(struct rpc_task *task, u32
13400 mic.data = (u8 *)p;
13403 - if (gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state))
13405 + maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &verf_buf, &mic, &qop_state);
13406 + if (maj_stat == GSS_S_CONTEXT_EXPIRED)
13407 + cred->cr_flags |= RPCAUTH_CRED_DEAD;
13410 service = gss_pseudoflavor_to_service(ctx->gc_gss_ctx->mech_type,
13411 gss_cred->gc_flavor);
13413 @@ -807,6 +814,11 @@ gss_validate(struct rpc_task *task, u32
13414 /* verifier data, flavor, length, length, sequence number: */
13415 task->tk_auth->au_rslack = XDR_QUADLEN(len) + 4;
13417 + case RPC_GSS_SVC_PRIVACY:
13418 + /* XXXJBF: Ugh. Going for a wild overestimate.
13419 + * Need some info from krb5 layer? */
13420 + task->tk_auth->au_rslack = XDR_QUADLEN(len) + 32;
13425 @@ -821,11 +833,11 @@ out_bad:
13429 -gss_wrap_req_integ(struct gss_cl_ctx *ctx,
13430 - kxdrproc_t encode, void *rqstp, u32 *p, void *obj)
13431 +gss_wrap_req_integ(struct rpc_cred *cred, kxdrproc_t encode,
13432 + struct rpc_rqst *rqstp, u32 *p, void *obj)
13434 - struct rpc_rqst *req = (struct rpc_rqst *)rqstp;
13435 - struct xdr_buf *snd_buf = &req->rq_snd_buf;
13436 + struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
13437 + struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
13438 struct xdr_buf integ_buf;
13439 u32 *integ_len = NULL;
13440 struct xdr_netobj mic;
13441 @@ -836,7 +848,7 @@ gss_wrap_req_integ(struct gss_cl_ctx *ct
13444 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
13445 - *p++ = htonl(req->rq_seqno);
13446 + *p++ = htonl(rqstp->rq_seqno);
13448 status = encode(rqstp, p, obj);
13450 @@ -848,7 +860,7 @@ gss_wrap_req_integ(struct gss_cl_ctx *ct
13451 *integ_len = htonl(integ_buf.len);
13453 /* guess whether we're in the head or the tail: */
13454 - if (snd_buf->page_len || snd_buf->tail[0].iov_len)
13455 + if (snd_buf->page_len || snd_buf->tail[0].iov_len)
13456 iov = snd_buf->tail;
13458 iov = snd_buf->head;
13459 @@ -857,6 +869,8 @@ gss_wrap_req_integ(struct gss_cl_ctx *ct
13461 maj_stat = gss_get_mic(ctx->gc_gss_ctx,
13462 GSS_C_QOP_DEFAULT, &integ_buf, &mic);
13463 + if (maj_stat == GSS_S_CONTEXT_EXPIRED)
13464 + cred->cr_flags |= RPCAUTH_CRED_DEAD;
13465 status = -EIO; /* XXX? */
13468 @@ -868,6 +882,113 @@ gss_wrap_req_integ(struct gss_cl_ctx *ct
13473 +priv_release_snd_buf(struct rpc_rqst *rqstp)
13477 + for (i=0; i < rqstp->rq_enc_pages_num; i++)
13478 + __free_page(rqstp->rq_enc_pages[i]);
13479 + kfree(rqstp->rq_enc_pages);
13483 +alloc_enc_pages(struct rpc_rqst *rqstp)
13485 + struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
13486 + int first, last, i;
13488 + if (snd_buf->page_len == 0) {
13489 + rqstp->rq_enc_pages_num = 0;
13493 + first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
13494 + last = (snd_buf->page_base + snd_buf->page_len - 1) >> PAGE_CACHE_SHIFT;
13495 + rqstp->rq_enc_pages_num = last - first + 1 + 1;
13496 + rqstp->rq_enc_pages
13497 + = kmalloc(rqstp->rq_enc_pages_num * sizeof(struct page *),
13499 + if (!rqstp->rq_enc_pages)
13501 + for (i=0; i < rqstp->rq_enc_pages_num; i++) {
13502 + rqstp->rq_enc_pages[i] = alloc_page(GFP_NOFS);
13503 + if (rqstp->rq_enc_pages[i] == NULL)
13506 + rqstp->rq_release_snd_buf = priv_release_snd_buf;
13509 + for (i--; i >= 0; i--) {
13510 + __free_page(rqstp->rq_enc_pages[i]);
13517 +gss_wrap_req_priv(struct rpc_cred *cred, kxdrproc_t encode,
13518 + struct rpc_rqst *rqstp, u32 *p, void *obj)
13520 + struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
13521 + struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
13526 + struct page **inpages;
13529 + struct iovec *iov;
13532 + opaque_len = p++;
13533 + offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
13534 + *p++ = htonl(rqstp->rq_seqno);
13536 + status = encode(rqstp, p, obj);
13540 + status = alloc_enc_pages(rqstp);
13543 + /* XXXJBF: Oops! Do we need rq_enc_pages really any more?? */
13544 + first = snd_buf->page_base >> PAGE_CACHE_SHIFT;
13545 + inpages = snd_buf->pages + first;
13546 + snd_buf->pages = rqstp->rq_enc_pages;
13547 + snd_buf->page_base -= first << PAGE_CACHE_SHIFT;
13548 + /* XXX?: tail needs to be separate if we want to be able to expand
13549 + * the head (since it's often put right after the head). But is
13550 + * expanding the head safe in any case? */
13551 + if (snd_buf->page_len || snd_buf->tail[0].iov_len) {
13552 + tmp = page_address(rqstp->rq_enc_pages[rqstp->rq_enc_pages_num - 1]);
13553 + memcpy(tmp, snd_buf->tail[0].iov_base, snd_buf->tail[0].iov_len);
13554 + snd_buf->tail[0].iov_base = tmp;
13556 + maj_stat = gss_wrap(ctx->gc_gss_ctx, GSS_C_QOP_DEFAULT, offset,
13557 + snd_buf, inpages);
13558 + if (maj_stat == GSS_S_CONTEXT_EXPIRED)
13559 + cred->cr_flags |= RPCAUTH_CRED_DEAD;
13560 + status = -EIO; /* XXX? */
13564 + *opaque_len = htonl(snd_buf->len - offset);
13565 + /* guess whether we're in the head or the tail: */
13566 + if (snd_buf->page_len || snd_buf->tail[0].iov_len)
13567 + iov = snd_buf->tail;
13569 + iov = snd_buf->head;
13570 + p = iov->iov_base + iov->iov_len;
13571 + pad = 3 - ((snd_buf->len - offset - 1) & 3);
13572 + memset(p, 0, pad);
13573 + iov->iov_len += pad;
13574 + snd_buf->len += pad;
13580 gss_wrap_req(struct rpc_task *task,
13581 kxdrproc_t encode, void *rqstp, u32 *p, void *obj)
13582 @@ -894,9 +1015,11 @@ gss_wrap_req(struct rpc_task *task,
13583 status = encode(rqstp, p, obj);
13585 case RPC_GSS_SVC_INTEGRITY:
13586 - status = gss_wrap_req_integ(ctx, encode, rqstp, p, obj);
13587 + status = gss_wrap_req_integ(cred, encode, rqstp, p, obj);
13589 case RPC_GSS_SVC_PRIVACY:
13590 + status = gss_wrap_req_priv(cred, encode, rqstp, p, obj);
13595 @@ -907,11 +1030,10 @@ out:
13599 -gss_unwrap_resp_integ(struct gss_cl_ctx *ctx,
13600 - kxdrproc_t decode, void *rqstp, u32 **p, void *obj)
13601 +gss_unwrap_resp_integ(struct rpc_cred *cred, struct rpc_rqst *rqstp, u32 **p)
13603 - struct rpc_rqst *req = (struct rpc_rqst *)rqstp;
13604 - struct xdr_buf *rcv_buf = &req->rq_rcv_buf;
13605 + struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
13606 + struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
13607 struct xdr_buf integ_buf;
13608 struct xdr_netobj mic;
13609 u32 data_offset, mic_offset;
13610 @@ -926,7 +1048,7 @@ gss_unwrap_resp_integ(struct gss_cl_ctx
13611 mic_offset = integ_len + data_offset;
13612 if (mic_offset > rcv_buf->len)
13614 - if (ntohl(*(*p)++) != req->rq_seqno)
13615 + if (ntohl(*(*p)++) != rqstp->rq_seqno)
13618 if (xdr_buf_subsegment(rcv_buf, &integ_buf, data_offset,
13619 @@ -938,11 +1060,44 @@ gss_unwrap_resp_integ(struct gss_cl_ctx
13621 maj_stat = gss_verify_mic(ctx->gc_gss_ctx, &integ_buf,
13623 + if (maj_stat == GSS_S_CONTEXT_EXPIRED)
13624 + cred->cr_flags |= RPCAUTH_CRED_DEAD;
13625 if (maj_stat != GSS_S_COMPLETE)
13631 +gss_unwrap_resp_priv(struct rpc_cred *cred, struct rpc_rqst *rqstp, u32 **p)
13633 + struct gss_cl_ctx *ctx = gss_cred_get_ctx(cred);
13634 + struct xdr_buf *rcv_buf = &rqstp->rq_rcv_buf;
13635 + u32 offset, out_offset;
13638 + int status = -EIO;
13640 + opaque_len = ntohl(*(*p)++);
13641 + offset = (u8 *)(*p) - (u8 *)rcv_buf->head[0].iov_base;
13642 + if (offset + opaque_len > rcv_buf->len)
13644 + /* remove padding: */
13645 + rcv_buf->len = offset + opaque_len;
13647 + maj_stat = gss_unwrap(ctx->gc_gss_ctx, GSS_C_QOP_DEFAULT,
13648 + offset, rcv_buf, &out_offset);
13649 + if (maj_stat == GSS_S_CONTEXT_EXPIRED)
13650 + cred->cr_flags |= RPCAUTH_CRED_DEAD;
13651 + if (maj_stat != GSS_S_COMPLETE)
13653 + *p = (u32 *)(rcv_buf->head[0].iov_base + out_offset);
13654 + if (ntohl(*(*p)++) != rqstp->rq_seqno)
13662 gss_unwrap_resp(struct rpc_task *task,
13663 kxdrproc_t decode, void *rqstp, u32 *p, void *obj)
13664 @@ -962,12 +1117,16 @@ gss_unwrap_resp(struct rpc_task *task,
13665 case RPC_GSS_SVC_NONE:
13667 case RPC_GSS_SVC_INTEGRITY:
13668 - status = gss_unwrap_resp_integ(ctx, decode,
13670 + status = gss_unwrap_resp_integ(cred, rqstp, &p);
13674 case RPC_GSS_SVC_PRIVACY:
13675 + status = gss_unwrap_resp_priv(cred, rqstp, &p);
13683 --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_mech.c.lsec 2005-03-23 14:28:24.187372656 -0700
13684 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_mech.c 2005-03-23 14:28:24.186372808 -0700
13687 + * linux/net/sunrpc/gss_spkm3_mech.c
13689 + * Copyright (c) 2003 The Regents of the University of Michigan.
13690 + * All rights reserved.
13692 + * Andy Adamson <andros@umich.edu>
13693 + * J. Bruce Fields <bfields@umich.edu>
13695 + * Redistribution and use in source and binary forms, with or without
13696 + * modification, are permitted provided that the following conditions
13699 + * 1. Redistributions of source code must retain the above copyright
13700 + * notice, this list of conditions and the following disclaimer.
13701 + * 2. Redistributions in binary form must reproduce the above copyright
13702 + * notice, this list of conditions and the following disclaimer in the
13703 + * documentation and/or other materials provided with the distribution.
13704 + * 3. Neither the name of the University nor the names of its
13705 + * contributors may be used to endorse or promote products derived
13706 + * from this software without specific prior written permission.
13708 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
13709 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
13710 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
13711 + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
13712 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
13713 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
13714 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
13715 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
13716 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
13717 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
13718 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13722 +#include <linux/module.h>
13723 +#include <linux/init.h>
13724 +#include <linux/types.h>
13725 +#include <linux/slab.h>
13726 +#include <linux/sunrpc/auth.h>
13727 +#include <linux/in.h>
13728 +#include <linux/sunrpc/svcauth_gss.h>
13729 +#include <linux/sunrpc/gss_spkm3.h>
13730 +#include <linux/sunrpc/xdr.h>
13731 +#include <linux/crypto.h>
13734 +# define RPCDBG_FACILITY RPCDBG_AUTH
13737 +struct xdr_netobj gss_mech_spkm3_oid =
13738 + {7, "\053\006\001\005\005\001\003"};
13741 +get_bytes(char **ptr, const char *end, void *res, int len)
13746 + if (q > end || q < p)
13748 + memcpy(res, p, len);
13754 +get_netobj(char **ptr, const char *end, struct xdr_netobj *res)
13758 + if (get_bytes(&p, end, &res->len, sizeof(res->len)))
13760 + q = p + res->len;
13761 + if(res->len == 0)
13763 + if (q > end || q < p)
13765 + if (!(res->data = kmalloc(res->len, GFP_KERNEL)))
13767 + memcpy(res->data, p, res->len);
13774 +get_key(char **p, char *end, struct crypto_tfm **res, int *resalg)
13776 + struct xdr_netobj key = {
13780 + int alg_mode,setkey = 0;
13783 + if (get_bytes(p, end, resalg, sizeof(int)))
13785 + if ((get_netobj(p, end, &key)))
13788 + switch (*resalg) {
13789 + case NID_des_cbc:
13790 + alg_name = "des";
13791 + alg_mode = CRYPTO_TFM_MODE_CBC;
13795 + if (key.len == 0) {
13796 + dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
13798 + alg_name = "md5";
13802 + case NID_cast5_cbc:
13803 + dprintk("RPC: SPKM3 get_key: case cast5_cbc, UNSUPPORTED \n");
13807 + dprintk("RPC: SPKM3 get_key: unsupported algorithm %d", *resalg);
13808 + goto out_err_free_key;
13810 + if (!(*res = crypto_alloc_tfm(alg_name, alg_mode)))
13811 + goto out_err_free_key;
13813 + if (crypto_cipher_setkey(*res, key.data, key.len))
13814 + goto out_err_free_tfm;
13822 + crypto_free_tfm(*res);
13831 +gss_import_sec_context_spkm3(struct xdr_netobj *inbuf,
13832 + struct gss_ctx *ctx_id)
13834 + char *p = inbuf->data;
13835 + char *end = inbuf->data + inbuf->len;
13836 + struct spkm3_ctx *ctx;
13838 + if (!(ctx = kmalloc(sizeof(*ctx), GFP_KERNEL)))
13840 + memset(ctx, 0, sizeof(*ctx));
13842 + if (get_netobj(&p, end, &ctx->ctx_id))
13843 + goto out_err_free_ctx;
13845 + if (get_bytes(&p, end, &ctx->qop, sizeof(ctx->qop)))
13846 + goto out_err_free_ctx_id;
13848 + if (get_netobj(&p, end, &ctx->mech_used))
13849 + goto out_err_free_mech;
13851 + if (get_bytes(&p, end, &ctx->ret_flags, sizeof(ctx->ret_flags)))
13852 + goto out_err_free_mech;
13854 + if (get_bytes(&p, end, &ctx->req_flags, sizeof(ctx->req_flags)))
13855 + goto out_err_free_mech;
13857 + if (get_netobj(&p, end, &ctx->share_key))
13858 + goto out_err_free_s_key;
13860 + if (get_key(&p, end, &ctx->derived_conf_key, &ctx->conf_alg)) {
13861 + dprintk("RPC: SPKM3 confidentiality key will be NULL\n");
13864 + if (get_key(&p, end, &ctx->derived_integ_key, &ctx->intg_alg)) {
13865 + dprintk("RPC: SPKM3 integrity key will be NULL\n");
13868 + if (get_bytes(&p, end, &ctx->owf_alg, sizeof(ctx->owf_alg)))
13869 + goto out_err_free_s_key;
13871 + if (get_bytes(&p, end, &ctx->owf_alg, sizeof(ctx->owf_alg)))
13872 + goto out_err_free_s_key;
13875 + goto out_err_free_s_key;
13877 + ctx_id->internal_ctx_id = ctx;
13879 + dprintk("Succesfully imported new spkm context.\n");
13882 +out_err_free_s_key:
13883 + kfree(ctx->share_key.data);
13884 +out_err_free_mech:
13885 + kfree(ctx->mech_used.data);
13886 +out_err_free_ctx_id:
13887 + kfree(ctx->ctx_id.data);
13891 + return GSS_S_FAILURE;
13895 +gss_delete_sec_context_spkm3(void *internal_ctx) {
13896 + struct spkm3_ctx *sctx = internal_ctx;
13898 + if(sctx->derived_integ_key)
13899 + crypto_free_tfm(sctx->derived_integ_key);
13900 + if(sctx->derived_conf_key)
13901 + crypto_free_tfm(sctx->derived_conf_key);
13902 + if(sctx->share_key.data)
13903 + kfree(sctx->share_key.data);
13904 + if(sctx->mech_used.data)
13905 + kfree(sctx->mech_used.data);
13910 +gss_verify_mic_spkm3(struct gss_ctx *ctx,
13911 + struct xdr_buf *signbuf,
13912 + struct xdr_netobj *checksum,
13914 + u32 maj_stat = 0;
13915 + int qop_state = 0;
13916 + struct spkm3_ctx *sctx = ctx->internal_ctx_id;
13918 + dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
13919 + maj_stat = spkm3_read_token(sctx, checksum, signbuf, &qop_state,
13922 + if (!maj_stat && qop_state)
13923 + *qstate = qop_state;
13925 + dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
13930 +gss_get_mic_spkm3(struct gss_ctx *ctx,
13932 + struct xdr_buf *message_buffer,
13933 + struct xdr_netobj *message_token) {
13935 + struct spkm3_ctx *sctx = ctx->internal_ctx_id;
13937 + dprintk("RPC: gss_get_mic_spkm3\n");
13939 + err = spkm3_make_token(sctx, qop, message_buffer,
13940 + message_token, SPKM_MIC_TOK);
13944 +static struct gss_api_ops gss_spkm3_ops = {
13945 + .gss_import_sec_context = gss_import_sec_context_spkm3,
13946 + .gss_get_mic = gss_get_mic_spkm3,
13947 + .gss_verify_mic = gss_verify_mic_spkm3,
13948 + .gss_delete_sec_context = gss_delete_sec_context_spkm3,
13951 +static struct pf_desc gss_spkm3_pfs[] = {
13952 + {RPC_AUTH_GSS_SPKM, 0, RPC_GSS_SVC_NONE, "spkm3"},
13953 + {RPC_AUTH_GSS_SPKMI, 0, RPC_GSS_SVC_INTEGRITY, "spkm3i"},
13956 +static struct gss_api_mech gss_spkm3_mech = {
13957 + .gm_name = "spkm3",
13958 + .gm_owner = THIS_MODULE,
13959 + .gm_ops = &gss_spkm3_ops,
13960 + .gm_pf_num = ARRAY_SIZE(gss_spkm3_pfs),
13961 + .gm_pfs = gss_spkm3_pfs,
13964 +static int __init init_spkm3_module(void)
13968 + status = gss_mech_register(&gss_spkm3_mech);
13970 + printk("Failed to register spkm3 gss mechanism!\n");
13974 +static void __exit cleanup_spkm3_module(void)
13976 + gss_mech_unregister(&gss_spkm3_mech);
13979 +MODULE_LICENSE("GPL");
13980 +module_init(init_spkm3_module);
13981 +module_exit(cleanup_spkm3_module);
13982 --- linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_crypto.c.lsec 2004-06-15 23:18:55.000000000 -0600
13983 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_crypto.c 2005-03-23 14:28:24.840273400 -0700
13984 @@ -139,17 +139,91 @@ buf_to_sg(struct scatterlist *sg, char *
13989 +process_xdr_buf(struct xdr_buf *buf, int offset, int len,
13990 + int (*actor)(struct scatterlist *, void *), void *data)
13992 + int i, page_len, thislen, page_offset, ret = 0;
13993 + struct scatterlist sg[1];
13995 + if (offset >= buf->head[0].iov_len) {
13996 + offset -= buf->head[0].iov_len;
13998 + thislen = buf->head[0].iov_len - offset;
13999 + if (thislen > len)
14001 + buf_to_sg(sg, buf->head[0].iov_base + offset, thislen);
14002 + ret = actor(sg, data);
14011 + if (offset >= buf->page_len) {
14012 + offset -= buf->page_len;
14014 + page_len = buf->page_len - offset;
14015 + if (page_len > len)
14018 + page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
14019 + i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
14020 + thislen = PAGE_CACHE_SIZE - page_offset;
14022 + if (thislen > page_len)
14023 + thislen = page_len;
14024 + sg->page = buf->pages[i];
14025 + sg->offset = page_offset;
14026 + sg->length = thislen;
14027 + ret = actor(sg, data);
14030 + page_len -= thislen;
14033 + thislen = PAGE_CACHE_SIZE;
14034 + } while (page_len != 0);
14040 + if (offset < buf->tail[0].iov_len) {
14041 + thislen = buf->tail[0].iov_len - offset;
14042 + if (thislen > len)
14044 + buf_to_sg(sg, buf->tail[0].iov_base + offset, thislen);
14045 + ret = actor(sg, data);
14055 +checksummer(struct scatterlist *sg, void *data)
14057 + struct crypto_tfm *tfm = (struct crypto_tfm *)data;
14059 + crypto_digest_update(tfm, sg, 1);
14064 /* checksum the plaintext data and hdrlen bytes of the token header */
14066 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
14067 - struct xdr_netobj *cksum)
14068 + int body_offset, struct xdr_netobj *cksum)
14071 struct crypto_tfm *tfm = NULL; /* XXX add to ctx? */
14072 struct scatterlist sg[1];
14073 u32 code = GSS_S_FAILURE;
14074 - int len, thislen, offset;
14077 switch (cksumtype) {
14078 case CKSUMTYPE_RSA_MD5:
14079 @@ -169,35 +243,8 @@ make_checksum(s32 cksumtype, char *heade
14080 crypto_digest_init(tfm);
14081 buf_to_sg(sg, header, hdrlen);
14082 crypto_digest_update(tfm, sg, 1);
14083 - if (body->head[0].iov_len) {
14084 - buf_to_sg(sg, body->head[0].iov_base, body->head[0].iov_len);
14085 - crypto_digest_update(tfm, sg, 1);
14088 - len = body->page_len;
14090 - offset = body->page_base & (PAGE_CACHE_SIZE - 1);
14091 - i = body->page_base >> PAGE_CACHE_SHIFT;
14092 - thislen = PAGE_CACHE_SIZE - offset;
14094 - if (thislen > len)
14096 - sg->page = body->pages[i];
14097 - sg->offset = offset;
14098 - sg->length = thislen;
14099 - kmap(sg->page); /* XXX kmap_atomic? */
14100 - crypto_digest_update(tfm, sg, 1);
14101 - kunmap(sg->page);
14105 - thislen = PAGE_CACHE_SIZE;
14106 - } while(len != 0);
14108 - if (body->tail[0].iov_len) {
14109 - buf_to_sg(sg, body->tail[0].iov_base, body->tail[0].iov_len);
14110 - crypto_digest_update(tfm, sg, 1);
14112 + process_xdr_buf(body, body_offset, body->len - body_offset,
14113 + checksummer, tfm);
14114 crypto_digest_final(tfm, cksum->data);
14117 @@ -207,3 +254,154 @@ out:
14120 EXPORT_SYMBOL(make_checksum);
14122 +struct encryptor_desc {
14123 + u8 iv[8]; /* XXX hard-coded blocksize */
14124 + struct crypto_tfm *tfm;
14126 + struct xdr_buf *outbuf;
14127 + struct page **pages;
14128 + struct scatterlist infrags[4];
14129 + struct scatterlist outfrags[4];
14135 +encryptor(struct scatterlist *sg, void *data)
14137 + struct encryptor_desc *desc = data;
14138 + struct xdr_buf *outbuf = desc->outbuf;
14139 + struct page *in_page;
14140 + int thislen = desc->fraglen + sg->length;
14141 + int fraglen, ret;
14144 + /* Worst case is 4 fragments: head, end of page 1, start
14145 + * of page 2, tail. Anything more is a bug. */
14146 + BUG_ON(desc->fragno > 3);
14147 + desc->infrags[desc->fragno] = *sg;
14148 + desc->outfrags[desc->fragno] = *sg;
14150 + page_pos = desc->pos - outbuf->head[0].iov_len;
14151 + if (page_pos >= 0 && page_pos < outbuf->page_len) {
14152 + /* pages are not in place: */
14153 + int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
14154 + in_page = desc->pages[i];
14156 + in_page = sg->page;
14158 + desc->infrags[desc->fragno].page = in_page;
14160 + desc->fraglen += sg->length;
14161 + desc->pos += sg->length;
14163 + fraglen = thislen & 7; /* XXX hardcoded blocksize */
14164 + thislen -= fraglen;
14166 + if (thislen == 0)
14169 + ret = crypto_cipher_encrypt_iv(desc->tfm, desc->outfrags, desc->infrags,
14170 + thislen, desc->iv);
14174 + desc->outfrags[0].page = sg->page;
14175 + desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
14176 + desc->outfrags[0].length = fraglen;
14177 + desc->infrags[0] = desc->outfrags[0];
14178 + desc->infrags[0].page = in_page;
14179 + desc->fragno = 1;
14180 + desc->fraglen = fraglen;
14182 + desc->fragno = 0;
14183 + desc->fraglen = 0;
14189 +gss_encrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset,
14190 + struct page **pages)
14193 + struct encryptor_desc desc;
14195 + BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
14197 + memset(desc.iv, 0, sizeof(desc.iv));
14199 + desc.pos = offset;
14200 + desc.outbuf = buf;
14201 + desc.pages = pages;
14203 + desc.fraglen = 0;
14205 + ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
14209 +EXPORT_SYMBOL(gss_encrypt_xdr_buf);
14211 +struct decryptor_desc {
14212 + u8 iv[8]; /* XXX hard-coded blocksize */
14213 + struct crypto_tfm *tfm;
14214 + struct scatterlist frags[4];
14220 +decryptor(struct scatterlist *sg, void *data)
14222 + struct decryptor_desc *desc = data;
14223 + int thislen = desc->fraglen + sg->length;
14224 + int fraglen, ret;
14226 + /* Worst case is 4 fragments: head, end of page 1, start
14227 + * of page 2, tail. Anything more is a bug. */
14228 + BUG_ON(desc->fragno > 3);
14229 + desc->frags[desc->fragno] = *sg;
14231 + desc->fraglen += sg->length;
14233 + fraglen = thislen & 7; /* XXX hardcoded blocksize */
14234 + thislen -= fraglen;
14236 + if (thislen == 0)
14239 + ret = crypto_cipher_decrypt_iv(desc->tfm, desc->frags, desc->frags,
14240 + thislen, desc->iv);
14244 + desc->frags[0].page = sg->page;
14245 + desc->frags[0].offset = sg->offset + sg->length - fraglen;
14246 + desc->frags[0].length = fraglen;
14247 + desc->fragno = 1;
14248 + desc->fraglen = fraglen;
14250 + desc->fragno = 0;
14251 + desc->fraglen = 0;
14257 +gss_decrypt_xdr_buf(struct crypto_tfm *tfm, struct xdr_buf *buf, int offset)
14259 + struct decryptor_desc desc;
14262 + BUG_ON((buf->len - offset) % crypto_tfm_alg_blocksize(tfm) != 0);
14264 + memset(desc.iv, 0, sizeof(desc.iv));
14267 + desc.fraglen = 0;
14268 + return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
14271 +EXPORT_SYMBOL(gss_decrypt_xdr_buf);
14272 --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_seal.c.lsec 2005-03-23 14:28:24.239364752 -0700
14273 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_seal.c 2005-03-23 14:28:24.238364904 -0700
14276 + * linux/net/sunrpc/gss_spkm3_seal.c
14278 + * Copyright (c) 2003 The Regents of the University of Michigan.
14279 + * All rights reserved.
14281 + * Andy Adamson <andros@umich.edu>
14283 + * Redistribution and use in source and binary forms, with or without
14284 + * modification, are permitted provided that the following conditions
14287 + * 1. Redistributions of source code must retain the above copyright
14288 + * notice, this list of conditions and the following disclaimer.
14289 + * 2. Redistributions in binary form must reproduce the above copyright
14290 + * notice, this list of conditions and the following disclaimer in the
14291 + * documentation and/or other materials provided with the distribution.
14292 + * 3. Neither the name of the University nor the names of its
14293 + * contributors may be used to endorse or promote products derived
14294 + * from this software without specific prior written permission.
14296 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
14297 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
14298 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
14299 + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
14300 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
14301 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
14302 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
14303 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
14304 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
14305 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
14306 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14310 +#include <linux/types.h>
14311 +#include <linux/slab.h>
14312 +#include <linux/jiffies.h>
14313 +#include <linux/sunrpc/gss_spkm3.h>
14314 +#include <linux/random.h>
14315 +#include <linux/crypto.h>
14318 +# define RPCDBG_FACILITY RPCDBG_AUTH
14322 + * spkm3_make_token()
14324 + * Only SPKM_MIC_TOK with md5 intg-alg is supported
14328 +spkm3_make_token(struct spkm3_ctx *ctx, int qop_req,
14329 + struct xdr_buf * text, struct xdr_netobj * token,
14332 + s32 checksum_type;
14333 + char tokhdrbuf[25];
14334 + struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
14335 + struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf};
14336 + int tmsglen, tokenlen = 0;
14337 + unsigned char *ptr;
14339 + int ctxelen = 0, ctxzbit = 0;
14340 + int md5elen = 0, md5zbit = 0;
14342 + dprintk("RPC: spkm3_make_token\n");
14345 + if (qop_req != 0)
14348 + if (ctx->ctx_id.len != 16) {
14349 + dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
14350 + ctx->ctx_id.len);
14354 + switch (ctx->intg_alg) {
14356 + checksum_type = CKSUMTYPE_RSA_MD5;
14359 + dprintk("RPC: gss_spkm3_seal: ctx->signalg %d not"
14360 + " supported\n", ctx->intg_alg);
14363 + /* XXX since we don't support WRAP, perhaps we don't care... */
14364 + if (ctx->conf_alg != NID_cast5_cbc) {
14365 + dprintk("RPC: gss_spkm3_seal: ctx->sealalg %d not supported\n",
14370 + if (toktype == SPKM_MIC_TOK) {
14372 + /* Calculate checksum over the mic-header */
14373 + asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
14374 + spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
14375 + ctxelen, ctxzbit);
14377 + if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len,
14378 + text, &md5cksum))
14381 + asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
14382 + tokenlen = 10 + ctxelen + 1 + 2 + md5elen + 1;
14384 + /* Create token header using generic routines */
14385 + token->len = g_token_size(&ctx->mech_used, tokenlen + tmsglen);
14387 + ptr = token->data;
14388 + g_make_token_header(&ctx->mech_used, tokenlen + tmsglen, &ptr);
14390 + spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
14391 + } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
14392 + dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK not supported\n");
14395 + kfree(md5cksum.data);
14397 + /* XXX need to implement sequence numbers, and ctx->expired */
14399 + return GSS_S_COMPLETE;
14401 + if (md5cksum.data)
14402 + kfree(md5cksum.data);
14405 + return GSS_S_FAILURE;
14407 --- linux-2.6.7/net/sunrpc/auth_gss/svcauth_gss.c.lsec 2004-06-15 23:19:22.000000000 -0600
14408 +++ linux-2.6.7/net/sunrpc/auth_gss/svcauth_gss.c 2005-03-23 14:28:24.405339520 -0700
14413 +#include <asm/bitops.h>
14414 #include <linux/types.h>
14415 #include <linux/module.h>
14416 #include <linux/pagemap.h>
14417 @@ -78,7 +79,6 @@ struct rsi {
14419 static struct cache_head *rsi_table[RSI_HASHMAX];
14420 static struct cache_detail rsi_cache;
14421 -static struct rsi *rsi_lookup(struct rsi *item, int set);
14423 static void rsi_free(struct rsi *rsii)
14425 @@ -125,38 +125,6 @@ static inline int dup_netobj(struct xdr_
14426 return dup_to_netobj(dst, src->data, src->len);
14429 -static inline void rsi_init(struct rsi *new, struct rsi *item)
14431 - new->out_handle.data = NULL;
14432 - new->out_handle.len = 0;
14433 - new->out_token.data = NULL;
14434 - new->out_token.len = 0;
14435 - new->in_handle.len = item->in_handle.len;
14436 - item->in_handle.len = 0;
14437 - new->in_token.len = item->in_token.len;
14438 - item->in_token.len = 0;
14439 - new->in_handle.data = item->in_handle.data;
14440 - item->in_handle.data = NULL;
14441 - new->in_token.data = item->in_token.data;
14442 - item->in_token.data = NULL;
14445 -static inline void rsi_update(struct rsi *new, struct rsi *item)
14447 - BUG_ON(new->out_handle.data || new->out_token.data);
14448 - new->out_handle.len = item->out_handle.len;
14449 - item->out_handle.len = 0;
14450 - new->out_token.len = item->out_token.len;
14451 - item->out_token.len = 0;
14452 - new->out_handle.data = item->out_handle.data;
14453 - item->out_handle.data = NULL;
14454 - new->out_token.data = item->out_token.data;
14455 - item->out_token.data = NULL;
14457 - new->major_status = item->major_status;
14458 - new->minor_status = item->minor_status;
14461 static void rsi_request(struct cache_detail *cd,
14462 struct cache_head *h,
14463 char **bpp, int *blen)
14464 @@ -168,6 +136,75 @@ static void rsi_request(struct cache_det
14469 +gssd_reply(struct rsi *item)
14472 + struct cache_head **hp, **head;
14474 + head = &rsi_cache.hash_table[rsi_hash(item)];
14475 + write_lock(&rsi_cache.hash_lock);
14476 + for (hp = head; *hp != NULL; hp = &tmp->h.next) {
14477 + tmp = container_of(*hp, struct rsi, h);
14478 + if (rsi_match(tmp, item)) {
14479 + cache_get(&tmp->h);
14480 + clear_bit(CACHE_HASHED, &tmp->h.flags);
14481 + *hp = tmp->h.next;
14482 + tmp->h.next = NULL;
14483 + rsi_cache.entries--;
14484 + if (test_bit(CACHE_VALID, &tmp->h.flags)) {
14485 + write_unlock(&rsi_cache.hash_lock);
14486 + rsi_put(&tmp->h, &rsi_cache);
14489 + set_bit(CACHE_HASHED, &item->h.flags);
14490 + item->h.next = *hp;
14492 + rsi_cache.entries++;
14493 + set_bit(CACHE_VALID, &item->h.flags);
14494 + item->h.last_refresh = get_seconds();
14495 + write_unlock(&rsi_cache.hash_lock);
14496 + cache_fresh(&rsi_cache, &tmp->h, 0);
14497 + rsi_put(&tmp->h, &rsi_cache);
14501 + write_unlock(&rsi_cache.hash_lock);
14505 +static inline struct rsi *
14506 +gssd_upcall(struct rsi *item, struct svc_rqst *rqstp)
14509 + struct cache_head **hp, **head;
14511 + head = &rsi_cache.hash_table[rsi_hash(item)];
14512 + read_lock(&rsi_cache.hash_lock);
14513 + for (hp = head; *hp != NULL; hp = &tmp->h.next) {
14514 + tmp = container_of(*hp, struct rsi, h);
14515 + if (rsi_match(tmp, item)) {
14516 + if (!test_bit(CACHE_VALID, &tmp->h.flags)) {
14517 + read_unlock(&rsi_cache.hash_lock);
14520 + *hp = tmp->h.next;
14521 + tmp->h.next = NULL;
14522 + rsi_cache.entries--;
14523 + read_unlock(&rsi_cache.hash_lock);
14527 + cache_get(&item->h);
14528 + item->h.next = *head;
14529 + *head = &item->h;
14530 + rsi_cache.entries++;
14531 + read_unlock(&rsi_cache.hash_lock);
14532 + cache_get(&item->h);
14533 + if (cache_check(&rsi_cache, &item->h, &rqstp->rq_chandle))
14538 static int rsi_parse(struct cache_detail *cd,
14539 char *mesg, int mlen)
14540 @@ -176,17 +213,22 @@ static int rsi_parse(struct cache_detail
14544 - struct rsi rsii, *rsip = NULL;
14545 + struct rsi *rsii;
14547 int status = -EINVAL;
14549 - memset(&rsii, 0, sizeof(rsii));
14550 + rsii = kmalloc(sizeof(*rsii), GFP_KERNEL);
14553 + memset(rsii, 0, sizeof(*rsii));
14554 + cache_init(&rsii->h);
14557 len = qword_get(&mesg, buf, mlen);
14561 - if (dup_to_netobj(&rsii.in_handle, buf, len))
14562 + if (dup_to_netobj(&rsii->in_handle, buf, len))
14566 @@ -195,10 +237,9 @@ static int rsi_parse(struct cache_detail
14570 - if (dup_to_netobj(&rsii.in_token, buf, len))
14571 + if (dup_to_netobj(&rsii->in_token, buf, len))
14574 - rsii.h.flags = 0;
14576 expiry = get_expiry(&mesg);
14578 @@ -212,13 +253,13 @@ static int rsi_parse(struct cache_detail
14582 - rsii.major_status = simple_strtoul(buf, &ep, 10);
14583 + rsii->major_status = simple_strtoul(buf, &ep, 10);
14586 len = qword_get(&mesg, buf, mlen);
14589 - rsii.minor_status = simple_strtoul(buf, &ep, 10);
14590 + rsii->minor_status = simple_strtoul(buf, &ep, 10);
14594 @@ -227,7 +268,7 @@ static int rsi_parse(struct cache_detail
14598 - if (dup_to_netobj(&rsii.out_handle, buf, len))
14599 + if (dup_to_netobj(&rsii->out_handle, buf, len))
14603 @@ -236,16 +277,14 @@ static int rsi_parse(struct cache_detail
14607 - if (dup_to_netobj(&rsii.out_token, buf, len))
14608 + if (dup_to_netobj(&rsii->out_token, buf, len))
14611 - rsii.h.expiry_time = expiry;
14612 - rsip = rsi_lookup(&rsii, 1);
14614 + rsii->h.expiry_time = expiry;
14615 + status = gssd_reply(rsii);
14619 - rsi_put(&rsip->h, &rsi_cache);
14621 + rsi_put(&rsii->h, &rsi_cache);
14625 @@ -258,8 +297,6 @@ static struct cache_detail rsi_cache = {
14626 .cache_parse = rsi_parse,
14629 -static DefineSimpleCacheLookup(rsi, 0)
14632 * The rpcsec_context cache is used to store a context that is
14633 * used in data exchange.
14634 @@ -292,7 +329,6 @@ struct rsc {
14636 static struct cache_head *rsc_table[RSC_HASHMAX];
14637 static struct cache_detail rsc_cache;
14638 -static struct rsc *rsc_lookup(struct rsc *item, int set);
14640 static void rsc_free(struct rsc *rsci)
14642 @@ -325,26 +361,44 @@ rsc_match(struct rsc *new, struct rsc *t
14643 return netobj_equal(&new->handle, &tmp->handle);
14646 -static inline void
14647 -rsc_init(struct rsc *new, struct rsc *tmp)
14648 +static struct rsc *rsc_lookup(struct rsc *item, int set)
14650 - new->handle.len = tmp->handle.len;
14651 - tmp->handle.len = 0;
14652 - new->handle.data = tmp->handle.data;
14653 - tmp->handle.data = NULL;
14654 - new->mechctx = NULL;
14655 - new->cred.cr_group_info = NULL;
14658 -static inline void
14659 -rsc_update(struct rsc *new, struct rsc *tmp)
14661 - new->mechctx = tmp->mechctx;
14662 - tmp->mechctx = NULL;
14663 - memset(&new->seqdata, 0, sizeof(new->seqdata));
14664 - spin_lock_init(&new->seqdata.sd_lock);
14665 - new->cred = tmp->cred;
14666 - tmp->cred.cr_group_info = NULL;
14667 + struct rsc *tmp = NULL;
14668 + struct cache_head **hp, **head;
14669 + head = &rsc_cache.hash_table[rsc_hash(item)];
14672 + write_lock(&rsc_cache.hash_lock);
14674 + read_lock(&rsc_cache.hash_lock);
14675 + for (hp = head; *hp != NULL; hp = &tmp->h.next) {
14676 + tmp = container_of(*hp, struct rsc, h);
14677 + if (!rsc_match(tmp, item))
14679 + cache_get(&tmp->h);
14682 + *hp = tmp->h.next;
14683 + tmp->h.next = NULL;
14684 + clear_bit(CACHE_HASHED, &tmp->h.flags);
14685 + rsc_put(&tmp->h, &rsc_cache);
14688 + /* Didn't find anything */
14691 + rsc_cache.entries++;
14693 + set_bit(CACHE_HASHED, &item->h.flags);
14694 + item->h.next = *head;
14695 + *head = &item->h;
14696 + write_unlock(&rsc_cache.hash_lock);
14697 + cache_fresh(&rsc_cache, &item->h, item->h.expiry_time);
14698 + cache_get(&item->h);
14701 + read_unlock(&rsc_cache.hash_lock);
14705 static int rsc_parse(struct cache_detail *cd,
14706 @@ -353,19 +407,22 @@ static int rsc_parse(struct cache_detail
14707 /* contexthandle expiry [ uid gid N <n gids> mechname ...mechdata... ] */
14710 - struct rsc rsci, *rscp = NULL;
14711 + struct rsc *rsci, *res = NULL;
14713 int status = -EINVAL;
14715 - memset(&rsci, 0, sizeof(rsci));
14716 + rsci = kmalloc(sizeof(*rsci), GFP_KERNEL);
14719 + memset(rsci, 0, sizeof(*rsci));
14720 + cache_init(&rsci->h);
14721 /* context handle */
14722 len = qword_get(&mesg, buf, mlen);
14723 if (len < 0) goto out;
14725 - if (dup_to_netobj(&rsci.handle, buf, len))
14726 + if (dup_to_netobj(&rsci->handle, buf, len))
14729 - rsci.h.flags = 0;
14731 expiry = get_expiry(&mesg);
14733 @@ -373,26 +430,26 @@ static int rsc_parse(struct cache_detail
14736 /* uid, or NEGATIVE */
14737 - rv = get_int(&mesg, &rsci.cred.cr_uid);
14738 + rv = get_int(&mesg, &rsci->cred.cr_uid);
14742 - set_bit(CACHE_NEGATIVE, &rsci.h.flags);
14743 + set_bit(CACHE_NEGATIVE, &rsci->h.flags);
14746 struct gss_api_mech *gm;
14747 struct xdr_netobj tmp_buf;
14750 - if (get_int(&mesg, &rsci.cred.cr_gid))
14751 + if (get_int(&mesg, &rsci->cred.cr_gid))
14754 /* number of additional gid's */
14755 if (get_int(&mesg, &N))
14758 - rsci.cred.cr_group_info = groups_alloc(N);
14759 - if (rsci.cred.cr_group_info == NULL)
14760 + rsci->cred.cr_group_info = groups_alloc(N);
14761 + if (rsci->cred.cr_group_info == NULL)
14765 @@ -401,7 +458,7 @@ static int rsc_parse(struct cache_detail
14767 if (get_int(&mesg, &gid))
14769 - GROUP_AT(rsci.cred.cr_group_info, i) = gid;
14770 + GROUP_AT(rsci->cred.cr_group_info, i) = gid;
14774 @@ -422,19 +479,21 @@ static int rsc_parse(struct cache_detail
14777 tmp_buf.data = buf;
14778 - if (gss_import_sec_context(&tmp_buf, gm, &rsci.mechctx)) {
14779 + if (gss_import_sec_context(&tmp_buf, gm, &rsci->mechctx)) {
14785 - rsci.h.expiry_time = expiry;
14786 - rscp = rsc_lookup(&rsci, 1);
14787 + rsci->h.expiry_time = expiry;
14788 + spin_lock_init(&rsci->seqdata.sd_lock);
14789 + res = rsc_lookup(rsci, 1);
14790 + rsc_put(&res->h, &rsc_cache);
14796 - rsc_put(&rscp->h, &rsc_cache);
14798 + rsc_put(&rsci->h, &rsc_cache);
14802 @@ -446,19 +505,14 @@ static struct cache_detail rsc_cache = {
14803 .cache_parse = rsc_parse,
14806 -static DefineSimpleCacheLookup(rsc, 0);
14809 gss_svc_searchbyctx(struct xdr_netobj *handle)
14814 - memset(&rsci, 0, sizeof(rsci));
14815 - if (dup_to_netobj(&rsci.handle, handle->data, handle->len))
14817 + rsci.handle = *handle;
14818 found = rsc_lookup(&rsci, 0);
14822 if (cache_check(&rsc_cache, &found->h, NULL))
14823 @@ -643,7 +697,6 @@ svcauth_gss_register_pseudoflavor(u32 ps
14826 cache_init(&new->h.h);
14827 - atomic_inc(&new->h.h.refcnt);
14828 new->h.name = kmalloc(strlen(name) + 1, GFP_KERNEL);
14831 @@ -651,7 +704,6 @@ svcauth_gss_register_pseudoflavor(u32 ps
14832 new->h.flavour = RPC_AUTH_GSS;
14833 new->pseudoflavor = pseudoflavor;
14834 new->h.h.expiry_time = NEVER;
14835 - new->h.h.flags = 0;
14837 test = auth_domain_lookup(&new->h, 1);
14838 if (test == &new->h) {
14839 @@ -723,6 +775,45 @@ out:
14844 +unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
14846 + int stat = -EINVAL;
14849 + u32 priv_len, maj_stat;
14852 + lenp = buf->head[0].iov_base;
14853 + priv_len = ntohl(svc_getu32(&buf->head[0]));
14854 + if (priv_len > buf->len) /* XXXJBF: wrong check */
14856 + /* XXXJBF: bizarre hack: to handle revisits (and not decrypt
14857 + * twice), the first time through we write an offset
14858 + * telling us where to skip to find the already-decrypted data */
14859 + if (rqstp->rq_deferred) {
14860 + buf->head[0].iov_base += priv_len;
14861 + buf->head[0].iov_len -= priv_len;
14864 + saved_len = buf->len; /* XXX HACK */
14865 + buf->len = priv_len;
14866 + maj_stat = gss_unwrap(ctx, GSS_C_QOP_DEFAULT, 0, buf, &out_offset);
14867 + buf->len = saved_len;
14868 + buf->head[0].iov_base += out_offset;
14869 + buf->head[0].iov_len -= out_offset;
14870 + BUG_ON(buf->head[0].iov_len <= 0);
14871 + if (maj_stat != GSS_S_COMPLETE)
14873 + if (ntohl(svc_getu32(&buf->head[0])) != seq)
14875 + /* XXXJBF: see "bizarre hack", above. */
14876 + *lenp = htonl(out_offset + 4);
14882 struct gss_svc_data {
14883 /* decoded gss client cred: */
14884 struct rpc_gss_wire_cred clcred;
14885 @@ -750,7 +841,7 @@ svcauth_gss_accept(struct svc_rqst *rqst
14886 struct gss_svc_data *svcdata = rqstp->rq_auth_data;
14887 struct rpc_gss_wire_cred *gc;
14888 struct rsc *rsci = NULL;
14889 - struct rsi *rsip, rsikey;
14890 + struct rsi *rsip, *rsikey = NULL;
14892 u32 *reject_stat = resv->iov_base + resv->iov_len;
14894 @@ -843,30 +934,23 @@ svcauth_gss_accept(struct svc_rqst *rqst
14895 *authp = rpc_autherr_badcred;
14896 if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
14898 - memset(&rsikey, 0, sizeof(rsikey));
14899 - if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
14900 + rsikey = kmalloc(sizeof(*rsikey), GFP_KERNEL);
14903 + memset(rsikey, 0, sizeof(*rsikey));
14904 + cache_init(&rsikey->h);
14905 + if (dup_netobj(&rsikey->in_handle, &gc->gc_ctx))
14907 *authp = rpc_autherr_badverf;
14908 - if (svc_safe_getnetobj(argv, &tmpobj)) {
14909 - kfree(rsikey.in_handle.data);
14910 + if (svc_safe_getnetobj(argv, &tmpobj))
14913 - if (dup_netobj(&rsikey.in_token, &tmpobj)) {
14914 - kfree(rsikey.in_handle.data);
14915 + if (dup_netobj(&rsikey->in_token, &tmpobj))
14919 - rsip = rsi_lookup(&rsikey, 0);
14920 - rsi_free(&rsikey);
14924 - switch(cache_check(&rsi_cache, &rsip->h, &rqstp->rq_chandle)) {
14926 + rsip = gssd_upcall(rsikey, rqstp);
14933 rsci = gss_svc_searchbyctx(&rsip->out_handle);
14936 @@ -921,7 +1005,16 @@ svcauth_gss_accept(struct svc_rqst *rqst
14937 svc_putu32(resv, 0);
14939 case RPC_GSS_SVC_PRIVACY:
14940 - /* currently unsupported */
14941 + if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
14942 + gc->gc_seq, rsci->mechctx))
14944 + svcdata->rsci = rsci;
14945 + cache_get(&rsci->h);
14946 + /* placeholders for length and seq. number: */
14947 + svcdata->body_start = resv->iov_base + resv->iov_len;
14948 + svc_putu32(resv, 0);
14949 + svc_putu32(resv, 0);
14954 @@ -939,13 +1032,15 @@ complete:
14959 + rsi_put(&rsikey->h, &rsi_cache);
14961 rsc_put(&rsci->h, &rsc_cache);
14966 -svcauth_gss_release(struct svc_rqst *rqstp)
14968 +svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
14970 struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
14971 struct rpc_gss_wire_cred *gc = &gsd->clcred;
14972 @@ -957,6 +1052,156 @@ svcauth_gss_release(struct svc_rqst *rqs
14973 int integ_offset, integ_len;
14974 int stat = -EINVAL;
14976 + p = gsd->body_start;
14977 + gsd->body_start = 0;
14978 + /* move accept_stat to right place: */
14979 + memcpy(p, p + 2, 4);
14980 + /* Don't wrap in failure case: */
14981 + /* Counting on not getting here if call was not even accepted! */
14982 + if (*p != rpc_success) {
14983 + resbuf->head[0].iov_len -= 2 * 4;
14987 + integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
14988 + integ_len = resbuf->len - integ_offset;
14989 + BUG_ON(integ_len % 4);
14990 + *p++ = htonl(integ_len);
14991 + *p++ = htonl(gc->gc_seq);
14992 + if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
14995 + if (resbuf->page_len == 0
14996 + && resbuf->tail[0].iov_len + RPC_MAX_AUTH_SIZE
14998 + BUG_ON(resbuf->tail[0].iov_len);
14999 + /* Use head for everything */
15000 + resv = &resbuf->head[0];
15001 + } else if (resbuf->tail[0].iov_base == NULL) {
15002 + /* copied from nfsd4_encode_read */
15003 + svc_take_page(rqstp);
15004 + resbuf->tail[0].iov_base = page_address(rqstp
15005 + ->rq_respages[rqstp->rq_resused-1]);
15006 + rqstp->rq_restailpage = rqstp->rq_resused-1;
15007 + resbuf->tail[0].iov_len = 0;
15008 + resv = &resbuf->tail[0];
15010 + resv = &resbuf->tail[0];
15012 + mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
15013 + if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic))
15015 + svc_putu32(resv, htonl(mic.len));
15016 + memset(mic.data + mic.len, 0,
15017 + round_up_to_quad(mic.len) - mic.len);
15018 + resv->iov_len += XDR_QUADLEN(mic.len) << 2;
15019 + /* not strictly required: */
15020 + resbuf->len += XDR_QUADLEN(mic.len) << 2;
15021 + BUG_ON(resv->iov_len > PAGE_SIZE);
15028 +/* XXXJBF: Look for chances to share code with client */
15029 +/* XXXJBF: Do we need to preallocate these pages somehow? E.g. see
15030 + * buffer size calculations in svcsock.c */
15031 +/* XXXJBF: how does reference counting on pages work? */
15032 +static struct page **
15033 +svc_alloc_enc_pages(struct xdr_buf *buf)
15035 + struct page **ret;
15038 + if (buf->page_len == 0)
15040 + BUG_ON(buf->page_base >> PAGE_CACHE_SHIFT);
15041 + last = (buf->page_base + buf->page_len - 1) >> PAGE_CACHE_SHIFT;
15042 + ret = kmalloc((last + 1) * sizeof(struct page *), GFP_KERNEL);
15045 + for (i = 0; i<= last; i++) {
15046 + ret[i] = alloc_page(GFP_KERNEL);
15047 + if (ret[i] == NULL)
15053 + for (i--; i >= 0; i--) {
15054 + __free_page(ret[i]);
15060 +svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
15062 + struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
15063 + struct rpc_gss_wire_cred *gc = &gsd->clcred;
15064 + struct xdr_buf *resbuf = &rqstp->rq_res;
15065 + struct page **inpages;
15067 + int offset, *len;
15069 + int stat = -EINVAL;
15071 + p = gsd->body_start;
15072 + gsd->body_start = 0;
15073 + /* move accept_stat to right place: */
15074 + memcpy(p, p + 2, 4);
15075 + /* Don't wrap in failure case: */
15076 + /* Counting on not getting here if call was not even accepted! */
15077 + if (*p != rpc_success) {
15078 + resbuf->head[0].iov_len -= 2 * 4;
15083 + offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
15084 + *p++ = htonl(gc->gc_seq);
15086 + inpages = resbuf->pages;
15087 + /* XXXJBF: huge memory leaks here: allocated pages probably aren't
15088 + * freed, and neither is memory used to hold page array. */
15089 + resbuf->pages = svc_alloc_enc_pages(resbuf);
15090 + if (resbuf->page_len && !resbuf->pages)
15091 + goto out_err; /* XXX sleep and retry? Reserve ahead of time
15093 + if (resbuf->tail[0].iov_len == 0 || resbuf->tail[0].iov_base == NULL) {
15094 + /* copied from nfsd4_encode_read */
15095 + {int i = svc_take_page(rqstp); BUG_ON(i); }
15096 + resbuf->tail[0].iov_base = page_address(rqstp
15097 + ->rq_respages[rqstp->rq_resused-1]);
15098 + rqstp->rq_restailpage = rqstp->rq_resused-1;
15099 + resbuf->tail[0].iov_len = 0;
15101 + /* XXX: Will svc code attempt to free stuff in xdr_buf->pages?
15102 + * Or can we leave it in any old state on error?? */
15104 + if (gss_wrap(gsd->rsci->mechctx, GSS_C_QOP_DEFAULT, offset,
15105 + resbuf, inpages))
15107 + *len = htonl(resbuf->len - offset);
15108 + pad = 3 - ((resbuf->len - offset - 1)&3);
15109 + p = (u32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
15110 + memset(p, 0, pad);
15111 + resbuf->tail[0].iov_len += pad;
15119 +svcauth_gss_release(struct svc_rqst *rqstp)
15121 + struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
15122 + struct rpc_gss_wire_cred *gc = &gsd->clcred;
15123 + struct xdr_buf *resbuf = &rqstp->rq_res;
15124 + int stat = -EINVAL;
15126 if (gc->gc_proc != RPC_GSS_PROC_DATA)
15128 /* Release can be called twice, but we only wrap once. */
15129 @@ -969,55 +1214,15 @@ svcauth_gss_release(struct svc_rqst *rqs
15130 case RPC_GSS_SVC_NONE:
15132 case RPC_GSS_SVC_INTEGRITY:
15133 - p = gsd->body_start;
15134 - gsd->body_start = 0;
15135 - /* move accept_stat to right place: */
15136 - memcpy(p, p + 2, 4);
15137 - /* don't wrap in failure case: */
15138 - /* Note: counting on not getting here if call was not even
15140 - if (*p != rpc_success) {
15141 - resbuf->head[0].iov_len -= 2 * 4;
15145 - integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
15146 - integ_len = resbuf->len - integ_offset;
15147 - BUG_ON(integ_len % 4);
15148 - *p++ = htonl(integ_len);
15149 - *p++ = htonl(gc->gc_seq);
15150 - if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
15153 - if (resbuf->page_len == 0
15154 - && resbuf->tail[0].iov_len + RPC_MAX_AUTH_SIZE
15156 - BUG_ON(resbuf->tail[0].iov_len);
15157 - /* Use head for everything */
15158 - resv = &resbuf->head[0];
15159 - } else if (resbuf->tail[0].iov_base == NULL) {
15160 - /* copied from nfsd4_encode_read */
15161 - svc_take_page(rqstp);
15162 - resbuf->tail[0].iov_base = page_address(rqstp
15163 - ->rq_respages[rqstp->rq_resused-1]);
15164 - rqstp->rq_restailpage = rqstp->rq_resused-1;
15165 - resbuf->tail[0].iov_len = 0;
15166 - resv = &resbuf->tail[0];
15168 - resv = &resbuf->tail[0];
15170 - mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
15171 - if (gss_get_mic(gsd->rsci->mechctx, 0, &integ_buf, &mic))
15172 + stat = svcauth_gss_wrap_resp_integ(rqstp);
15175 - svc_putu32(resv, htonl(mic.len));
15176 - memset(mic.data + mic.len, 0,
15177 - round_up_to_quad(mic.len) - mic.len);
15178 - resv->iov_len += XDR_QUADLEN(mic.len) << 2;
15179 - /* not strictly required: */
15180 - resbuf->len += XDR_QUADLEN(mic.len) << 2;
15181 - BUG_ON(resv->iov_len > PAGE_SIZE);
15183 case RPC_GSS_SVC_PRIVACY:
15184 + stat = svcauth_gss_wrap_resp_priv(rqstp);
15191 --- linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_wrap.c.lsec 2005-03-23 14:28:24.900264280 -0700
15192 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_wrap.c 2005-03-23 14:28:24.900264280 -0700
15194 +#include <linux/types.h>
15195 +#include <linux/slab.h>
15196 +#include <linux/jiffies.h>
15197 +#include <linux/sunrpc/gss_krb5.h>
15198 +#include <linux/random.h>
15199 +#include <linux/pagemap.h>
15200 +#include <asm/scatterlist.h>
15201 +#include <linux/crypto.h>
15204 +# define RPCDBG_FACILITY RPCDBG_AUTH
15208 +gss_krb5_padding(int blocksize, int length)
15210 + /* Most of the code is block-size independent but currently we
15212 + BUG_ON(blocksize != 8);
15213 + return 8 - (length & 7);
15216 +static inline void
15217 +gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
15219 + int padding = gss_krb5_padding(blocksize, buf->len - offset);
15221 + struct iovec *iov;
15223 + if (buf->page_len || buf->tail[0].iov_len)
15224 + iov = &buf->tail[0];
15226 + iov = &buf->head[0];
15227 + p = iov->iov_base + iov->iov_len;
15228 + iov->iov_len += padding;
15229 + buf->len += padding;
15230 + memset(p, padding, padding);
15234 +gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
15238 + int len = buf->len;
15240 + if (len <= buf->head[0].iov_len) {
15241 + pad = *(u8 *)(buf->head[0].iov_base + len - 1);
15244 + len -= buf->head[0].iov_len;
15245 + if (len <= buf->page_len) {
15246 + int last = (buf->page_base + len - 1)
15247 + >>PAGE_CACHE_SHIFT;
15248 + int offset = (buf->page_base + len - 1)
15249 + & (PAGE_CACHE_SIZE - 1);
15250 + ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA);
15251 + pad = *(ptr + offset);
15252 + kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA);
15255 + len -= buf->page_len;
15256 + BUG_ON(len > buf->tail[0].iov_len);
15257 + pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
15259 + if (pad > blocksize)
15265 +static inline void
15266 +make_confounder(char *p, int blocksize)
15268 + /* XXX? Is this OK to do on every packet? */
15269 + get_random_bytes(p, blocksize);
15272 +/* Assumptions: the head and tail of inbuf are ours to play with.
15273 + * The pages, however, may be real pages in the page cache and we replace
15274 + * them with scratch pages from **pages before writing to them. */
15275 +/* XXX: obviously the above should be documentation of wrap interface,
15276 + * and shouldn't be in this kerberos-specific file. */
15278 +/* XXX factor out common code with seal/unseal. */
15281 +gss_wrap_kerberos(struct gss_ctx *ctx, u32 qop, int offset,
15282 + struct xdr_buf *buf, struct page **pages)
15284 + struct krb5_ctx *kctx = ctx->internal_ctx_id;
15285 + s32 checksum_type;
15286 + struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
15287 + int blocksize = 0, plainlen;
15288 + unsigned char *ptr, *krb5_hdr, *msg_start;
15291 + struct page **tmp_pages;
15294 + dprintk("RPC: gss_wrap_kerberos\n");
15296 + now = get_seconds();
15301 + switch (kctx->signalg) {
15302 + case SGN_ALG_DES_MAC_MD5:
15303 + checksum_type = CKSUMTYPE_RSA_MD5;
15306 + dprintk("RPC: gss_krb5_seal: kctx->signalg %d not"
15307 + " supported\n", kctx->signalg);
15310 + if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
15311 + dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n",
15316 + blocksize = crypto_tfm_alg_blocksize(kctx->enc);
15317 + gss_krb5_add_padding(buf, offset, blocksize);
15318 + BUG_ON((buf->len - offset) % blocksize);
15319 + plainlen = blocksize + buf->len - offset;
15321 + headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
15322 + (buf->len - offset);
15324 + ptr = buf->head[0].iov_base + offset;
15325 + /* shift data to make room for header. */
15326 + /* XXX Would be cleverer to encrypt while copying. */
15327 + /* XXX bounds checking, slack, etc. */
15328 + memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
15329 + buf->head[0].iov_len += headlen;
15330 + buf->len += headlen;
15331 + BUG_ON((buf->len - offset - headlen) % blocksize);
15333 + g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);
15336 + *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
15337 + *ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff);
15339 + /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
15340 + krb5_hdr = ptr - 2;
15341 + msg_start = krb5_hdr + 24;
15342 + /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
15344 + *(u16 *)(krb5_hdr + 2) = htons(kctx->signalg);
15345 + memset(krb5_hdr + 4, 0xff, 4);
15346 + *(u16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
15348 + make_confounder(msg_start, blocksize);
15350 + /* XXXJBF: UGH!: */
15351 + tmp_pages = buf->pages;
15352 + buf->pages = pages;
15353 + if (make_checksum(checksum_type, krb5_hdr, 8, buf,
15354 + offset + headlen - blocksize, &md5cksum))
15356 + buf->pages = tmp_pages;
15358 + switch (kctx->signalg) {
15359 + case SGN_ALG_DES_MAC_MD5:
15360 + if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
15361 + md5cksum.data, md5cksum.len))
15363 + memcpy(krb5_hdr + 16,
15364 + md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
15365 + KRB5_CKSUM_LENGTH);
15367 + dprintk("RPC: make_seal_token: cksum data: \n");
15368 + print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
15374 + kfree(md5cksum.data);
15376 + spin_lock(&krb5_seq_lock);
15377 + seq_send = kctx->seq_send++;
15378 + spin_unlock(&krb5_seq_lock);
15380 + /* XXX would probably be more efficient to compute checksum
15381 + * and encrypt at the same time: */
15382 + if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
15383 + seq_send, krb5_hdr + 16, krb5_hdr + 8)))
15386 + if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
15390 + return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
15392 + if (md5cksum.data) kfree(md5cksum.data);
15393 + return GSS_S_FAILURE;
15397 +gss_unwrap_kerberos(struct gss_ctx *ctx, u32 *qop, int offset,
15398 + struct xdr_buf *buf, int *out_offset)
15400 + struct krb5_ctx *kctx = ctx->internal_ctx_id;
15403 + s32 checksum_type;
15404 + struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
15408 + unsigned char *ptr;
15410 + u32 ret = GSS_S_DEFECTIVE_TOKEN;
15414 + dprintk("RPC: gss_unwrap_kerberos\n");
15416 + ptr = (u8 *)buf->head[0].iov_base + offset;
15417 + if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
15418 + buf->len - offset))
15421 + if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
15422 + (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) )
15425 + /* XXX sanity-check bodysize?? */
15427 + /* get the sign and seal algorithms */
15429 + signalg = ptr[0] + (ptr[1] << 8);
15430 + sealalg = ptr[2] + (ptr[3] << 8);
15432 + /* Sanity checks */
15434 + if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
15437 + if (sealalg == 0xffff)
15440 + /* in the current spec, there is only one valid seal algorithm per
15441 + key type, so a simple comparison is ok */
15443 + if (sealalg != kctx->sealalg)
15446 + /* there are several mappings of seal algorithms to sign algorithms,
15447 + but few enough that we can try them all. */
15449 + if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
15450 + (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
15451 + (kctx->sealalg == SEAL_ALG_DES3KD &&
15452 + signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
15455 + if (gss_decrypt_xdr_buf(kctx->enc, buf,
15456 + ptr + 22 - (unsigned char *)buf->head[0].iov_base))
15459 + /* compute the checksum of the message */
15461 +	/* initialize the cksum */
15462 + switch (signalg) {
15463 + case SGN_ALG_DES_MAC_MD5:
15464 + checksum_type = CKSUMTYPE_RSA_MD5;
15467 + ret = GSS_S_DEFECTIVE_TOKEN;
15471 + switch (signalg) {
15472 + case SGN_ALG_DES_MAC_MD5:
15473 + ret = make_checksum(checksum_type, ptr - 2, 8, buf,
15474 + ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
15478 + ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
15479 + md5cksum.data, md5cksum.len);
15483 + if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
15484 + ret = GSS_S_BAD_SIG;
15489 + ret = GSS_S_DEFECTIVE_TOKEN;
15493 + /* it got through unscathed. Make sure the context is unexpired */
15496 + *qop = GSS_C_QOP_DEFAULT;
15498 + now = get_seconds();
15500 + ret = GSS_S_CONTEXT_EXPIRED;
15501 + if (now > kctx->endtime)
15504 + /* do sequencing checks */
15506 + ret = GSS_S_BAD_SIG;
15507 + if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
15511 + if ((kctx->initiate && direction != 0xff) ||
15512 + (!kctx->initiate && direction != 0))
15515 + /* Copy the data back to the right position. XXX: Would probably be
15516 + * better to copy and encrypt at the same time. */
15518 + blocksize = crypto_tfm_alg_blocksize(kctx->enc);
15519 + data_start = ptr + 22 + blocksize;
15520 + *out_offset = data_start - (u8 *)buf->head[0].iov_base;
15522 + ret = GSS_S_DEFECTIVE_TOKEN;
15523 + if (gss_krb5_remove_padding(buf, blocksize))
15526 + ret = GSS_S_COMPLETE;
15528 + if (md5cksum.data) kfree(md5cksum.data);
15531 --- linux-2.6.7/net/sunrpc/auth_gss/gss_mech_switch.c.lsec 2004-06-15 23:19:37.000000000 -0600
15532 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_mech_switch.c 2005-03-23 14:28:24.782282216 -0700
15533 @@ -279,6 +279,29 @@ gss_verify_mic(struct gss_ctx *context_
15538 +gss_wrap(struct gss_ctx *ctx_id,
15541 + struct xdr_buf *buf,
15542 + struct page **inpages)
15544 + return ctx_id->mech_type->gm_ops
15545 + ->gss_wrap(ctx_id, qop, offset, buf, inpages);
15549 +gss_unwrap(struct gss_ctx *ctx_id,
15552 + struct xdr_buf *buf,
15555 + return ctx_id->mech_type->gm_ops
15556 + ->gss_unwrap(ctx_id, qop, offset, buf, out_offset);
15560 /* gss_delete_sec_context: free all resources associated with context_handle.
15561 * Note this differs from the RFC 2744-specified prototype in that we don't
15562 * bother returning an output token, since it would never be used anyway. */
15563 --- linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_mech.c.lsec 2004-06-15 23:19:42.000000000 -0600
15564 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_mech.c 2005-03-23 14:28:24.841273248 -0700
15565 @@ -182,6 +182,7 @@ gss_delete_sec_context_kerberos(void *in
15569 +/* XXX the following wrappers have become pointless; kill them. */
15571 gss_verify_mic_kerberos(struct gss_ctx *ctx,
15572 struct xdr_buf *message,
15573 @@ -191,8 +192,7 @@ gss_verify_mic_kerberos(struct gss_ctx
15575 struct krb5_ctx *kctx = ctx->internal_ctx_id;
15577 - maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state,
15579 + maj_stat = krb5_read_token(kctx, mic_token, message, &qop_state);
15580 if (!maj_stat && qop_state)
15581 *qstate = qop_state;
15583 @@ -208,7 +208,7 @@ gss_get_mic_kerberos(struct gss_ctx *ctx
15585 struct krb5_ctx *kctx = ctx->internal_ctx_id;
15587 - err = krb5_make_token(kctx, qop, message, mic_token, KG_TOK_MIC_MSG);
15588 + err = krb5_make_token(kctx, qop, message, mic_token);
15590 dprintk("RPC: gss_get_mic_kerberos returning %d\n",err);
15592 @@ -219,6 +219,8 @@ static struct gss_api_ops gss_kerberos_o
15593 .gss_import_sec_context = gss_import_sec_context_kerberos,
15594 .gss_get_mic = gss_get_mic_kerberos,
15595 .gss_verify_mic = gss_verify_mic_kerberos,
15596 + .gss_wrap = gss_wrap_kerberos,
15597 + .gss_unwrap = gss_unwrap_kerberos,
15598 .gss_delete_sec_context = gss_delete_sec_context_kerberos,
15601 @@ -233,6 +235,11 @@ static struct pf_desc gss_kerberos_pfs[]
15602 .service = RPC_GSS_SVC_INTEGRITY,
15606 + .pseudoflavor = RPC_AUTH_GSS_KRB5P,
15607 + .service = RPC_GSS_SVC_PRIVACY,
15612 static struct gss_api_mech gss_kerberos_mech = {
15613 --- linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_seal.c.lsec 2004-06-15 23:18:37.000000000 -0600
15614 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_krb5_seal.c 2005-03-23 14:28:24.898264584 -0700
15615 @@ -70,24 +70,17 @@
15616 # define RPCDBG_FACILITY RPCDBG_AUTH
15620 -gss_krb5_padding(int blocksize, int length) {
15621 - /* Most of the code is block-size independent but in practice we
15623 - BUG_ON(blocksize != 8);
15624 - return 8 - (length & 7);
15626 +spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
15629 krb5_make_token(struct krb5_ctx *ctx, int qop_req,
15630 - struct xdr_buf *text, struct xdr_netobj *token,
15632 + struct xdr_buf *text, struct xdr_netobj *token)
15635 struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
15636 - int blocksize = 0, tmsglen;
15637 unsigned char *ptr, *krb5_hdr, *msg_start;
15641 dprintk("RPC: gss_krb5_seal\n");
15643 @@ -111,21 +104,13 @@ krb5_make_token(struct krb5_ctx *ctx, in
15647 - if (toktype == KG_TOK_WRAP_MSG) {
15648 - blocksize = crypto_tfm_alg_blocksize(ctx->enc);
15649 - tmsglen = blocksize + text->len
15650 - + gss_krb5_padding(blocksize, blocksize + text->len);
15655 - token->len = g_token_size(&ctx->mech_used, 22 + tmsglen);
15656 + token->len = g_token_size(&ctx->mech_used, 22);
15659 - g_make_token_header(&ctx->mech_used, 22 + tmsglen, &ptr);
15660 + g_make_token_header(&ctx->mech_used, 22, &ptr);
15662 - *ptr++ = (unsigned char) ((toktype>>8)&0xff);
15663 - *ptr++ = (unsigned char) (toktype&0xff);
15664 + *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
15665 + *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
15667 /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
15668 krb5_hdr = ptr - 2;
15669 @@ -133,17 +118,9 @@ krb5_make_token(struct krb5_ctx *ctx, in
15671 *(u16 *)(krb5_hdr + 2) = htons(ctx->signalg);
15672 memset(krb5_hdr + 4, 0xff, 4);
15673 - if (toktype == KG_TOK_WRAP_MSG)
15674 - *(u16 *)(krb5_hdr + 4) = htons(ctx->sealalg);
15676 - if (toktype == KG_TOK_WRAP_MSG) {
15677 - /* XXX removing support for now */
15679 - } else { /* Sign only. */
15680 - if (make_checksum(checksum_type, krb5_hdr, 8, text,
15682 + if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
15686 switch (ctx->signalg) {
15687 case SGN_ALG_DES_MAC_MD5:
15688 @@ -163,12 +140,14 @@ krb5_make_token(struct krb5_ctx *ctx, in
15690 kfree(md5cksum.data);
15692 + spin_lock(&krb5_seq_lock);
15693 + seq_send = ctx->seq_send++;
15694 + spin_unlock(&krb5_seq_lock);
15696 if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
15697 - ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8)))
15698 + seq_send, krb5_hdr + 16, krb5_hdr + 8)))
15703 return ((ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
15705 if (md5cksum.data) kfree(md5cksum.data);
15706 --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_token.c.lsec 2005-03-23 14:28:24.240364600 -0700
15707 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_token.c 2005-03-23 14:28:24.239364752 -0700
15710 + * linux/net/sunrpc/gss_spkm3_token.c
15712 + * Copyright (c) 2003 The Regents of the University of Michigan.
15713 + * All rights reserved.
15715 + * Andy Adamson <andros@umich.edu>
15717 + * Redistribution and use in source and binary forms, with or without
15718 + * modification, are permitted provided that the following conditions
15721 + * 1. Redistributions of source code must retain the above copyright
15722 + * notice, this list of conditions and the following disclaimer.
15723 + * 2. Redistributions in binary form must reproduce the above copyright
15724 + * notice, this list of conditions and the following disclaimer in the
15725 + * documentation and/or other materials provided with the distribution.
15726 + * 3. Neither the name of the University nor the names of its
15727 + * contributors may be used to endorse or promote products derived
15728 + * from this software without specific prior written permission.
15730 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
15731 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
15732 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
15733 + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
15734 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
15735 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
15736 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
15737 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
15738 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
15739 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
15740 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15744 +#include <linux/types.h>
15745 +#include <linux/slab.h>
15746 +#include <linux/jiffies.h>
15747 +#include <linux/sunrpc/gss_spkm3.h>
15748 +#include <linux/random.h>
15749 +#include <linux/crypto.h>
15752 +# define RPCDBG_FACILITY RPCDBG_AUTH
15756 + * asn1_bitstring_len()
15758 + * calculate the asn1 bitstring length of the xdr_netobject
15761 +asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
15763 + int i, zbit = 0,elen = in->len;
15766 + ptr = &in->data[in->len -1];
15768 + /* count trailing 0's */
15769 + for(i = in->len; i > 0; i--) {
15777 + /* count number of 0 bits in final octet */
15778 + ptr = &in->data[elen - 1];
15779 + for(i = 0; i < 8; i++) {
15780 + short mask = 0x01;
15782 + if (!((mask << i) & *ptr))
15788 + *zerobits = zbit;
15792 + * decode_asn1_bitstring()
15794 + * decode a bitstring into a buffer of the expected length.
15795 + * enclen = bit string length
15796 + * explen = expected length (define in rfc)
15799 +decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
15801 + if (!(out->data = kmalloc(explen,GFP_KERNEL)))
15803 + out->len = explen;
15804 + memset(out->data, 0, explen);
15805 + memcpy(out->data, in, enclen);
15810 + * SPKMInnerContextToken choice SPKM_MIC asn1 token layout
15812 + * contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
15814 + * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
15818 + * [0] a4 SPKM-MIC tag
15819 + * [1] ?? innertoken length (max 44)
15822 + * tok_hdr piece of checksum data starts here
15824 + * the maximum mic-header len = 9 + 17 = 26
15827 + * [2] 30 SEQUENCE tag
15828 + * [3] ?? mic-header length: (max 23) = TokenID + ContextID
15830 + * TokenID - all fields constant and can be hardcoded
15833 + * [5] 02 Length 2
15834 + * [6][7] 01 01 TokenID (SPKM_MIC_TOK)
15836 + * ContextID - encoded length not constant, calculated
15839 + * [9] ?? encoded length
15840 + * [10] ?? ctxzbit
15843 + * mic_header piece of checksum data ends here.
15845 + * int-cksum - encoded length not constant, calculated
15848 + * [??] ?? encoded length
15849 + * [??] ?? md5zbit
15850 + * [??] int-cksum (NID_md5 = 16)
15852 + * maximum SPKM-MIC innercontext token length =
15853 + * 10 + encoded contextid_size(17 max) + 2 + encoded
15854 + *   cksum_size (17 max for NID_md5) = 46
15858 + * spkm3_mic_header()
15860 + * Prepare the SPKM_MIC_TOK mic-header for check-sum calculation
15861 + * elen: 16 byte context id asn1 bitstring encoded length
15864 +spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ctxdata, int elen, int zbit)
15866 + char *hptr = *hdrbuf;
15867 + char *top = *hdrbuf;
15869 + *(u8 *)hptr++ = 0x30;
15870 + *(u8 *)hptr++ = elen + 7; /* on the wire header length */
15873 + *(u8 *)hptr++ = 0x02;
15874 + *(u8 *)hptr++ = 0x02;
15875 + *(u8 *)hptr++ = 0x01;
15876 + *(u8 *)hptr++ = 0x01;
15879 + *(u8 *)hptr++ = 0x03;
15880 + *(u8 *)hptr++ = elen + 1; /* add 1 to include zbit */
15881 + *(u8 *)hptr++ = zbit;
15882 + memcpy(hptr, ctxdata, elen);
15884 + *hdrlen = hptr - top;
15888 + * spkm3_mic_innercontext_token()
15890 + * *tokp points to the beginning of the SPKM_MIC token described
15891 + * in rfc 2025, section 3.2.1:
15895 +spkm3_make_mic_token(unsigned char **tokp, int toklen, struct xdr_netobj *mic_hdr, struct xdr_netobj *md5cksum, int md5elen, int md5zbit)
15897 + unsigned char *ict = *tokp;
15899 + *(u8 *)ict++ = 0xa4;
15900 + *(u8 *)ict++ = toklen - 2;
15901 + memcpy(ict, mic_hdr->data, mic_hdr->len);
15902 + ict += mic_hdr->len;
15904 + *(u8 *)ict++ = 0x03;
15905 + *(u8 *)ict++ = md5elen + 1; /* add 1 to include zbit */
15906 + *(u8 *)ict++ = md5zbit;
15907 + memcpy(ict, md5cksum->data, md5elen);
15911 +spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **cksum)
15913 + struct xdr_netobj spkm3_ctx_id = {.len =0, .data = NULL};
15914 + unsigned char *ptr = *tokp;
15916 + u32 ret = GSS_S_DEFECTIVE_TOKEN;
15918 + /* spkm3 innercontext token preamble */
15919 + if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
15920 + dprintk("RPC: BAD SPKM ictoken preamble\n");
15924 + *mic_hdrlen = ptr[3];
15927 + if ((ptr[4] != 0x02) || (ptr[5] != 0x02)) {
15928 + dprintk("RPC: BAD asn1 SPKM3 token type\n");
15932 + /* only support SPKM_MIC_TOK */
15933 + if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
15934 + dprintk("RPC: ERROR unsupported SPKM3 token \n");
15939 + if (ptr[8] != 0x03) {
15940 + dprintk("RPC: BAD SPKM3 asn1 context-id type\n");
15944 + ctxelen = ptr[9];
15945 + if (ctxelen > 17) { /* length includes asn1 zbit octet */
15946 + dprintk("RPC: BAD SPKM3 contextid len %d\n", ctxelen);
15950 + /* ignore ptr[10] */
15952 + if(!decode_asn1_bitstring(&spkm3_ctx_id, &ptr[11], ctxelen - 1, 16))
15956 + * in the current implementation: the optional int-alg is not present
15957 + * so the default int-alg (md5) is used the optional snd-seq field is
15958 + * also not present
15961 + if (*mic_hdrlen != 6 + ctxelen) {
15962 + dprintk("RPC: BAD SPKM_ MIC_TOK header len %d: we only support default int-alg (should be absent) and do not support snd-seq\n", *mic_hdrlen);
15966 + *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
15968 + ret = GSS_S_COMPLETE;
15970 + if (spkm3_ctx_id.data)
15971 + kfree(spkm3_ctx_id.data);
15975 --- linux-2.6.7/net/sunrpc/auth_gss/gss_generic_token.c.lsec 2004-06-15 23:19:10.000000000 -0600
15976 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_generic_token.c 2005-03-23 14:28:23.707445616 -0700
15977 @@ -179,7 +179,7 @@ EXPORT_SYMBOL(g_make_token_header);
15980 g_verify_token_header(struct xdr_netobj *mech, int *body_size,
15981 - unsigned char **buf_in, int tok_type, int toksize)
15982 + unsigned char **buf_in, int toksize)
15984 unsigned char *buf = *buf_in;
15986 --- linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_unseal.c.lsec 2005-03-23 14:28:24.240364600 -0700
15987 +++ linux-2.6.7/net/sunrpc/auth_gss/gss_spkm3_unseal.c 2005-03-23 14:28:24.240364600 -0700
15990 + * linux/net/sunrpc/gss_spkm3_unseal.c
15992 + * Copyright (c) 2003 The Regents of the University of Michigan.
15993 + * All rights reserved.
15995 + * Andy Adamson <andros@umich.edu>
15997 + * Redistribution and use in source and binary forms, with or without
15998 + * modification, are permitted provided that the following conditions
16001 + * 1. Redistributions of source code must retain the above copyright
16002 + * notice, this list of conditions and the following disclaimer.
16003 + * 2. Redistributions in binary form must reproduce the above copyright
16004 + * notice, this list of conditions and the following disclaimer in the
16005 + * documentation and/or other materials provided with the distribution.
16006 + * 3. Neither the name of the University nor the names of its
16007 + * contributors may be used to endorse or promote products derived
16008 + * from this software without specific prior written permission.
16010 + * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
16011 + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
16012 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
16013 + * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
16014 + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
16015 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
16016 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
16017 + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
16018 + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
16019 + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
16020 + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
16024 +#include <linux/types.h>
16025 +#include <linux/slab.h>
16026 +#include <linux/jiffies.h>
16027 +#include <linux/sunrpc/gss_spkm3.h>
16028 +#include <linux/crypto.h>
16031 +# define RPCDBG_FACILITY RPCDBG_AUTH
16035 + * spkm3_read_token()
16037 + * only SPKM_MIC_TOK with md5 intg-alg is supported
16040 +spkm3_read_token(struct spkm3_ctx *ctx,
16041 + struct xdr_netobj *read_token, /* checksum */
16042 + struct xdr_buf *message_buffer, /* signbuf */
16043 + int *qop_state, int toktype)
16046 + struct xdr_netobj wire_cksum = {.len =0, .data = NULL};
16047 + struct xdr_netobj md5cksum = {.len = 0, .data = NULL};
16048 + unsigned char *ptr = (unsigned char *)read_token->data;
16049 + unsigned char *cksum;
16050 + int bodysize, md5elen;
16052 + u32 ret = GSS_S_DEFECTIVE_TOKEN;
16054 + dprintk("RPC: spkm3_read_token read_token->len %d\n", read_token->len);
16056 + if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used,
16057 + &bodysize, &ptr, read_token->len))
16060 + /* decode the token */
16062 + if (toktype == SPKM_MIC_TOK) {
16064 + if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
16067 + if (*cksum++ != 0x03) {
16068 + dprintk("RPC: spkm3_read_token BAD checksum type\n");
16071 + md5elen = *cksum++;
16072 + cksum++; /* move past the zbit */
16074 + if(!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
16077 + /* HARD CODED FOR MD5 */
16079 + /* compute the checksum of the message.
16080 + * ptr + 2 = start of header piece of checksum
16081 + * mic_hdrlen + 2 = length of header piece of checksum
16083 + ret = GSS_S_DEFECTIVE_TOKEN;
16084 + code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2,
16086 + message_buffer, &md5cksum);
16091 + dprintk("RPC: spkm3_read_token: digest wire_cksum.len %d:\n",
16093 + dprintk(" md5cksum.data\n");
16094 + print_hexl((u32 *) md5cksum.data, 16, 0);
16095 + dprintk(" cksum.data:\n");
16096 + print_hexl((u32 *) wire_cksum.data, wire_cksum.len, 0);
16098 + ret = GSS_S_BAD_SIG;
16099 + code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
16104 + dprintk("RPC: BAD or UNSUPPORTED SPKM3 token type: %d\n",toktype);
16108 + /* XXX: need to add expiration and sequencing */
16109 + ret = GSS_S_COMPLETE;
16111 + if (md5cksum.data)
16112 + kfree(md5cksum.data);
16113 + if (wire_cksum.data)
16114 + kfree(wire_cksum.data);
16117 --- linux-2.6.7/net/sunrpc/auth_gss/Makefile.lsec 2004-06-15 23:19:22.000000000 -0600
16118 +++ linux-2.6.7/net/sunrpc/auth_gss/Makefile 2005-03-23 14:28:24.294356392 -0700
16119 @@ -10,5 +10,9 @@ auth_rpcgss-objs := auth_gss.o gss_gener
16120 obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
16122 rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
16123 - gss_krb5_seqnum.o
16124 + gss_krb5_seqnum.o gss_krb5_wrap.o
16126 +obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
16128 +rpcsec_gss_spkm3-objs := gss_spkm3_mech.o gss_spkm3_seal.o gss_spkm3_unseal.o \
16129 + gss_spkm3_token.o
16130 --- linux-2.6.7/net/sunrpc/cache.c.lsec 2004-06-15 23:19:36.000000000 -0600
16131 +++ linux-2.6.7/net/sunrpc/cache.c 2005-03-23 14:28:24.406339368 -0700
16132 @@ -38,7 +38,7 @@ void cache_init(struct cache_head *h)
16133 time_t now = get_seconds();
16136 - atomic_set(&h->refcnt, 0);
16137 + atomic_set(&h->refcnt, 1);
16138 h->expiry_time = now + CACHE_NEW_EXPIRY;
16139 h->last_refresh = now;
16141 --- linux-2.6.7/net/sunrpc/svc.c.lsec 2004-06-15 23:20:03.000000000 -0600
16142 +++ linux-2.6.7/net/sunrpc/svc.c 2005-03-23 14:28:23.652453976 -0700
16143 @@ -263,6 +263,7 @@ svc_process(struct svc_serv *serv, struc
16145 u32 dir, prog, vers, proc,
16146 auth_stat, rpc_stat;
16149 rpc_stat = rpc_success;
16151 @@ -304,12 +305,17 @@ svc_process(struct svc_serv *serv, struc
16152 rqstp->rq_vers = vers = ntohl(svc_getu32(argv)); /* version number */
16153 rqstp->rq_proc = proc = ntohl(svc_getu32(argv)); /* procedure number */
16155 + progp = serv->sv_program;
16157 * Decode auth data, and add verifier to reply buffer.
16158 * We do this before anything else in order to get a decent
16161 - switch (svc_authenticate(rqstp, &auth_stat)) {
16162 + if (progp->pg_authenticate != NULL)
16163 + auth_res = progp->pg_authenticate(rqstp, &auth_stat);
16165 + auth_res = svc_authenticate(rqstp, &auth_stat);
16166 + switch (auth_res) {
16170 @@ -326,7 +332,6 @@ svc_process(struct svc_serv *serv, struc
16174 - progp = serv->sv_program;
16175 if (prog != progp->pg_prog)
16178 --- linux-2.6.7/net/sunrpc/svcauth.c.lsec 2004-06-15 23:19:44.000000000 -0600
16179 +++ linux-2.6.7/net/sunrpc/svcauth.c 2005-03-23 14:28:24.407339216 -0700
16180 @@ -156,25 +156,47 @@ static inline int auth_domain_match(stru
16182 return strcmp(tmp->name, item->name) == 0;
16184 -DefineCacheLookup(struct auth_domain,
16186 - auth_domain_lookup,
16187 - (struct auth_domain *item, int set),
16189 - &auth_domain_cache,
16190 - auth_domain_hash(item),
16191 - auth_domain_match(tmp, item),
16192 - kfree(new); if(!set) {
16194 - write_unlock(&auth_domain_cache.hash_lock);
16196 - read_unlock(&auth_domain_cache.hash_lock);
16199 - new=item; atomic_inc(&new->h.refcnt),
16201 - 0 /* no inplace updates */
16204 +struct auth_domain *
16205 +auth_domain_lookup(struct auth_domain *item, int set)
16207 + struct auth_domain *tmp = NULL;
16208 + struct cache_head **hp, **head;
16209 + head = &auth_domain_cache.hash_table[auth_domain_hash(item)];
16212 + write_lock(&auth_domain_cache.hash_lock);
16214 + read_lock(&auth_domain_cache.hash_lock);
16215 + for (hp=head; *hp != NULL; hp = &tmp->h.next) {
16216 + tmp = container_of(*hp, struct auth_domain, h);
16217 + if (!auth_domain_match(tmp, item))
16219 + cache_get(&tmp->h);
16222 + *hp = tmp->h.next;
16223 + tmp->h.next = NULL;
16224 + clear_bit(CACHE_HASHED, &tmp->h.flags);
16225 + auth_domain_drop(&tmp->h, &auth_domain_cache);
16228 + /* Didn't find anything */
16231 + auth_domain_cache.entries++;
16233 + set_bit(CACHE_HASHED, &item->h.flags);
16234 + item->h.next = *head;
16235 + *head = &item->h;
16236 + write_unlock(&auth_domain_cache.hash_lock);
16237 + cache_fresh(&auth_domain_cache, &item->h, item->h.expiry_time);
16238 + cache_get(&item->h);
16241 + read_unlock(&auth_domain_cache.hash_lock);
16245 struct auth_domain *auth_domain_find(char *name)