some cleanup.
} u;
};
-#define c_inode u.client.inode
-#define c_handle u.client.handle
-#define c_bvalid u.filter.bvalid
-#define c_bkeyid u.filter.bkeyid
-#define c_bhmac u.filter.bhmac
+#define c_inode u.client.inode
+#define c_handle u.client.handle
+#define c_lli_list u.client.lli_list
+#define c_bvalid u.filter.bvalid
+#define c_bkeyid u.filter.bkeyid
+#define c_bhmac u.filter.bhmac
enum lustre_capa_type {
CLIENT_CAPA = 0,
int capa_op(int flags);
void __capa_get(struct obd_capa *ocapa);
struct obd_capa *capa_get(uid_t uid, int capa_op, __u64 mdsid,
- unsigned long ino, int type,
- struct lustre_capa *capa, struct inode *inode,
- struct lustre_handle *handle);
+ unsigned long ino, int type);
void capa_put(struct obd_capa *ocapa);
struct obd_capa *capa_renew(struct lustre_capa *capa, int type);
void capa_hmac(struct crypto_tfm *tfm, __u8 *key, struct lustre_capa *capa);
int rc;
ENTRY;
+ if (capa_expired(&ocapa->c_capa)) {
+ /* this is the second renewal attempt since the last
+ * renewal failed, which means no one is opening it and
+ * it should be put now. */
+ capa_put(ocapa);
+ RETURN(0);
+ }
+
rc = md_getattr(md_exp, &lli->lli_id, valid, NULL, NULL, 0,
0, ocapa, &req);
RETURN(rc);
struct lustre_capa *capa;
struct obd_capa *ocapa;
struct ll_inode_info *lli = ll_i2info(inode);
- __u64 mdsid = lli->lli_id.li_fid.lf_group;
- unsigned long ino = lli->lli_id.li_stc.u.e3s.l3s_ino;
- int capa_op = (it->it_flags & MAY_WRITE) ? MAY_WRITE : MAY_READ;
unsigned long expiry;
ENTRY;
LASSERT(capa != NULL); /* reply already checked out */
LASSERT_REPSWABBED(req, 7); /* and swabbed down */
- ocapa = capa_get(current->uid, capa_op, mdsid, ino, CLIENT_CAPA, capa,
- inode, &body->handle);
+ ocapa = capa_renew(capa, CLIENT_CAPA);
if (!ocapa)
RETURN(-ENOMEM);
+ spin_lock(&capa_lock);
+ ocapa->c_inode = inode;
+ ocapa->c_handle = body->handle;
+ spin_unlock(&capa_lock);
+
spin_lock(&lli->lli_lock);
/* in case it was linked to lli_capas already */
- if (list_empty(&ocapa->u.client.lli_list))
- list_add(&ocapa->u.client.lli_list, &lli->lli_capas);
+ if (list_empty(&ocapa->c_lli_list))
+ list_add(&ocapa->c_lli_list, &lli->lli_capas);
spin_unlock(&lli->lli_lock);
expiry = expiry_to_jiffies(capa->lc_expiry - capa_pre_expiry(capa));
spin_lock(&lli->lli_lock);
/* in case it was linked to lli_capas already */
- if (list_empty(&ocapa->u.client.lli_list))
- list_add(&ocapa->u.client.lli_list, &lli->lli_capas);
+ if (list_empty(&ocapa->c_lli_list))
+ list_add(&ocapa->c_lli_list, &lli->lli_capas);
spin_unlock(&lli->lli_lock);
RETURN(0);
}
+
+/* Look up a cached client capability for (inode, uid, op).
+ *
+ * Walks the per-inode lli_capas list and returns the first obd_capa
+ * whose lc_ruid matches @uid and whose lc_op matches @op; returns NULL
+ * when no matching capability is cached.
+ *
+ * NOTE(review): the list is traversed without taking lli->lli_lock,
+ * which guards lli_capas insert/remove elsewhere in this patch --
+ * confirm callers serialize access.
+ * NOTE(review): no reference is taken (__capa_get) on the returned
+ * capa -- confirm callers only use it transiently.
+ */
+struct obd_capa *ll_get_capa(struct inode *inode, uid_t uid, int op)
+{
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct obd_capa *ocapa, *tmp;
+ ENTRY;
+
+ /* _safe variant is not strictly needed (nothing is removed in
+ * this loop), but it is harmless. */
+ list_for_each_entry_safe(ocapa, tmp, &lli->lli_capas, c_lli_list) {
+ if (ocapa->c_capa.lc_ruid != uid)
+ continue;
+ if (ocapa->c_capa.lc_op != op)
+ continue;
+
+ RETURN(ocapa);
+ }
+
+ RETURN(NULL);
+}
int ll_set_capa(struct inode *inode, struct lookup_intent *it);
int ll_set_trunc_capa(struct ptlrpc_request *req, int offset,
struct inode *inode);
+struct obd_capa *ll_get_capa(struct inode *inode, uid_t uid, int op);
/* llite/dcache.c */
void ll_intent_drop_lock(struct lookup_intent *);
lli->lli_size_pid = 0;
up(&lli->lli_size_sem);
- ocapa = capa_get(current->fsuid, CAPA_TRUNC, id_group(&lli->lli_id),
- id_ino(&lli->lli_id), CLIENT_CAPA, NULL, NULL, NULL);
+ ocapa = ll_get_capa(inode, current->fsuid, CAPA_TRUNC);
if (ocapa)
capa = &ocapa->c_capa;
{
uid_t uid;
- if (!med->med_remote)
+ if (!med->med_remote) {
+ /* when not remote uid, ruid == uid */
+ capa->lc_ruid = capa->lc_uid;
return;
+ }
ENTRY;
uid = mds_idmap_lookup_uid(med->med_idmap, 1, capa->lc_uid);
mfd = mds_handle2mfd(&req_body->handle);
if (mfd == NULL) {
- CERROR("no handle for capa renewal ino "LPD64
- ": cookie "LPX64"\n",
- req_capa->lc_ino, req_body->handle.cookie);
+ DEBUG_CAPA(D_ERROR, req_capa, "no handle "LPX64" for",
+ req_body->handle.cookie);
RETURN(-ESTALE);
}
mode = accmode(mfd->mfd_mode);
if (!(req_capa->lc_op & mode)) {
- CERROR("invalid capa to renew ino "LPD64
- ": op %d mismatch with mode %d\n",
- req_capa->lc_ino, req_capa->lc_op,
- mfd->mfd_mode);
+ DEBUG_CAPA(D_ERROR, req_capa, "accmode %d mismatch",
+ mode);
RETURN(-EACCES);
}
}
LASSERT(capa != NULL);
ocapa = capa_get(req_capa->lc_uid, req_capa->lc_op, req_capa->lc_mdsid,
- req_capa->lc_ino, MDS_CAPA, NULL, NULL, NULL);
+ req_capa->lc_ino, MDS_CAPA);
if (ocapa) {
expired = capa_is_to_expire(ocapa);
if (!expired) {
int capa_op(int flags)
{
if (flags & (FMODE_WRITE|MDS_OPEN_TRUNC))
- return MAY_WRITE;
+ return CAPA_WRITE;
else if (flags & FMODE_READ)
- return MAY_READ;
+ return CAPA_READ;
LBUG(); /* should be either MAY_READ or MAY_WRITE */
return 0;
if (ocapa->c_type != type)
continue;
- if (ocapa->c_type == CLIENT_CAPA &&
- ocapa->c_capa.lc_flags & CAPA_FL_REMUID)
+ if (ocapa->c_type == CLIENT_CAPA)
ouid = ocapa->c_capa.lc_ruid;
else
ouid = ocapa->c_capa.lc_uid;
hlist_for_each_entry_safe(ocapa, pos, n, capa_hash, c_hash) {
LASSERT(ocapa->c_type != CLIENT_CAPA);
-// list_del_init(&ocapa->u.client.lli_list);
__capa_put(ocapa);
destroy_capa(ocapa);
}
}
static struct obd_capa *
-get_new_capa_locked(struct hlist_head *head, int type, struct lustre_capa *capa,
- struct inode *inode, struct lustre_handle *handle)
+get_new_capa_locked(struct hlist_head *head, int type, struct lustre_capa *capa)
{
uid_t uid = capa->lc_uid;
int capa_op = capa->lc_op;
if (!old) {
do_update_capa(ocapa, capa);
ocapa->c_type = type;
-
- if (type == CLIENT_CAPA && inode) {
- LASSERT(handle);
- ocapa->c_inode = inode;
- memcpy(&ocapa->c_handle, handle, sizeof(*handle));
- INIT_LIST_HEAD(&ocapa->u.client.lli_list);
- }
-
- DEBUG_CAPA(D_CACHE, &ocapa->c_capa, "new");
-
list_add_capa(ocapa, &capa_list[type]);
hlist_add_head(&ocapa->c_hash, capa_hash);
+ if (type == CLIENT_CAPA)
+ INIT_LIST_HEAD(&ocapa->c_lli_list);
+
capa_count[type]++;
+ DEBUG_CAPA(D_CACHE, &ocapa->c_capa, "new");
+
if (type != CLIENT_CAPA && capa_count[type] > CAPA_CACHE_SIZE) {
struct list_head *node = capa_list[type].next;
struct obd_capa *tcapa;
/* free 12 unused capa from head */
while (node->next != &capa_list[type] && count < 12) {
- tcapa = list_entry(node, struct obd_capa, c_list);
+ tcapa = list_entry(node, struct obd_capa,
+ c_list);
node = node->next;
if (atomic_read(&tcapa->c_refc) > 0)
continue;
+ DEBUG_CAPA(D_CACHE, &ocapa->c_capa,
+ "free unused");
__capa_put(tcapa);
destroy_capa(tcapa);
count++;
}
struct obd_capa *
-capa_get(uid_t uid, int capa_op,__u64 mdsid, unsigned long ino,
- int type, struct lustre_capa *capa, struct inode *inode,
- struct lustre_handle *handle)
+capa_get(uid_t uid, int capa_op,__u64 mdsid, unsigned long ino, int type)
{
struct hlist_head *head = capa_hash +
capa_hashfn(uid, capa_op, mdsid, ino);
struct obd_capa *ocapa;
ocapa = find_capa_locked(head, uid, capa_op, mdsid, ino, type);
- if (ocapa)
- return ocapa;
- if (capa) {
- ocapa = get_new_capa_locked(head, type, capa, inode, handle);
- if (ocapa)
- __capa_get(ocapa);
- }
return ocapa;
}
DEBUG_CAPA(D_CACHE, &ocapa->c_capa, "put");
spin_lock(&capa_lock);
if (ocapa->c_type == CLIENT_CAPA) {
- list_del_init(&ocapa->u.client.lli_list);
+ list_del_init(&ocapa->c_lli_list);
__capa_put(ocapa);
destroy_capa(ocapa);
} else {
do_update_capa(ocapa, capa);
spin_unlock(&capa_lock);
- if (ocapa)
- return ocapa;
-
- if (type == MDS_CAPA ||
- (type == CLIENT_CAPA && capa->lc_op == CAPA_TRUNC))
- ocapa = get_new_capa_locked(head, type, capa, NULL, NULL);
+ if (!ocapa)
+ ocapa = get_new_capa_locked(head, type, capa);
return ocapa;
}
RETURN(-ESTALE);
ocapa = capa_get(capa->lc_uid, capa->lc_op, capa->lc_mdsid,
- capa->lc_ino, FILTER_CAPA, NULL, NULL, NULL);
+ capa->lc_ino, FILTER_CAPA);
verify:
if (ocapa) {
/* fo_capa_lock protects capa too */
capa_hmac(filter->fo_capa_hmac, hmac_key, &tcapa);
/* store in capa cache */
- ocapa = capa_get(capa->lc_uid, capa->lc_op, capa->lc_mdsid,
- capa->lc_ino, FILTER_CAPA, capa, NULL, NULL);
+ ocapa = capa_renew(capa, FILTER_CAPA);
if (!ocapa)
GOTO(out, rc = -ENOMEM);
/* TODO: this could be optimized: thie capability can be
* found from ll_inode_info->lli_capas. */
- capa_op = (opc == OST_WRITE) ? MAY_WRITE : MAY_READ;
+ capa_op = (opc == OST_WRITE) ? CAPA_WRITE : CAPA_READ;
get_capa:
ocapa = capa_get(oa->o_fsuid, capa_op, raw_id->li_fid.lf_group,
- raw_id->li_stc.u.e3s.l3s_ino, CLIENT_CAPA,
- NULL, NULL, NULL);
+ raw_id->li_stc.u.e3s.l3s_ino, CLIENT_CAPA);
if (!ocapa) {
if (opc == OST_READ && capa_op == MAY_READ) {
/* partial write might cause read, MAY_WRITE capability