+
+/* Release the ucred's group_info reference (if any) and clear the field. */
+static inline void drop_ucred_ginfo(struct lvfs_ucred *ucred)
+{
+	struct group_info *ginfo = ucred->luc_ginfo;
+
+	if (ginfo == NULL)
+		return;
+
+	ucred->luc_ginfo = NULL;
+	put_group_info(ginfo);
+}
+
+/*
+ * root could set any group_info if we allowed setgroups, while
+ * normal user only could 'reduce' their group members -- which
+ * is somewhat expensive.
+ */
+int mds_init_ucred(struct lvfs_ucred *ucred, struct mds_req_sec_desc *rsd)
+{
+ struct group_info *gnew;
+
+ ENTRY;
+ LASSERT(ucred);
+ LASSERT(rsd);
+
+ ucred->luc_fsuid = rsd->rsd_fsuid;
+ ucred->luc_fsgid = rsd->rsd_fsgid;
+ ucred->luc_cap = rsd->rsd_cap;
+ ucred->luc_uid = rsd->rsd_uid;
+ ucred->luc_ghash = mds_get_group_entry(NULL, rsd->rsd_uid);
+ ucred->luc_ginfo = NULL;
+
+ if (ucred->luc_ghash && ucred->luc_ghash->ge_group_info) {
+ ucred->luc_ginfo = ucred->luc_ghash->ge_group_info;
+ get_group_info(ucred->luc_ginfo);
+ }
+
+ /* everything is done if we don't allow set groups */
+ if (!mds_allow_setgroups())
+ RETURN(0);
+
+ if (rsd->rsd_ngroups > LUSTRE_MAX_GROUPS) {
+ CERROR("client provide too many groups: %d\n",
+ rsd->rsd_ngroups);
+ drop_ucred_ginfo(ucred);
+ mds_put_group_entry(NULL, ucred->luc_ghash);
+ RETURN(-EFAULT);
+ }
+
+ if (ucred->luc_uid == 0) {
+ if (rsd->rsd_ngroups == 0) {
+ drop_ucred_ginfo(ucred);
+ RETURN(0);
+ }
+
+ gnew = groups_alloc(rsd->rsd_ngroups);
+ if (!gnew) {
+ CERROR("out of memory\n");
+ drop_ucred_ginfo(ucred);
+ mds_put_group_entry(NULL, ucred->luc_ghash);
+ RETURN(-ENOMEM);
+ }
+ groups_from_buffer(gnew, rsd->rsd_groups);
+ /* can't rely on client to sort them */
+ groups_sort(gnew);
+
+ drop_ucred_ginfo(ucred);
+ ucred->luc_ginfo = gnew;
+ } else {
+ __u32 set = 0, cur = 0;
+ struct group_info *ginfo;
+
+ /* if no group info in hash, we don't
+ * bother createing new
+ */
+ if (!ucred->luc_ginfo)
+ RETURN(0);
+
+ /* Note: freeing a group_info count on 'nblocks' instead of
+ * 'ngroups', thus we can safely alloc enough buffer and reduce
+ * and ngroups number later.
+ */
+ gnew = groups_alloc(rsd->rsd_ngroups);
+ if (!gnew) {
+ CERROR("out of memory\n");
+ drop_ucred_ginfo(ucred);
+ mds_put_group_entry(NULL, ucred->luc_ghash);
+ RETURN(-ENOMEM);
+ }
+
+ ginfo = ucred->luc_ginfo;
+ while (cur < rsd->rsd_ngroups) {
+ if (groups_search(ginfo, rsd->rsd_groups[cur])) {
+ GROUP_AT(gnew, set) = rsd->rsd_groups[cur];
+ set++;
+ }
+ cur++;
+ }
+ gnew->ngroups = set;
+
+ put_group_info(ucred->luc_ginfo);
+ ucred->luc_ginfo = gnew;
+ }
+ RETURN(0);
+}
+
+/*
+ * Release the references held by @ucred (group_info and group-hash
+ * entry).  The fields are reset to NULL afterwards so the function is
+ * idempotent: a second call, or a call after a failed init, is harmless.
+ */
+void mds_exit_ucred(struct lvfs_ucred *ucred)
+{
+	ENTRY;
+
+	if (ucred->luc_ginfo) {
+		put_group_info(ucred->luc_ginfo);
+		ucred->luc_ginfo = NULL;
+	}
+	if (ucred->luc_ghash) {
+		mds_put_group_entry(NULL, ucred->luc_ghash);
+		ucred->luc_ghash = NULL;
+	}
+
+	EXIT;
+}