Change the cfs_curproc_xxx functions to the standard Linux kernel API.
Signed-off-by: Liu Xuezhao <xuezhao.liu@emc.com>
Signed-off-by: Peng Tao <tao.peng@emc.com>
Signed-off-by: James Simmons <uja.ornl@gmail.com>
Change-Id: Ic526d02f547b04550e6049f2e343dd678bdbdf3c
Reviewed-on: http://review.whamcloud.com/4779
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
Reviewed-by: Keith Mannthey <keith.mannthey@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
/#[ \t]*define[ \t]*\bget_seconds\b *( *)[ \t]*\bget_seconds\b *( *)/d
s/\bCFS_NR_CPUS\b/NR_CPUS/g
/#[ \t]*define[ \t]*\bNR_CPUS\b[ \t]*\bNR_CPUS\b/d
+
+################################################################################
+# cfs_curproc_xxx macros
+s/\bcfs_curproc_uid\b/current_uid/g
+/#[ \t]*define[ \t]*\bcurrent_uid\b *( *)[ \t]*\bcurrent_uid\b *( *)/d
+s/\bcfs_curproc_gid\b/current_gid/g
+/#[ \t]*define[ \t]*\bcurrent_gid\b *( *)[ \t]*\bcurrent_gid\b *( *)/d
+s/\bcfs_curproc_euid\b/current_euid/g
+/#[ \t]*define[ \t]*\bcurrent_euid\b *( *)[ \t]*\bcurrent_euid\b *( *)/d
+s/\bcfs_curproc_egid\b/current_egid/g
+/#[ \t]*define[ \t]*\bcurrent_egid\b *( *)[ \t]*\bcurrent_egid\b *( *)/d
+s/\bcfs_curproc_fsuid\b/current_fsuid/g
+/#[ \t]*define[ \t]*\bcurrent_fsuid\b *( *)[ \t]*\bcurrent_fsuid\b *( *)/d
+s/\bcfs_curproc_fsgid\b/current_fsgid/g
+/#[ \t]*define[ \t]*\bcurrent_fsgid\b *( *)[ \t]*\bcurrent_fsgid\b *( *)/d
+s/\bcfs_curproc_pid\b/current_pid/g
+s/\bcfs_curproc_is_in_groups\b/in_group_p/g
+s/\bcfs_curproc_umask\b/current_umask/g
+s/\bcfs_curproc_comm\b/current_comm/g
+s/\bcfs_curproc_is_32bit\b/current_is_32bit/g
#ifndef __LIBCFS_CURPROC_H__
#define __LIBCFS_CURPROC_H__
-#ifdef __KERNEL__
-/*
- * Portable API to access common characteristics of "current" UNIX process.
- *
- * Implemented in portals/include/libcfs/<os>/
- */
-uid_t cfs_curproc_euid(void);
-gid_t cfs_curproc_egid(void);
-pid_t cfs_curproc_pid(void);
-int cfs_curproc_groups_nr(void);
-int cfs_curproc_is_in_groups(gid_t group);
-void cfs_curproc_groups_dump(gid_t *array, int size);
-mode_t cfs_curproc_umask(void);
-char *cfs_curproc_comm(void);
-
-/*
- * Plus, platform-specific constant
- *
- * CFS_CURPROC_COMM_MAX,
- *
- * and opaque scalar type
- *
- * kernel_cap_t
- */
-#endif
-uid_t cfs_curproc_uid(void);
-gid_t cfs_curproc_gid(void);
-uid_t cfs_curproc_fsuid(void);
-gid_t cfs_curproc_fsgid(void);
int cfs_get_environ(const char *key, char *value, int *val_len);
typedef __u32 cfs_cap_t;
#undef WITH_WATCHDOG
#endif
+/*
+ * Portable API to access common characteristics of "current" UNIX process.
+ */
+uid_t current_uid(void);
+gid_t current_gid(void);
+uid_t current_euid(void);
+gid_t current_egid(void);
+uid_t current_fsuid(void);
+gid_t current_fsgid(void);
+pid_t current_pid(void);
+int in_group_p(gid_t group);
+mode_t current_umask(void);
+char *current_comm(void);
+
+/* check if task is running in compat mode.*/
+int current_is_32bit(void);
+
#endif /* __KERNEL__ */
#endif /* _XNU_LIBCFS_H */
#define WITH_WATCHDOG
#endif
+/*
+ * Macros to access common characteristics of "current" UNIX process.
+ */
+#define current_pid() (current->pid)
+#define current_umask() (current->fs->umask)
+#define current_comm() (current->comm)
+
+/* check if task is running in compat mode.*/
+int current_is_32bit(void);
+
#endif /* _LINUX_LIBCFS_H */
#define kthread_run(f, a, n, ...) LBUG()
#endif
-uid_t cfs_curproc_uid(void);
-gid_t cfs_curproc_gid(void);
-uid_t cfs_curproc_fsuid(void);
-gid_t cfs_curproc_fsgid(void);
+uid_t current_uid(void);
+gid_t current_gid(void);
+uid_t current_fsuid(void);
+gid_t current_fsgid(void);
#ifndef HAVE_STRLCPY /* not in glibc for RHEL 5.x, remove when obsolete */
size_t strlcpy(char *tgt, const char *src, size_t tgt_len);
#define EXIT_NESTING do {} while (0)
#define __current_nesting_level() (0)
+/*
+ * Portable API to access common characteristics of "current" UNIX process.
+ */
+uid_t current_uid(void);
+gid_t current_gid(void);
+uid_t current_euid(void);
+gid_t current_egid(void);
+uid_t current_fsuid(void);
+gid_t current_fsgid(void);
+pid_t current_pid(void);
+int in_group_p(gid_t group);
+mode_t current_umask(void);
+char *current_comm(void);
+
+/* check if task is running in compat mode.*/
+int current_is_32bit(void);
+
#endif /* _WINNT_LIBCFS_H */
#endif
}
-uid_t cfs_curproc_uid(void)
+uid_t current_uid(void)
{
return curproc_ucred()->cr_uid;
}
-gid_t cfs_curproc_gid(void)
+gid_t current_gid(void)
{
LASSERT(curproc_ucred()->cr_ngroups > 0);
return curproc_ucred()->cr_groups[0];
}
-uid_t cfs_curproc_fsuid(void)
+uid_t current_fsuid(void)
{
#ifdef __DARWIN8__
return curproc_ucred()->cr_ruid;
#endif
}
-gid_t cfs_curproc_fsgid(void)
+gid_t current_fsgid(void)
{
#ifdef __DARWIN8__
return curproc_ucred()->cr_rgid;
#endif
}
-pid_t cfs_curproc_pid(void)
+pid_t current_pid(void)
{
#ifdef __DARWIN8__
/* no pid for each thread, return address of thread struct */
#endif
}
-int cfs_curproc_groups_nr(void)
-{
- LASSERT(curproc_ucred()->cr_ngroups > 0);
- return curproc_ucred()->cr_ngroups - 1;
-}
-
-int cfs_curproc_is_in_groups(gid_t gid)
+int in_group_p(gid_t gid)
{
int i;
struct ucred *cr;
return 0;
}
-void cfs_curproc_groups_dump(gid_t *array, int size)
-{
- struct ucred *cr;
-
- cr = curproc_ucred();
- LASSERT(cr != NULL);
- CLASSERT(sizeof array[0] == sizeof (__u32));
-
- size = min_t(int, size, cr->cr_ngroups);
- memcpy(array, &cr->cr_groups[1], size * sizeof(gid_t));
-}
-
-mode_t cfs_curproc_umask(void)
+mode_t current_umask(void)
{
#ifdef __DARWIN8__
/*
#endif
}
-char *cfs_curproc_comm(void)
+char *current_comm(void)
{
#ifdef __DARWIN8__
/*
{
libcfs_catastrophe = 1;
CEMERG("LBUG: pid: %u thread: %#x\n",
- (unsigned)cfs_curproc_pid(), (unsigned)current_thread());
+ (unsigned)current_pid(), (unsigned)current_thread());
libcfs_debug_dumplog();
libcfs_run_lbug_upcall(msgdata);
while (1)
if (th != THREAD_NULL) {
/*
* FIXME: change child thread name...
- * cfs_curproc_comm() is already broken. So it is left as is...
+ * current_comm() is already broken. So it is left as is...
va_list args;
va_start(args, namefmt);
- snprintf(cfs_curproc_comm(), CFS_CURPROC_COMM_MAX,
+ snprintf(current_comm(), CFS_CURPROC_COMM_MAX,
namefmt, args);
va_end(args);
*/
header->ph_sec = (__u32)tv.tv_sec;
header->ph_usec = tv.tv_usec;
header->ph_stack = stack;
- header->ph_pid = cfs_curproc_pid();
+ header->ph_pid = current_pid();
header->ph_line_num = line;
header->ph_extern_pid = (__u32)current_thread();
}
cfs_waitq_add(&debug_ctlwq, &wait);
dumper = kthread_run(libcfs_debug_dumplog_thread,
- (void *)(long)cfs_curproc_pid(),
+ (void *)(long)current_pid(),
"libcfs_debug_dumper");
if (IS_ERR(dumper))
printk(CFS_KERN_ERR "LustreError: cannot start log dump thread:"
* for Linux kernel.
*/
-uid_t cfs_curproc_uid(void)
-{
- return current_uid();
-}
-
-gid_t cfs_curproc_gid(void)
-{
- return current_gid();
-}
-
-uid_t cfs_curproc_fsuid(void)
-{
- return current_fsuid();
-}
-
-uid_t cfs_curproc_euid(void)
-{
- return current_euid();
-}
-
-uid_t cfs_curproc_egid(void)
-{
- return current_egid();
-}
-
-gid_t cfs_curproc_fsgid(void)
-{
- return current_fsgid();
-}
-
-pid_t cfs_curproc_pid(void)
-{
- return current->pid;
-}
-
-int cfs_curproc_groups_nr(void)
-{
- int nr;
-
- task_lock(current);
- nr = current_cred()->group_info->ngroups;
- task_unlock(current);
- return nr;
-}
-
-void cfs_curproc_groups_dump(gid_t *array, int size)
-{
- task_lock(current);
- size = min_t(int, size, current_cred()->group_info->ngroups);
- memcpy(array, current_cred()->group_info->blocks[0], size * sizeof(__u32));
- task_unlock(current);
-}
-
-
-int cfs_curproc_is_in_groups(gid_t gid)
-{
- return in_group_p(gid);
-}
-
-mode_t cfs_curproc_umask(void)
-{
- return current->fs->umask;
-}
-
-char *cfs_curproc_comm(void)
-{
- return current->comm;
-}
-
/* Currently all the CFS_CAP_* defines match CAP_* ones. */
#define cfs_cap_pack(cap) (cap)
#define cfs_cap_unpack(cap) (cap)
}
EXPORT_SYMBOL(cfs_get_environ);
-EXPORT_SYMBOL(cfs_curproc_uid);
-EXPORT_SYMBOL(cfs_curproc_pid);
-EXPORT_SYMBOL(cfs_curproc_euid);
-EXPORT_SYMBOL(cfs_curproc_gid);
-EXPORT_SYMBOL(cfs_curproc_egid);
-EXPORT_SYMBOL(cfs_curproc_fsuid);
-EXPORT_SYMBOL(cfs_curproc_fsgid);
-EXPORT_SYMBOL(cfs_curproc_umask);
-EXPORT_SYMBOL(cfs_curproc_comm);
-EXPORT_SYMBOL(cfs_curproc_groups_nr);
-EXPORT_SYMBOL(cfs_curproc_groups_dump);
-EXPORT_SYMBOL(cfs_curproc_is_in_groups);
EXPORT_SYMBOL(cfs_cap_raise);
EXPORT_SYMBOL(cfs_cap_lower);
EXPORT_SYMBOL(cfs_cap_raised);
while (kernel_locked())
unlock_kernel();
# endif
- libcfs_debug_dumplog_internal((void *)(long)cfs_curproc_pid());
+ libcfs_debug_dumplog_internal((void *)(long)current_pid());
}
#endif
return 0;
}
#endif
-uid_t cfs_curproc_uid(void)
+uid_t current_uid(void)
{
return getuid();
}
-gid_t cfs_curproc_gid(void)
+gid_t current_gid(void)
{
return getgid();
}
-uid_t cfs_curproc_fsuid(void)
+uid_t current_fsuid(void)
{
return getuid();
}
-gid_t cfs_curproc_fsgid(void)
+gid_t current_fsgid(void)
{
return getgid();
}
}
spin_lock_init(&lcw->lcw_lock);
- lcw->lcw_refcount = 1; /* refcount for owner */
- lcw->lcw_task = cfs_current();
- lcw->lcw_pid = cfs_curproc_pid();
- lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
- lcw->lcw_data = data;
- lcw->lcw_state = LC_WATCHDOG_DISABLED;
+ lcw->lcw_refcount = 1; /* refcount for owner */
+ lcw->lcw_task = cfs_current();
+ lcw->lcw_pid = current_pid();
+ lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
+ lcw->lcw_data = data;
+ lcw->lcw_state = LC_WATCHDOG_DISABLED;
CFS_INIT_LIST_HEAD(&lcw->lcw_list);
cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
};
-uid_t cfs_curproc_uid(void)
+uid_t current_uid(void)
{
return this_task.uid;
}
-gid_t cfs_curproc_gid(void)
+gid_t current_gid(void)
{
return this_task.gid;
}
-uid_t cfs_curproc_fsuid(void)
+uid_t current_fsuid(void)
{
return this_task.fsuid;
}
-gid_t cfs_curproc_fsgid(void)
+gid_t current_fsgid(void)
{
return this_task.fsgid;
}
-pid_t cfs_curproc_pid(void)
+pid_t current_pid(void)
{
return cfs_current()->pid;
}
-int cfs_curproc_groups_nr(void)
-{
- return this_task.ngroups;
-}
-
-void cfs_curproc_groups_dump(gid_t *array, int size)
-{
- LASSERT(size <= NGROUPS);
- size = min_t(int, size, this_task.ngroups);
- memcpy(array, this_task.groups, size * sizeof(__u32));
-}
-
-int cfs_curproc_is_in_groups(gid_t gid)
-{
- return in_group_p(gid);
-}
-
-mode_t cfs_curproc_umask(void)
+mode_t current_umask(void)
{
return this_task.umask;
}
-char *cfs_curproc_comm(void)
+char *current_comm(void)
{
return this_task.comm;
}
cfs_waitq_del(waitq, &link);
}
-EXPORT_SYMBOL(cfs_curproc_uid);
-EXPORT_SYMBOL(cfs_curproc_pid);
-EXPORT_SYMBOL(cfs_curproc_gid);
-EXPORT_SYMBOL(cfs_curproc_fsuid);
-EXPORT_SYMBOL(cfs_curproc_fsgid);
-EXPORT_SYMBOL(cfs_curproc_umask);
-EXPORT_SYMBOL(cfs_curproc_comm);
-EXPORT_SYMBOL(cfs_curproc_groups_nr);
-EXPORT_SYMBOL(cfs_curproc_groups_dump);
-EXPORT_SYMBOL(cfs_curproc_is_in_groups);
+EXPORT_SYMBOL(current_uid);
+EXPORT_SYMBOL(current_pid);
+EXPORT_SYMBOL(current_gid);
+EXPORT_SYMBOL(current_fsuid);
+EXPORT_SYMBOL(current_fsgid);
+EXPORT_SYMBOL(current_umask);
+EXPORT_SYMBOL(current_comm);
EXPORT_SYMBOL(cfs_cap_raise);
EXPORT_SYMBOL(cfs_cap_lower);
EXPORT_SYMBOL(cfs_cap_raised);
void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
{
- libcfs_catastrophe = 1;
- CEMERG("LBUG: pid: %u thread: %#x\n",
- cfs_curproc_pid(), PsGetCurrentThread());
- cfs_enter_debugger();
- libcfs_debug_dumplog();
- libcfs_run_lbug_upcall(msgdata);
+ libcfs_catastrophe = 1;
+ CEMERG("LBUG: pid: %u thread: %#x\n",
+ current_pid(), PsGetCurrentThread());
+ cfs_enter_debugger();
+ libcfs_debug_dumplog();
+ libcfs_run_lbug_upcall(msgdata);
}
void cfs_enter_debugger(void)
the_lnet.ln_pid = requested_pid;
#else
if (the_lnet.ln_server_mode_flag) {/* server case (uOSS) */
- LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
+ LASSERT ((requested_pid & LNET_PID_USERFLAG) == 0);
- if (cfs_curproc_uid())/* Only root can run user-space server */
- return -EPERM;
- the_lnet.ln_pid = requested_pid;
+ if (current_uid() != 0) /* Only root can run user-space server */
+ return -EPERM;
+ the_lnet.ln_pid = requested_pid;
} else {/* client case (liblustre) */
typedef struct task_struct cfs_task_t;
#define cfs_current() current
-#define cfs_curproc_pid() (current->pid)
-#define cfs_curproc_comm() (current->comm)
-#define cfs_curproc_fsuid() (current->fsuid)
-#define cfs_curproc_fsgid() (current->fsgid)
-#define cfs_curproc_umask() ({ mode_t mask = umask(0); umask(mask); mask; })
+#define current_pid() (current->pid)
+#define current_comm() (current->comm)
+#define current_fsuid() (current->fsuid)
+#define current_fsgid() (current->fsgid)
+#define current_umask() ({ mode_t mask = umask(0); umask(mask); mask; })
extern struct task_struct *current;
-int cfs_curproc_is_in_groups(gid_t gid);
+int in_group_p(gid_t gid);
#define cfs_set_current_state(foo) do { current->state = foo; } while (0)
thread->t_data = NULL;
thread->t_watchdog = NULL;
- CDEBUG(D_HA, "%s: started recovery thread pid %d\n", obd->obd_name,
- cfs_curproc_pid());
- trd->trd_processing_task = cfs_curproc_pid();
+ CDEBUG(D_HA, "%s: started recovery thread pid %d\n", obd->obd_name,
+ current_pid());
+ trd->trd_processing_task = current_pid();
spin_lock(&obd->obd_dev_lock);
obd->obd_recovering = 1;
CDEBUG(D_INFO, "1: request replay stage - %d clients from t"LPU64"\n",
cfs_atomic_read(&obd->obd_req_replay_clients),
obd->obd_next_recovery_transno);
- while ((req = target_next_replay_req(obd))) {
- LASSERT(trd->trd_processing_task == cfs_curproc_pid());
- DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s",
- lustre_msg_get_transno(req->rq_reqmsg),
- libcfs_nid2str(req->rq_peer.nid));
+ while ((req = target_next_replay_req(obd))) {
+ LASSERT(trd->trd_processing_task == current_pid());
+ DEBUG_REQ(D_HA, req, "processing t"LPD64" from %s",
+ lustre_msg_get_transno(req->rq_reqmsg),
+ libcfs_nid2str(req->rq_peer.nid));
handle_recovery_req(thread, req,
trd->trd_recovery_handler);
/**
*/
CDEBUG(D_INFO, "2: lock replay stage - %d clients\n",
cfs_atomic_read(&obd->obd_lock_replay_clients));
- while ((req = target_next_replay_lock(obd))) {
- LASSERT(trd->trd_processing_task == cfs_curproc_pid());
- DEBUG_REQ(D_HA, req, "processing lock from %s: ",
- libcfs_nid2str(req->rq_peer.nid));
+ while ((req = target_next_replay_lock(obd))) {
+ LASSERT(trd->trd_processing_task == current_pid());
+ DEBUG_REQ(D_HA, req, "processing lock from %s: ",
+ libcfs_nid2str(req->rq_peer.nid));
handle_recovery_req(thread, req,
trd->trd_recovery_handler);
target_request_copy_put(req);
spin_lock(&obd->obd_recovery_task_lock);
target_cancel_recovery_timer(obd);
spin_unlock(&obd->obd_recovery_task_lock);
- while ((req = target_next_final_ping(obd))) {
- LASSERT(trd->trd_processing_task == cfs_curproc_pid());
- DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
- libcfs_nid2str(req->rq_peer.nid));
+ while ((req = target_next_final_ping(obd))) {
+ LASSERT(trd->trd_processing_task == current_pid());
+ DEBUG_REQ(D_HA, req, "processing final ping from %s: ",
+ libcfs_nid2str(req->rq_peer.nid));
handle_recovery_req(thread, req,
trd->trd_recovery_handler);
target_request_copy_put(req);
__u64 transno = lustre_msg_get_transno(req->rq_reqmsg);
ENTRY;
- if (obd->obd_recovery_data.trd_processing_task == cfs_curproc_pid()) {
- /* Processing the queue right now, don't re-add. */
- RETURN(1);
- }
+ if (obd->obd_recovery_data.trd_processing_task == current_pid()) {
+ /* Processing the queue right now, don't re-add. */
+ RETURN(1);
+ }
target_process_req_flags(obd, req);
if (lock == NULL)
RETURN(NULL);
- lock->l_req_mode = mode;
- lock->l_ast_data = data;
- lock->l_pid = cfs_curproc_pid();
+ lock->l_req_mode = mode;
+ lock->l_ast_data = data;
+ lock->l_pid = current_pid();
if (ns_is_server(ns))
lock->l_flags |= LDLM_FL_NS_SRV;
if (cbs) {
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
- "ldlm_poold", cfs_curproc_pid());
+ "ldlm_poold", current_pid());
while (1) {
struct l_wait_info lwi;
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
- "ldlm_poold", cfs_curproc_pid());
+ "ldlm_poold", current_pid());
complete_and_exit(&ldlm_pools_comp, 0);
}
lfsck->li_pos_current.lp_oit_cookie,
lfsck->li_pos_current.lp_dir_cookie,
PFID(&lfsck->li_pos_current.lp_dir_parent),
- cfs_curproc_pid());
+ current_pid());
spin_lock(&lfsck->li_lock);
thread_set_flags(thread, SVC_RUNNING);
lfsck->li_pos_current.lp_oit_cookie,
lfsck->li_pos_current.lp_dir_cookie,
PFID(&lfsck->li_pos_current.lp_dir_parent),
- cfs_curproc_pid(), rc);
+ current_pid(), rc);
if (!OBD_FAIL_CHECK(OBD_FAIL_LFSCK_CRASH))
rc = lfsck_post(&env, lfsck, rc);
* array in case it might be useful. Not needed if doing an MDS-side upcall. */
void ll_i2gids(__u32 *suppgids, struct inode *i1, struct inode *i2)
{
- LASSERT(i1 != NULL);
- LASSERT(suppgids != NULL);
-
- if (cfs_curproc_is_in_groups(i1->i_stbuf.st_gid))
- suppgids[0] = i1->i_stbuf.st_gid;
- else
- suppgids[0] = -1;
-
- if (i2) {
- if (cfs_curproc_is_in_groups(i2->i_stbuf.st_gid))
- suppgids[1] = i2->i_stbuf.st_gid;
- else
- suppgids[1] = -1;
- } else {
- suppgids[1] = -1;
- }
+ LASSERT(i1 != NULL);
+ LASSERT(suppgids != NULL);
+
+ if (in_group_p(i1->i_stbuf.st_gid))
+ suppgids[0] = i1->i_stbuf.st_gid;
+ else
+ suppgids[0] = -1;
+
+ if (i2) {
+ if (in_group_p(i2->i_stbuf.st_gid))
+ suppgids[1] = i2->i_stbuf.st_gid;
+ else
+ suppgids[1] = -1;
+ } else {
+ suppgids[1] = -1;
+ }
}
void llu_prep_md_op_data(struct md_op_data *op_data, struct inode *i1,
#endif
}
-int cfs_curproc_is_in_groups(gid_t gid)
+int in_group_p(gid_t gid)
{
int i;
struct intnl_stat *st = llu_i2stat(inode);
mode_t mode = st->st_mode;
- if (current->fsuid == st->st_uid)
- mode >>= 6;
- else if (cfs_curproc_is_in_groups(st->st_gid))
- mode >>= 3;
+ if (current->fsuid == st->st_uid)
+ mode >>= 6;
+ else if (in_group_p(st->st_gid))
+ mode >>= 3;
if ((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask)
return 0;
st->st_mtime = attr->ia_mtime;
if (ia_valid & ATTR_CTIME)
st->st_ctime = attr->ia_ctime;
- if (ia_valid & ATTR_MODE) {
- st->st_mode = attr->ia_mode;
- if (!cfs_curproc_is_in_groups(st->st_gid) &&
- !cfs_capable(CFS_CAP_FSETID))
- st->st_mode &= ~S_ISGID;
- }
+ if (ia_valid & ATTR_MODE) {
+ st->st_mode = attr->ia_mode;
+ if (!in_group_p(st->st_gid) &&
+ !cfs_capable(CFS_CAP_FSETID))
+ st->st_mode &= ~S_ISGID;
+ }
/* mark_inode_dirty(inode); */
return error;
}
RETURN(PTR_ERR(op_data));
if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
- it->it_create_mode &= ~cfs_curproc_umask();
+ it->it_create_mode &= ~current_umask();
it->it_create_mode |= M_CHECK_STALE;
rc = md_intent_lock(exp, op_data, NULL, 0, it,
lookup_flags,
op_data->op_cli_flags |= CLI_SET_MEA;
err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
- cfs_curproc_fsuid(), cfs_curproc_fsgid(),
+ current_fsuid(), current_fsgid(),
cfs_curproc_cap_pack(), 0, &request);
ll_finish_md_op_data(op_data);
if (err)
sbi->ll_flags & LL_SBI_RMT_CLIENT)
RETURN(-EPERM);
break;
- case Q_GETQUOTA:
- if (((type == USRQUOTA && cfs_curproc_euid() != id) ||
- (type == GRPQUOTA && !in_egroup_p(id))) &&
- (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
- sbi->ll_flags & LL_SBI_RMT_CLIENT))
- RETURN(-EPERM);
+ case Q_GETQUOTA:
+ if (((type == USRQUOTA && current_euid() != id) ||
+ (type == GRPQUOTA && !in_egroup_p(id))) &&
+ (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
+ sbi->ll_flags & LL_SBI_RMT_CLIENT))
+ RETURN(-EPERM);
break;
case Q_GETINFO:
break;
inode == inode->i_sb->s_root->d_inode) {
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- LASSERT(fd != NULL);
- rc = rct_add(&sbi->ll_rct, cfs_curproc_pid(), arg);
- if (!rc)
- fd->fd_flags |= LL_FILE_RMTACL;
- RETURN(rc);
+ LASSERT(fd != NULL);
+ rc = rct_add(&sbi->ll_rct, current_pid(), arg);
+ if (!rc)
+ fd->fd_flags |= LL_FILE_RMTACL;
+ RETURN(rc);
} else
RETURN(0);
}
inode->i_generation, inode);
#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- inode == inode->i_sb->s_root->d_inode) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
-
- LASSERT(fd != NULL);
- if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
- fd->fd_flags &= ~LL_FILE_RMTACL;
- rct_del(&sbi->ll_rct, cfs_curproc_pid());
- et_search_free(&sbi->ll_et, cfs_curproc_pid());
- }
- }
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ inode == inode->i_sb->s_root->d_inode) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+
+ LASSERT(fd != NULL);
+ if (unlikely(fd->fd_flags & LL_FILE_RMTACL)) {
+ fd->fd_flags &= ~LL_FILE_RMTACL;
+ rct_del(&sbi->ll_rct, current_pid());
+ et_search_free(&sbi->ll_et, current_pid());
+ }
+ }
#endif
if (inode->i_sb->s_root != file->f_dentry)
if (lli->lli_opendir_key == NULL && lli->lli_sai == NULL &&
lli->lli_opendir_pid == 0) {
lli->lli_opendir_key = fd;
- lli->lli_opendir_pid = cfs_curproc_pid();
+ lli->lli_opendir_pid = current_pid();
opendir_set = 1;
}
spin_unlock(&lli->lli_sa_lock);
static inline void
ll_statahead_mark(struct inode *dir, struct dentry *dentry)
{
- struct ll_inode_info *lli = ll_i2info(dir);
- struct ll_statahead_info *sai = lli->lli_sai;
- struct ll_dentry_data *ldd = ll_d2d(dentry);
+ struct ll_inode_info *lli = ll_i2info(dir);
+ struct ll_statahead_info *sai = lli->lli_sai;
+ struct ll_dentry_data *ldd = ll_d2d(dentry);
- /* not the same process, don't mark */
- if (lli->lli_opendir_pid != cfs_curproc_pid())
- return;
+ /* not the same process, don't mark */
+ if (lli->lli_opendir_pid != current_pid())
+ return;
LASSERT(ldd != NULL);
if (sai != NULL)
lli = ll_i2info(dir);
/* not the same process, don't statahead */
- if (lli->lli_opendir_pid != cfs_curproc_pid())
+ if (lli->lli_opendir_pid != current_pid())
return -EAGAIN;
/* statahead has been stopped */
attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
}
- /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
+ /* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
if (attr->ia_valid & TIMES_SET_FLAGS) {
- if (cfs_curproc_fsuid() != inode->i_uid &&
- !cfs_capable(CFS_CAP_FOWNER))
- RETURN(-EPERM);
- }
+ if (current_fsuid() != inode->i_uid &&
+ !cfs_capable(CFS_CAP_FOWNER))
+ RETURN(-EPERM);
+ }
/* We mark all of the fields "set" so MDS/OST does not re-set them */
if (attr->ia_valid & ATTR_CTIME) {
int ll_flush_ctx(struct inode *inode)
{
- struct ll_sb_info *sbi = ll_i2sbi(inode);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_SEC, "flush context for user %d\n", cfs_curproc_uid());
+ CDEBUG(D_SEC, "flush context for user %d\n", current_uid());
- obd_set_info_async(NULL, sbi->ll_md_exp,
- sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
- 0, NULL, NULL);
- obd_set_info_async(NULL, sbi->ll_dt_exp,
- sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
- 0, NULL, NULL);
- return 0;
+ obd_set_info_async(NULL, sbi->ll_md_exp,
+ sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
+ 0, NULL, NULL);
+ obd_set_info_async(NULL, sbi->ll_dt_exp,
+ sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
+ 0, NULL, NULL);
+ return 0;
}
/* umount -f client means force down, don't save state */
op_data->op_namelen = namelen;
op_data->op_mode = mode;
op_data->op_mod_time = cfs_time_current_sec();
- op_data->op_fsuid = cfs_curproc_fsuid();
- op_data->op_fsgid = cfs_curproc_fsgid();
+ op_data->op_fsuid = current_fsuid();
+ op_data->op_fsgid = current_fsgid();
op_data->op_cap = cfs_curproc_cap_pack();
op_data->op_bias = 0;
op_data->op_cli_flags = 0;
else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
sbi->ll_stats_track_id == current->parent->pid)
lprocfs_counter_add(sbi->ll_stats, op, count);
- else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
- sbi->ll_stats_track_id == cfs_curproc_gid())
- lprocfs_counter_add(sbi->ll_stats, op, count);
+ else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
+ sbi->ll_stats_track_id == current_gid())
+ lprocfs_counter_add(sbi->ll_stats, op, count);
}
EXPORT_SYMBOL(ll_stats_ops_tally);
__u32 ll_i2suppgid(struct inode *i)
{
- if (cfs_curproc_is_in_groups(i->i_gid))
- return (__u32)i->i_gid;
- else
- return (__u32)(-1);
+ if (in_group_p(i->i_gid))
+ return (__u32)i->i_gid;
+ else
+ return (__u32)(-1);
}
/* Pack the required supplementary groups into the supplied groups array.
/* enforce umask if acl disabled or MDS doesn't support umask */
if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
- it->it_create_mode &= ~cfs_curproc_umask();
+ it->it_create_mode &= ~current_umask();
rc = md_intent_lock(ll_i2mdexp(parent), op_data, NULL, 0, it,
lookup_flags, &req, ll_md_blocking_ast, 0);
if (IS_ERR(op_data))
GOTO(err_exit, err = PTR_ERR(op_data));
- err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
- cfs_curproc_fsuid(), cfs_curproc_fsgid(),
- cfs_curproc_cap_pack(), rdev, &request);
- ll_finish_md_op_data(op_data);
+ err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
+ current_fsuid(), current_fsgid(),
+ cfs_curproc_cap_pack(), rdev, &request);
+ ll_finish_md_op_data(op_data);
if (err)
GOTO(err_exit, err);
name->len, name->name, dir->i_ino, dir->i_generation, dir,
mode, rdev);
- if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
- mode &= ~cfs_curproc_umask();
+ if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
+ mode &= ~current_umask();
switch (mode & S_IFMT) {
case 0:
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir);
- if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
- mode &= ~cfs_curproc_umask();
- mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
- err = ll_new_node(dir, name, NULL, mode, 0, dchild, LUSTRE_OPC_MKDIR);
+ if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
+ mode &= ~current_umask();
+ mode = (mode & (S_IRWXUGO|S_ISVTX)) | S_IFDIR;
+ err = ll_new_node(dir, name, NULL, mode, 0, dchild, LUSTRE_OPC_MKDIR);
if (!err)
ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
int found = 0, rc;
ENTRY;
- if (!lli->lli_remote_perms)
- RETURN(-ENOENT);
+ if (!lli->lli_remote_perms)
+ RETURN(-ENOENT);
- head = lli->lli_remote_perms + remote_perm_hashfunc(cfs_curproc_uid());
+ head = lli->lli_remote_perms + remote_perm_hashfunc(current_uid());
spin_lock(&lli->lli_lock);
- cfs_hlist_for_each_entry(lrp, node, head, lrp_list) {
- if (lrp->lrp_uid != cfs_curproc_uid())
- continue;
- if (lrp->lrp_gid != cfs_curproc_gid())
- continue;
- if (lrp->lrp_fsuid != cfs_curproc_fsuid())
- continue;
- if (lrp->lrp_fsgid != cfs_curproc_fsgid())
- continue;
- found = 1;
- break;
- }
+ cfs_hlist_for_each_entry(lrp, node, head, lrp_list) {
+ if (lrp->lrp_uid != current_uid())
+ continue;
+ if (lrp->lrp_gid != current_gid())
+ continue;
+ if (lrp->lrp_fsuid != current_fsuid())
+ continue;
+ if (lrp->lrp_fsgid != current_fsgid())
+ continue;
+ found = 1;
+ break;
+ }
if (!found)
GOTO(out, rc = -ENOENT);
struct l_wait_info lwi = { 0 };
ENTRY;
- CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+ CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
- atomic_inc(&sbi->ll_agl_total);
+ atomic_inc(&sbi->ll_agl_total);
spin_lock(&plli->lli_agl_lock);
sai->sai_agl_valid = 1;
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&plli->lli_agl_lock);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ cfs_waitq_signal(&thread->t_ctl_waitq);
while (1) {
l_wait_event(thread->t_ctl_waitq,
cfs_waitq_signal(&thread->t_ctl_waitq);
ll_sai_put(sai);
CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+ current_pid(), parent->d_name.len, parent->d_name.name);
RETURN(0);
}
ENTRY;
CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+ current_pid(), parent->d_name.len, parent->d_name.name);
plli = ll_i2info(parent->d_inode);
task = kthread_run(ll_agl_thread, parent,
struct l_wait_info lwi = { 0 };
ENTRY;
- CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+ CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
- if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
- ll_start_agl(parent, sai);
+ if (sbi->ll_flags & LL_SBI_AGL_ENABLED)
+ ll_start_agl(parent, sai);
- atomic_inc(&sbi->ll_sa_total);
+ atomic_inc(&sbi->ll_sa_total);
spin_lock(&plli->lli_sa_lock);
thread_set_flags(thread, SVC_RUNNING);
spin_unlock(&plli->lli_sa_lock);
spin_unlock(&plli->lli_agl_lock);
cfs_waitq_signal(&agl_thread->t_ctl_waitq);
- CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
- cfs_curproc_pid());
- l_wait_event(agl_thread->t_ctl_waitq,
- thread_is_stopped(agl_thread),
- &lwi);
+ CDEBUG(D_READA, "stop agl thread: [pid %d]\n",
+ current_pid());
+ l_wait_event(agl_thread->t_ctl_waitq,
+ thread_is_stopped(agl_thread),
+ &lwi);
} else {
/* Set agl_thread flags anyway. */
thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
ll_sai_put(sai);
dput(parent);
- CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
- return rc;
+ CDEBUG(D_READA, "statahead thread stopped: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
+ return rc;
}
/**
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "stop statahead thread: [pid %d]\n",
- cfs_curproc_pid());
+ current_pid());
l_wait_event(thread->t_ctl_waitq,
thread_is_stopped(thread),
&lwi);
sai->sai_consecutive_miss++;
if (sa_low_hit(sai) && thread_is_running(thread)) {
atomic_inc(&sbi->ll_sa_wrong);
- CDEBUG(D_READA, "Statahead for dir "DFID" hit "
- "ratio too low: hit/miss "LPU64"/"LPU64
- ", sent/replied "LPU64"/"LPU64", stopping "
- "statahead thread: pid %d\n",
- PFID(&lli->lli_fid), sai->sai_hit,
- sai->sai_miss, sai->sai_sent,
- sai->sai_replied, cfs_curproc_pid());
+ CDEBUG(D_READA, "Statahead for dir "DFID" hit "
+ "ratio too low: hit/miss "LPU64"/"LPU64
+ ", sent/replied "LPU64"/"LPU64", stopping "
+ "statahead thread: pid %d\n",
+ PFID(&lli->lli_fid), sai->sai_hit,
+ sai->sai_miss, sai->sai_sent,
+ sai->sai_replied, current_pid());
spin_lock(&lli->lli_sa_lock);
if (!thread_is_stopped(thread))
thread_set_flags(thread, SVC_STOPPING);
struct ll_inode_info *plli;
ENTRY;
- LASSERT(lli->lli_opendir_pid == cfs_curproc_pid());
+ LASSERT(lli->lli_opendir_pid == current_pid());
if (sai) {
thread = &sai->sai_thread;
GOTO(out, rc = -EAGAIN);
}
- CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
- cfs_curproc_pid(), parent->d_name.len, parent->d_name.name);
+ CDEBUG(D_READA, "start statahead thread: [pid %d] [parent %.*s]\n",
+ current_pid(), parent->d_name.len, parent->d_name.name);
lli->lli_sai = sai;
RETURN(-EOPNOTSUPP);
#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, cfs_curproc_pid());
- if (rce == NULL ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_RSETFACL))
- RETURN(-EOPNOTSUPP);
-
- if (rce->rce_ops == RMT_LSETFACL) {
- struct eacl_entry *ee;
-
- ee = et_search_del(&sbi->ll_et, cfs_curproc_pid(),
- ll_inode2fid(inode), xattr_type);
- LASSERT(ee != NULL);
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ (xattr_type == XATTR_ACL_ACCESS_T ||
+ xattr_type == XATTR_ACL_DEFAULT_T)) {
+ rce = rct_search(&sbi->ll_rct, current_pid());
+ if (rce == NULL ||
+ (rce->rce_ops != RMT_LSETFACL &&
+ rce->rce_ops != RMT_RSETFACL))
+ RETURN(-EOPNOTSUPP);
+
+ if (rce->rce_ops == RMT_LSETFACL) {
+ struct eacl_entry *ee;
+
+ ee = et_search_del(&sbi->ll_et, current_pid(),
+ ll_inode2fid(inode), xattr_type);
+ LASSERT(ee != NULL);
if (valid & OBD_MD_FLXATTR) {
acl = lustre_acl_xattr_merge2ext(
(posix_acl_xattr_header *)value,
RETURN(-EOPNOTSUPP);
#ifdef CONFIG_FS_POSIX_ACL
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
- (xattr_type == XATTR_ACL_ACCESS_T ||
- xattr_type == XATTR_ACL_DEFAULT_T)) {
- rce = rct_search(&sbi->ll_rct, cfs_curproc_pid());
- if (rce == NULL ||
- (rce->rce_ops != RMT_LSETFACL &&
- rce->rce_ops != RMT_LGETFACL &&
- rce->rce_ops != RMT_RSETFACL &&
- rce->rce_ops != RMT_RGETFACL))
- RETURN(-EOPNOTSUPP);
- }
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
+ (xattr_type == XATTR_ACL_ACCESS_T ||
+ xattr_type == XATTR_ACL_DEFAULT_T)) {
+ rce = rct_search(&sbi->ll_rct, current_pid());
+ if (rce == NULL ||
+ (rce->rce_ops != RMT_LSETFACL &&
+ rce->rce_ops != RMT_LGETFACL &&
+ rce->rce_ops != RMT_RSETFACL &&
+ rce->rce_ops != RMT_RGETFACL))
+ RETURN(-EOPNOTSUPP);
+ }
/* posix acl is under protection of LOOKUP lock. when calling to this,
* we just have path resolution to the target inode, so we have great
if (IS_ERR(acl))
GOTO(out, rc = PTR_ERR(acl));
- rc = ee_add(&sbi->ll_et, cfs_curproc_pid(), ll_inode2fid(inode),
- xattr_type, acl);
+ rc = ee_add(&sbi->ll_et, current_pid(), ll_inode2fid(inode),
+ xattr_type, acl);
if (unlikely(rc < 0)) {
lustre_ext_acl_xattr_free(acl);
GOTO(out, rc);
PFID(&op_data->op_fid2), op_data->op_namelen,
op_data->op_name, PFID(&op_data->op_fid1));
- op_data->op_fsuid = cfs_curproc_fsuid();
- op_data->op_fsgid = cfs_curproc_fsgid();
+ op_data->op_fsuid = current_fsuid();
+ op_data->op_fsgid = current_fsgid();
op_data->op_cap = cfs_curproc_cap_pack();
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
if (IS_ERR(tgt))
if (rc)
RETURN(rc);
- op_data->op_fsuid = cfs_curproc_fsuid();
- op_data->op_fsgid = cfs_curproc_fsgid();
+ op_data->op_fsuid = current_fsuid();
+ op_data->op_fsgid = current_fsgid();
op_data->op_cap = cfs_curproc_cap_pack();
src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(src_tgt))
if (IS_ERR(tgt))
RETURN(PTR_ERR(tgt));
- op_data->op_fsuid = cfs_curproc_fsuid();
- op_data->op_fsgid = cfs_curproc_fsgid();
+ op_data->op_fsuid = current_fsuid();
+ op_data->op_fsgid = current_fsgid();
op_data->op_cap = cfs_curproc_cap_pack();
/*
LASSERT(spin_is_locked(&lsm->lsm_lock));
#ifdef __KERNEL__
- LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
+ LASSERT(lsm->lsm_lock_owner == current_pid());
#endif
CDEBUG(D_INODE, "MDT ID "DOSTID" initial value: s="LPU64" m="LPU64
LASSERT(spin_is_locked(&lsm->lsm_lock));
#ifdef __KERNEL__
- LASSERT(lsm->lsm_lock_owner == cfs_curproc_pid());
+ LASSERT(lsm->lsm_lock_owner == current_pid());
#endif
if (shrink) {
void lov_stripe_lock(struct lov_stripe_md *md)
{
- LASSERT(md->lsm_lock_owner != cfs_curproc_pid());
+ LASSERT(md->lsm_lock_owner != current_pid());
spin_lock(&md->lsm_lock);
LASSERT(md->lsm_lock_owner == 0);
- md->lsm_lock_owner = cfs_curproc_pid();
+ md->lsm_lock_owner = current_pid();
}
EXPORT_SYMBOL(lov_stripe_lock);
void lov_stripe_unlock(struct lov_stripe_md *md)
{
- LASSERT(md->lsm_lock_owner == cfs_curproc_pid());
+ LASSERT(md->lsm_lock_owner == current_pid());
md->lsm_lock_owner = 0;
spin_unlock(&md->lsm_lock);
}
LASSERT(d_refcount(new_ctx->pwd));
save->pwd = dget(current->fs->pwd.dentry);
save->pwdmnt = mntget(current->fs->pwd.mnt);
- save->luc.luc_umask = cfs_curproc_umask();
- save->ngroups = current_cred()->group_info->ngroups;
+ save->luc.luc_umask = current_umask();
+ save->ngroups = current_cred()->group_info->ngroups;
LASSERT(save->pwd);
LASSERT(save->pwdmnt);
static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
{
- LASSERT (b != NULL);
-
- b->suppgid = suppgid;
- b->uid = cfs_curproc_uid();
- b->gid = cfs_curproc_gid();
- b->fsuid = cfs_curproc_fsuid();
- b->fsgid = cfs_curproc_fsgid();
- b->capability = cfs_curproc_cap_pack();
+ LASSERT (b != NULL);
+
+ b->suppgid = suppgid;
+ b->uid = current_uid();
+ b->gid = current_gid();
+ b->fsuid = current_fsuid();
+ b->fsgid = current_fsgid();
+ b->capability = cfs_curproc_cap_pack();
}
void mdc_pack_capa(struct ptlrpc_request *req, const struct req_msg_field *field,
flags |= MDS_OPEN_VOLATILE;
set_mrc_cr_flags(rec, flags);
rec->cr_bias = op_data->op_bias;
- rec->cr_umask = cfs_curproc_umask();
+ rec->cr_umask = current_umask();
mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
/* XXX do something about time, uid, gid */
rec->cr_opcode = REINT_OPEN;
- rec->cr_fsuid = cfs_curproc_fsuid();
- rec->cr_fsgid = cfs_curproc_fsgid();
+ rec->cr_fsuid = current_fsuid();
+ rec->cr_fsgid = current_fsgid();
rec->cr_cap = cfs_curproc_cap_pack();
if (op_data != NULL) {
rec->cr_fid1 = op_data->op_fid1;
rec->cr_suppgid1 = op_data->op_suppgids[0];
rec->cr_suppgid2 = op_data->op_suppgids[1];
rec->cr_bias = op_data->op_bias;
- rec->cr_umask = cfs_curproc_umask();
+ rec->cr_umask = current_umask();
rec->cr_old_handle = op_data->op_handle;
mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1);
}
static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
- struct md_op_data *op_data)
+ struct md_op_data *op_data)
{
- rec->sa_opcode = REINT_SETATTR;
- rec->sa_fsuid = cfs_curproc_fsuid();
- rec->sa_fsgid = cfs_curproc_fsgid();
- rec->sa_cap = cfs_curproc_cap_pack();
- rec->sa_suppgid = -1;
+ rec->sa_opcode = REINT_SETATTR;
+ rec->sa_fsuid = current_fsuid();
+ rec->sa_fsgid = current_fsgid();
+ rec->sa_cap = cfs_curproc_cap_pack();
+ rec->sa_suppgid = -1;
rec->sa_fid = op_data->op_fid1;
rec->sa_valid = attr_pack(op_data->op_attr.ia_valid);
rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
rec->sa_attr_flags = ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
- if ((op_data->op_attr.ia_valid & ATTR_GID) &&
- cfs_curproc_is_in_groups(op_data->op_attr.ia_gid))
- rec->sa_suppgid = op_data->op_attr.ia_gid;
- else
- rec->sa_suppgid = op_data->op_suppgids[0];
+ if ((op_data->op_attr.ia_valid & ATTR_GID) &&
+ in_group_p(op_data->op_attr.ia_gid))
+ rec->sa_suppgid = op_data->op_attr.ia_gid;
+ else
+ rec->sa_suppgid = op_data->op_suppgids[0];
rec->sa_bias = op_data->op_bias;
}
CLASSERT(sizeof(struct mdt_rec_setxattr) ==
sizeof(struct mdt_rec_reint));
- rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
- rec->sx_opcode = REINT_SETXATTR;
- /* TODO:
- * cfs_curproc_fs{u,g}id() should replace
- * current->fs{u,g}id for portability.
- */
- rec->sx_fsuid = cfs_curproc_fsuid();
- rec->sx_fsgid = cfs_curproc_fsgid();
- rec->sx_cap = cfs_curproc_cap_pack();
- rec->sx_suppgid1 = suppgid;
+ rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
+ rec->sx_opcode = REINT_SETXATTR;
+ rec->sx_fsuid = current_fsuid();
+ rec->sx_fsgid = current_fsgid();
+ rec->sx_cap = cfs_curproc_cap_pack();
+ rec->sx_suppgid1 = suppgid;
rec->sx_suppgid2 = -1;
rec->sx_fid = *fid;
rec->sx_valid = valid | OBD_MD_FLCTIME;
cfs_waitq_signal(&cdt->cdt_thread.t_ctl_waitq);
CDEBUG(D_HSM, "%s: coordinator thread starting, pid=%d\n",
- mdt_obd_name(mdt), cfs_curproc_pid());
+ mdt_obd_name(mdt), current_pid());
/*
* create /proc entries for coordinator
if (rc != 0)
CERROR("%s: coordinator thread exiting, process=%d, rc=%d\n",
- mdt_obd_name(mdt), cfs_curproc_pid(), rc);
+ mdt_obd_name(mdt), current_pid(), rc);
else
CDEBUG(D_HSM, "%s: coordinator thread exiting, process=%d,"
" no error\n",
- mdt_obd_name(mdt), cfs_curproc_pid());
+ mdt_obd_name(mdt), current_pid());
return rc;
}
/* Use process name + fsuid as jobid */
if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) {
snprintf(jobid, JOBSTATS_JOBID_SIZE, "%s.%u",
- cfs_curproc_comm(), cfs_curproc_fsuid());
+ current_comm(), current_fsuid());
RETURN(0);
}
*/
void obd_zombie_barrier(void)
{
- struct l_wait_info lwi = { 0 };
+ struct l_wait_info lwi = { 0 };
- if (obd_zombie_pid == cfs_curproc_pid())
- /* don't wait for myself */
- return;
- l_wait_event(obd_zombie_waitq, obd_zombie_is_idle(), &lwi);
+ if (obd_zombie_pid == current_pid())
+ /* don't wait for myself */
+ return;
+ l_wait_event(obd_zombie_waitq, obd_zombie_is_idle(), &lwi);
}
EXPORT_SYMBOL(obd_zombie_barrier);
unshare_fs_struct();
complete(&obd_zombie_start);
- obd_zombie_pid = cfs_curproc_pid();
+ obd_zombie_pid = current_pid();
while (!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags)) {
struct l_wait_info lwi = { 0 };
oa->o_size = attr->ia_size;
oa->o_valid |= OBD_MD_FLSIZE;
}
- if (ia_valid & ATTR_MODE) {
- oa->o_mode = attr->ia_mode;
- oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
- if (!cfs_curproc_is_in_groups(oa->o_gid) &&
- !cfs_capable(CFS_CAP_FSETID))
- oa->o_mode &= ~S_ISGID;
- }
+ if (ia_valid & ATTR_MODE) {
+ oa->o_mode = attr->ia_mode;
+ oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
+ if (!in_group_p(oa->o_gid) &&
+ !cfs_capable(CFS_CAP_FSETID))
+ oa->o_mode &= ~S_ISGID;
+ }
if (ia_valid & ATTR_UID) {
oa->o_uid = attr->ia_uid;
oa->o_valid |= OBD_MD_FLUID;
}
#endif
if (valid & OBD_MD_FLMODE) {
- attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
- attr->ia_valid |= ATTR_MODE;
- if (!cfs_curproc_is_in_groups(oa->o_gid) &&
- !cfs_capable(CFS_CAP_FSETID))
- attr->ia_mode &= ~S_ISGID;
+ attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
+ attr->ia_valid |= ATTR_MODE;
+ if (!in_group_p(oa->o_gid) &&
+ !cfs_capable(CFS_CAP_FSETID))
+ attr->ia_mode &= ~S_ISGID;
}
if (valid & OBD_MD_FLUID) {
attr->ia_uid = oa->o_uid;
ucred->uc_suppgids[0] = -1;
ucred->uc_suppgids[1] = -1;
- ucred->uc_uid = ucred->uc_o_uid = cfs_curproc_uid();
- ucred->uc_gid = ucred->uc_o_gid = cfs_curproc_gid();
- ucred->uc_fsuid = ucred->uc_o_fsuid = cfs_curproc_fsuid();
- ucred->uc_fsgid = ucred->uc_o_fsgid = cfs_curproc_fsgid();
+ ucred->uc_uid = ucred->uc_o_uid = current_uid();
+ ucred->uc_gid = ucred->uc_o_gid = current_gid();
+ ucred->uc_fsuid = ucred->uc_o_fsuid = current_fsuid();
+ ucred->uc_fsgid = ucred->uc_o_fsgid = current_fsgid();
ucred->uc_cap = cfs_curproc_cap_pack();
/* remove fs privilege for non-root user. */
case OBD_IOC_PING_TARGET:
err = ptlrpc_obd_ping(obd);
GOTO(out, err);
- default:
- CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
- cmd, cfs_curproc_comm());
- GOTO(out, err = -ENOTTY);
- }
+ default:
+ CDEBUG(D_INODE, "unrecognised ioctl %#x by %s\n",
+ cmd, current_comm());
+ GOTO(out, err = -ENOTTY);
+ }
out:
- cfs_module_put(THIS_MODULE);
- return err;
+ cfs_module_put(THIS_MODULE);
+ return err;
}
static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
break;
default:
CERROR("%s: unrecognized ioctl %#x by %s\n", obd->obd_name,
- cmd, cfs_curproc_comm());
+ cmd, current_comm());
rc = -ENOTTY;
}
cfs_module_put(THIS_MODULE);
cfs_atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
rc = sptlrpc_req_refresh_ctx(req, -1);
if (rc) {
}
}
- CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
- " %s:%s:%d:"LPU64":%s:%d\n", cfs_curproc_comm(),
- imp->imp_obd->obd_uuid.uuid,
- lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
- libcfs_nid2str(imp->imp_connection->c_peer.nid),
- lustre_msg_get_opc(req->rq_reqmsg));
+ CDEBUG(D_RPCTRACE, "Sending RPC pname:cluuid:pid:xid:nid:opc"
+ " %s:%s:%d:"LPU64":%s:%d\n", current_comm(),
+ imp->imp_obd->obd_uuid.uuid,
+ lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
+ libcfs_nid2str(imp->imp_connection->c_peer.nid),
+ lustre_msg_get_opc(req->rq_reqmsg));
rc = ptl_send_rpc(req, 0);
if (rc) {
CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0,
"Completed RPC pname:cluuid:pid:xid:nid:"
"opc %s:%s:%d:"LPU64":%s:%d\n",
- cfs_curproc_comm(), imp->imp_obd->obd_uuid.uuid,
+ current_comm(), imp->imp_obd->obd_uuid.uuid,
lustre_msg_get_status(req->rq_reqmsg), req->rq_xid,
libcfs_nid2str(imp->imp_connection->c_peer.nid),
lustre_msg_get_opc(req->rq_reqmsg));
RETURN(-ENOMEM);
}
- /* for distributed debugging */
- lustre_msg_set_status(req->rq_reqmsg, cfs_curproc_pid());
+ /* for distributed debugging */
+ lustre_msg_set_status(req->rq_reqmsg, current_pid());
/* add a ref for the set (see comment in ptlrpc_set_add_req) */
ptlrpc_request_addref(req);
}
}
- thread_set_flags(thread, SVC_STOPPED);
- cfs_waitq_signal(&thread->t_ctl_waitq);
+ thread_set_flags(thread, SVC_STOPPED);
+ cfs_waitq_signal(&thread->t_ctl_waitq);
- CDEBUG(D_NET, "pinger thread exiting, process %d\n", cfs_curproc_pid());
- return 0;
+ CDEBUG(D_NET, "pinger thread exiting, process %d\n", current_pid());
+ return 0;
}
static struct ptlrpc_thread pinger_thread;
create = 0;
remove_dead = 0;
}
- } else {
- vcred.vc_uid = cfs_curproc_uid();
- vcred.vc_gid = cfs_curproc_gid();
- }
+ } else {
+ vcred.vc_uid = current_uid();
+ vcred.vc_gid = current_gid();
+ }
- return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
- create, remove_dead);
+ return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred, create,
+ remove_dead);
}
struct ptlrpc_cli_ctx *sptlrpc_cli_ctx_get(struct ptlrpc_cli_ctx *ctx)
void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
- import_flush_ctx_common(imp, cfs_curproc_uid(), 1, 1);
+ import_flush_ctx_common(imp, current_uid(), 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
{
- struct ptlrpc_user_desc *pud;
+ struct ptlrpc_user_desc *pud;
- pud = lustre_msg_buf(msg, offset, 0);
+ pud = lustre_msg_buf(msg, offset, 0);
- pud->pud_uid = cfs_curproc_uid();
- pud->pud_gid = cfs_curproc_gid();
- pud->pud_fsuid = cfs_curproc_fsuid();
- pud->pud_fsgid = cfs_curproc_fsgid();
- pud->pud_cap = cfs_curproc_cap_pack();
- pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
+ pud->pud_uid = current_uid();
+ pud->pud_gid = current_gid();
+ pud->pud_fsuid = current_fsuid();
+ pud->pud_fsgid = current_fsgid();
+ pud->pud_cap = cfs_curproc_cap_pack();
+ pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
#ifdef __KERNEL__
- task_lock(current);
- if (pud->pud_ngroups > current_ngroups)
- pud->pud_ngroups = current_ngroups;
- memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
- pud->pud_ngroups * sizeof(__u32));
- task_unlock(current);
+ task_lock(current);
+ if (pud->pud_ngroups > current_ngroups)
+ pud->pud_ngroups = current_ngroups;
+ memcpy(pud->pud_groups, current_cred()->group_info->blocks[0],
+ pud->pud_ngroups * sizeof(__u32));
+ task_unlock(current);
#endif
- return 0;
+ return 0;
}
EXPORT_SYMBOL(sptlrpc_pack_user_desc);
goto put_conn;
}
- CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
- (request->rq_export ?
- cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
- lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
- libcfs_id2str(request->rq_peer),
- lustre_msg_get_opc(request->rq_reqmsg));
+ CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
+ "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ (request->rq_export ?
+ cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+ lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
+ libcfs_id2str(request->rq_peer),
+ lustre_msg_get_opc(request->rq_reqmsg));
if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
do_gettimeofday(&work_end);
timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
- "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
- cfs_curproc_comm(),
- (request->rq_export ?
- (char *)request->rq_export->exp_client_uuid.uuid : "0"),
+ "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
+ "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
+ current_comm(),
+ (request->rq_export ?
+ (char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg),
int counter = 0, rc = 0;
ENTRY;
- thread->t_pid = cfs_curproc_pid();
+ thread->t_pid = current_pid();
unshare_fs_struct();
/* NB: we will call cfs_cpt_bind() for all threads, because we