*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
*
- * Copyright (c) 2012, Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
*
* Author: Eric Mei <ericm@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_SEC
-#ifdef __KERNEL__
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/atomic.h>
struct rpc_clnt; /* for rpc_pipefs */
#include <linux/sunrpc/rpc_pipe_fs.h>
-#else
-#include <liblustre.h>
-#endif
+#include <libcfs/linux/linux-list.h>
#include <obd.h>
#include <obd_class.h>
#include <obd_support.h>
-#include <lustre/lustre_idl.h>
+#include <uapi/linux/lustre/lustre_idl.h>
#include <lustre_sec.h>
#include <lustre_net.h>
#include <lustre_import.h>
}
/****************************************
- * internel context helpers *
+ * internal context helpers *
****************************************/
static
ctx_check_death_locked_pf(ctx, freelist);
}
- sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
- EXIT;
+ sec->ps_gc_next = ktime_get_real_seconds() + sec->ps_gc_interval;
+ EXIT;
}
static
retry:
spin_lock(&sec->ps_lock);
- /* gc_next == 0 means never do gc */
- if (remove_dead && sec->ps_gc_next &&
- cfs_time_after(cfs_time_current_sec(), sec->ps_gc_next)) {
- gss_ctx_cache_gc_pf(gsec_pf, &freelist);
- gc = 1;
- }
+ /* gc_next == 0 means never do gc */
+ if (remove_dead && sec->ps_gc_next &&
+ (ktime_get_real_seconds() > sec->ps_gc_next)) {
+ gss_ctx_cache_gc_pf(gsec_pf, &freelist);
+ gc = 1;
+ }
cfs_hlist_for_each_entry_safe(ctx, pos, next, hash_head, cc_cache) {
if (gc == 0 &&
static
int simple_get_bytes(char **buf, __u32 *buflen, void *res, __u32 reslen)
{
- if (*buflen < reslen) {
- CERROR("buflen %u < %u\n", *buflen, reslen);
- return -EINVAL;
- }
+ if (*buflen < reslen) {
+ CERROR("shorter buflen than needed: %u < %u\n",
+ *buflen, reslen);
+ return -EINVAL;
+ }
- memcpy(res, *buf, reslen);
- *buf += reslen;
- *buflen -= reslen;
- return 0;
+ memcpy(res, *buf, reslen);
+ *buf += reslen;
+ *buflen -= reslen;
+ return 0;
}
/****************************************
static
ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
{
- struct rpc_inode *rpci = RPC_I(filp->f_dentry->d_inode);
+ struct rpc_inode *rpci = RPC_I(file_inode(filp));
struct gss_upcall_msg *gss_msg;
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx = NULL;
{
struct gss_upcall_msg *gmsg;
struct gss_upcall_msg_data *gumd;
- static cfs_time_t ratelimit = 0;
+ static time64_t ratelimit;
ENTRY;
LASSERT(list_empty(&msg->list));
gumd = &gmsg->gum_data;
LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
- CERROR("failed msg %p (seq %u, uid %u, svc %u, nid "LPX64", obd %.*s): "
+ CERROR("failed msg %p (seq %u, uid %u, svc %u, nid %#llx, obd %.*s): "
"errno %d\n", msg, gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd, msg->errno);
atomic_inc(&gmsg->gum_refcount);
gss_unhash_msg(gmsg);
if (msg->errno == -ETIMEDOUT || msg->errno == -EPIPE) {
- cfs_time_t now = cfs_time_current_sec();
+ time64_t now = ktime_get_real_seconds();
- if (cfs_time_after(now, ratelimit)) {
+ if (now > ratelimit) {
CWARN("upcall timed out, is lgssd running?\n");
ratelimit = now + 15;
}
LASSERT(list_empty(&gmsg->gum_base.list));
CERROR("failing remaining msg %p:seq %u, uid %u, svc %u, "
- "nid "LPX64", obd %.*s\n", gmsg,
+ "nid %#llx, obd %.*s\n", gmsg,
gumd->gum_seq, gumd->gum_uid, gumd->gum_svc,
gumd->gum_nid, (int) sizeof(gumd->gum_obd),
gumd->gum_obd);