4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2014, Intel Corporation.
28 * Copyright 2017 Cray Inc, all rights reserved.
31 * Store PID->JobID mappings
34 #define DEBUG_SUBSYSTEM S_RPC
35 #include <linux/user_namespace.h>
36 #include <linux/uidgid.h>
37 #include <linux/utsname.h>
39 #include <libcfs/libcfs.h>
40 #include <obd_support.h>
41 #include <obd_class.h>
42 #include <lustre_net.h>
/* PID->JobID cache: hash table, its ops, and the lock guarding teardown. */
44 static struct cfs_hash *jobid_hash;
45 static struct cfs_hash_ops jobid_hash_ops;
46 spinlock_t jobid_hash_lock;
/* Seconds before a cached per-PID jobid is refreshed from the environment,
 * and before stale cache entries are purged (see jobid_get_from_cache()). */
48 #define RESCAN_INTERVAL 30
49 #define DELETE_INTERVAL 300
/* Name of the environment variable holding the jobid; JOBSTATS_DISABLE by
 * default.  obd_jobid_name is the format template used when jobid
 * interpretation is requested (see jobid_interpret_string()). */
51 char obd_jobid_var[JOBSTATS_JOBID_VAR_MAX_LEN + 1] = JOBSTATS_DISABLE;
52 char obd_jobid_name[LUSTRE_JOBID_SIZE] = "%e.%u";
55 * Structure to store a single PID->JobID mapping
57 struct jobid_pid_map {
58 struct hlist_node jp_hash;	/* linkage into jobid_hash */
60 spinlock_t jp_lock; /* protects jp_jobid */
61 char jp_jobid[LUSTRE_JOBID_SIZE];	/* cached jobid string */
62 unsigned int jp_joblen;		/* strlen of jp_jobid */
63 struct kref jp_refcount;	/* entry lifetime; freed on last put */
68 * Jobid can be set for a session (see setsid(2)) by writing to
69 * a sysfs file from any process in that session.
70 * The jobids are stored in a hash table indexed by the relevant
71 * struct pid. We periodically look for entries where the pid has
72 * no PIDTYPE_SID tasks any more, and prune them. This happens within
73 * 5 seconds of a jobid being added, and every 5 minutes when jobids exist,
76 #define JOBID_EXPEDITED_CLEAN (5)
77 #define JOBID_BACKGROUND_CLEAN (5 * 60)
/* One session->jobid mapping; hashed by sj_session in session_jobids.
 * NOTE(review): the jobid string itself is stored in a trailing flexible
 * member not visible in this view — see kmalloc(sizeof(*sj) + len) in
 * jobid_set_current(); confirm against the full source. */
79 struct session_jobid {
80 struct pid *sj_session;	/* session pid (key); holds a get_pid() ref */
81 struct rhash_head sj_linkage;	/* rhashtable linkage */
82 struct rcu_head sj_rcu;	/* deferred free via kfree_rcu() */
/* rhashtable keyed by the struct pid pointer value of the session. */
86 static const struct rhashtable_params jobid_params = {
87 .key_len = sizeof(struct pid *),
88 .key_offset = offsetof(struct session_jobid, sj_session),
89 .head_offset = offsetof(struct session_jobid, sj_linkage),
/* Table of per-session jobids, managed by jobid_set_current()/jobid_prune(). */
92 static struct rhashtable session_jobids;
95 * jobid_current must be called with rcu_read_lock held.
96 * if it returns non-NULL, the string can only be used
97 * until rcu_read_unlock is called.
/* Return the jobid set for the current process's session, or NULL if none.
 * Caller must hold rcu_read_lock(); the returned string is only valid
 * until rcu_read_unlock() (entries are freed with kfree_rcu()). */
99 char *jobid_current(void)
101 struct pid *sid = task_session(current);
102 struct session_jobid *sj;
104 sj = rhashtable_lookup_fast(&session_jobids, &sid, jobid_params);
110 static void jobid_prune_expedite(void);
112 * jobid_set_current will try to add a new entry
113 * to the table. If one exists with the same key, the
114 * jobid will be replaced
/* Set (or replace) the jobid for the current session.
 * Allocates a new session_jobid sized to hold the string, takes a
 * reference on the session pid, and inserts it into session_jobids;
 * on key collision the old entry is replaced and freed via RCU.
 * Returns 0 on success or a negative errno. */
116 int jobid_set_current(char *jobid)
119 struct session_jobid *sj, *origsj;
121 int len = strlen(jobid);
/* +len for the jobid string stored after the struct (incl. the NUL
 * accounted for in sizeof(*sj) — TODO confirm in full source) */
123 sj = kmalloc(sizeof(*sj) + len, GFP_KERNEL);
127 sid = task_session(current);
128 sj->sj_session = get_pid(sid);	/* entry owns a pid reference */
129 strncpy(sj->sj_jobid, jobid, len+1);
130 origsj = rhashtable_lookup_get_insert_fast(&session_jobids,
133 if (origsj == NULL) {
134 /* successful insert */
/* schedule an early prune pass so dead sessions are reaped soon */
136 jobid_prune_expedite();
140 if (IS_ERR(origsj)) {
/* insert failed: drop our pid ref and report the error */
141 put_pid(sj->sj_session);
144 return PTR_ERR(origsj);
/* key already present: swap the old entry for the new one */
146 ret = rhashtable_replace_fast(&session_jobids,
151 put_pid(sj->sj_session);
/* old entry's resources: pid ref dropped now, memory freed after RCU GP */
156 put_pid(origsj->sj_session);
158 kfree_rcu(origsj, sj_rcu);
159 jobid_prune_expedite();
/* rhashtable_free_and_destroy() callback: release the pid reference
 * held by a session_jobid entry (the memory is freed by the caller /
 * elided code). */
164 static void jobid_free(void *vsj, void *arg)
166 struct session_jobid *sj = vsj;
168 put_pid(sj->sj_session);
/* Periodic pruning of session_jobids entries whose session has exited. */
172 static void jobid_prune(struct work_struct *work);
173 static DECLARE_DELAYED_WORK(jobid_prune_work, jobid_prune);
/* set while an expedited prune is pending, to avoid re-arming the work */
174 static int jobid_prune_expedited;
/* Walk session_jobids and remove entries whose session pid no longer has
 * any PIDTYPE_SID tasks; reschedules itself at the background interval. */
175 static void jobid_prune(struct work_struct *work)
178 struct rhashtable_iter iter;
179 struct session_jobid *sj;
181 jobid_prune_expedited = 0;
182 rhashtable_walk_enter(&session_jobids, &iter);
183 rhashtable_walk_start(&iter);
184 while ((sj = rhashtable_walk_next(&iter)) != NULL) {
/* walk may return ERR_PTR(-EAGAIN) on a table resize; continue */
186 if (PTR_ERR(sj) == -EAGAIN)
/* session still has live tasks: keep the entry */
190 if (!hlist_empty(&sj->sj_session->tasks[PIDTYPE_SID])) {
194 if (rhashtable_remove_fast(&session_jobids,
196 jobid_params) == 0) {
/* only the remover drops the pid ref and frees (RCU-deferred) */
197 put_pid(sj->sj_session);
198 kfree_rcu(sj, sj_rcu);
201 rhashtable_walk_stop(&iter);
202 rhashtable_walk_exit(&iter);
204 schedule_delayed_work(&jobid_prune_work,
205 cfs_time_seconds(JOBID_BACKGROUND_CLEAN));
/* Pull the next prune pass forward to JOBID_EXPEDITED_CLEAN seconds,
 * so a newly-added session jobid is reaped quickly if the session dies.
 * Idempotent: only re-arms the work once per prune cycle. */
208 static void jobid_prune_expedite(void)
210 if (!jobid_prune_expedited) {
211 jobid_prune_expedited = 1;
212 mod_delayed_work(system_wq, &jobid_prune_work,
213 cfs_time_seconds(JOBID_EXPEDITED_CLEAN));
/* Read from / write to another task's address space, page by page.
 * Local re-implementation of access_process_vm() for kernels where it
 * is not exported.  @write selects direction (buf -> task memory when
 * non-zero).  Returns the number of bytes actually transferred. */
217 static int cfs_access_process_vm(struct task_struct *tsk,
218 struct mm_struct *mm,
220 void *buf, int len, int write)
222 /* Just copied from kernel for the kernels which doesn't
223 * have access_process_vm() exported
225 struct vm_area_struct *vma = NULL;
229 /* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
230 * which is already holding mmap_sem for writes. If some other
231 * thread gets the write lock in the meantime, this thread will
232 * block, but at least it won't deadlock on itself. LU-1735
234 if (!mmap_read_trylock(mm))
237 /* ignore errors, just check how much was successfully transferred */
239 int bytes, rc, offset;
/* pin one page of the target at @addr; API varies by kernel version */
242 #if defined(HAVE_GET_USER_PAGES_WITHOUT_VMA)
243 rc = get_user_pages(addr, 1, write ? FOLL_WRITE : 0, &page);
245 vma = vma_lookup(mm, addr);
246 #elif defined(HAVE_GET_USER_PAGES_GUP_FLAGS)
247 rc = get_user_pages(addr, 1, write ? FOLL_WRITE : 0, &page,
249 #elif defined(HAVE_GET_USER_PAGES_6ARG)
250 rc = get_user_pages(addr, 1, write, 1, &page, &vma);
252 rc = get_user_pages(tsk, mm, addr, 1, write, 1, &page, &vma);
/* clamp this chunk to the remainder of the current page */
258 offset = addr & (PAGE_SIZE-1);
259 if (bytes > PAGE_SIZE-offset)
260 bytes = PAGE_SIZE-offset;
264 copy_to_user_page(vma, page, addr,
265 maddr + offset, buf, bytes);
266 set_page_dirty_lock(page);	/* we modified the pinned page */
268 copy_from_user_page(vma, page, addr,
269 buf, maddr + offset, bytes);
277 mmap_read_unlock(mm);
/* bytes transferred = how far buf advanced from its starting value */
279 return buf - old_buf;
282 /* Read the environment variable of current process specified by @key. */
/* Scan current's environment (mm->env_start..env_end) for "@key=value".
 * On success copies the value into @value and stores its length in
 * *val_len.  Returns 0 on success, -ENOENT if the key is absent,
 * -EOVERFLOW if the value does not fit in *val_len (a truncated,
 * NUL-terminated copy is still made), or a negative errno. */
283 static int cfs_get_environ(const char *key, char *value, int *val_len)
285 struct mm_struct *mm;
287 int buf_len = PAGE_SIZE;
288 int key_len = strlen(key);
/* scratch buffer: one page of the environment is scanned at a time */
294 buffer = kmalloc(buf_len, GFP_USER);
298 mm = get_task_mm(current);
304 addr = mm->env_start;
305 while (addr < mm->env_end) {
306 int this_len, retval, scan_len;
307 char *env_start, *env_end;
309 memset(buffer, 0, buf_len);
311 this_len = min_t(int, mm->env_end - addr, buf_len);
312 retval = cfs_access_process_vm(current, mm, addr, buffer,
/* read failed: propagate the error */
315 GOTO(out, rc = retval);
316 else if (retval != this_len)
321 /* Parse the buffer to find out the specified key/value pair.
322 * The "key=value" entries are separated by '\0'.
330 env_end = memscan(env_start, '\0', scan_len);
331 LASSERT(env_end >= env_start &&
332 env_end <= env_start + scan_len);
334 /* The last entry of this buffer cross the buffer
335 * boundary, reread it in next cycle.
337 if (unlikely(env_end - env_start == scan_len)) {
338 /* Just skip the entry larger than page size,
339 * it can't be jobID env variable.
341 if (unlikely(scan_len == this_len))
346 } else if (unlikely(skip)) {
351 entry_len = env_end - env_start;
352 CDEBUG(D_INFO, "key: %s, entry: %s\n", key, entry);
354 /* Key length + length of '=' */
355 if (entry_len > key_len + 1 &&
356 entry[key_len] == '=' &&
357 !memcmp(entry, key, key_len)) {
358 entry += key_len + 1;
359 entry_len -= key_len + 1;
361 /* The 'value' buffer passed in is too small.
362 * Copy what fits, but return -EOVERFLOW.
364 if (entry_len >= *val_len) {
365 memcpy(value, entry, *val_len);
366 value[*val_len - 1] = 0;
367 GOTO(out, rc = -EOVERFLOW);
/* full value fits: copy and report its length */
370 memcpy(value, entry, entry_len);
371 *val_len = entry_len;
/* advance past this entry (+1 for the '\0' separator) */
375 scan_len -= (env_end - env_start + 1);
376 env_start = env_end + 1;
/* whole environment scanned without a match */
379 GOTO(out, rc = -ENOENT);
383 kfree((void *)buffer);
388 * Get jobid of current process by reading the environment variable
389 * stored in between the "env_start" & "env_end" of task struct.
391 * If some job scheduler doesn't store jobid in the "env_start/end",
392 * then an upcall could be issued here to get the jobid by utilizing
393 * the userspace tools/API. Then, the jobid must be cached.
/* Fetch @jobid_var from the current process's environment into @jobid
 * (length returned via *jobid_len).  Wraps cfs_get_environ() and adds
 * rate-limited logging: an oversized value warns at most once per day,
 * expected failures (-ENOENT/-EINVAL/-EDEADLK) log at D_INFO only. */
395 static int jobid_get_from_environ(char *jobid_var, char *jobid, int *jobid_len)
399 rc = cfs_get_environ(jobid_var, jobid, jobid_len);
403 if (rc == -EOVERFLOW) {
404 /* For the PBS_JOBID and LOADL_STEP_ID keys (which are
405 * variable length strings instead of just numbers), it
406 * might make sense to keep the unique parts for JobID,
407 * instead of just returning an error. That means a
408 * larger temp buffer for cfs_get_environ(), then
409 * truncating the string at some separator to fit into
410 * the specified jobid_len. Fix later if needed. */
411 static ktime_t printed;
/* warn at most once per 24h about a too-long jobid value */
413 if (unlikely(ktime_to_ns(printed) == 0 ||
414 ktime_after(ktime_get(),
415 ktime_add_ns(printed,
416 3600ULL * 24 * NSEC_PER_SEC)))) {
417 LCONSOLE_WARN("jobid: '%s' value too large (%d)\n",
418 obd_jobid_var, *jobid_len);
419 printed = ktime_get();
424 CDEBUG_LIMIT((rc == -ENOENT || rc == -EINVAL ||
425 rc == -EDEADLK) ? D_INFO : D_ERROR,
426 "jobid: get '%s' failed: rc = %d\n",
435 * jobid_should_free_item
437 * Each item is checked to see if it should be released
438 * Removed from hash table by caller
439 * Actually freed in jobid_put_locked
441 * Returns 1 if item is to be freed, 0 if it is to be kept
/* cfs_hash_cond_del() predicate: decide whether a jobid_pid_map entry
 * should be removed.  @data is the jobid string to match; "" means
 * "free any idle entry".  Returns 1 to free, 0 to keep. */
444 static int jobid_should_free_item(void *obj, void *data)
447 struct jobid_pid_map *pidmap = obj;
454 WARN_ON_ONCE(kref_read(&pidmap->jp_refcount) != 1);
458 spin_lock(&pidmap->jp_lock);
459 /* prevent newly inserted items from deleting */
460 if (jobid[0] == '\0' && kref_read(&pidmap->jp_refcount) == 1)
/* entry unused for longer than DELETE_INTERVAL: expire it */
462 else if (ktime_get_real_seconds() - pidmap->jp_time > DELETE_INTERVAL)
/* explicit match on the jobid being cleared */
464 else if (strcmp(pidmap->jp_jobid, jobid) == 0)
466 spin_unlock(&pidmap->jp_lock);
472 * jobid_name_is_valid
474 * Checks that the jobid is not the name of a reserved Lustre/kernel thread
476 * Returns true if jobid is valid
477 * Returns false if jobid looks like it's a Lustre process
/* Return false for empty jobids and for names matching (by prefix) any
 * reserved Lustre/kernel thread name; true otherwise. */
479 static bool jobid_name_is_valid(char *jobid)
481 const char *const lustre_reserved[] = { "ll_ping", "ptlrpc",
482 "ldlm", "ll_sa", "kworker",
483 "kswapd", "writeback", "irq",
/* empty string is never a valid jobid */
487 if (jobid[0] == '\0')
490 for (i = 0; lustre_reserved[i] != NULL; i++) {
/* prefix match, so e.g. "kworker/0:0" is also rejected */
491 if (strncmp(jobid, lustre_reserved[i],
492 strlen(lustre_reserved[i])) == 0)
499 * jobid_get_from_cache()
501 * Returns contents of jobid_var from process environment for current PID,
502 * or from the per-session jobid table.
503 * Values fetch from process environment will be cached for some time to avoid
504 * the overhead of scanning the environment.
506 * Return: -ENOMEM if allocating a new pidmap fails
507 * -ENOENT if no entry could be found
508 * +ve string length for success (something was returned in jobid)
/* Look up the jobid for the current PID, preferring the per-session
 * table (JOBSTATS_SESSION) and otherwise a per-PID cache of values read
 * from the process environment.  Entries are refreshed after
 * RESCAN_INTERVAL and the cache is swept every DELETE_INTERVAL.
 * Returns the jobid length on success, -ENOENT if none, -ENOMEM on
 * allocation failure. */
510 static int jobid_get_from_cache(char *jobid, size_t joblen)
512 static time64_t last_expire;
513 bool expire_cache = false;
514 pid_t pid = current->pid;
515 struct jobid_pid_map *pidmap = NULL;
516 time64_t now = ktime_get_real_seconds();
/* session mode: take the jobid from the per-session rhashtable */
520 if (strcmp(obd_jobid_var, JOBSTATS_SESSION) == 0) {
524 jid = jobid_current();
526 strscpy(jobid, jid, joblen);
527 joblen = strlen(jobid);
535 LASSERT(jobid_hash != NULL);
537 /* scan hash periodically to remove old PID entries from cache */
538 spin_lock(&jobid_hash_lock);
539 if (unlikely(last_expire + DELETE_INTERVAL <= now)) {
543 spin_unlock(&jobid_hash_lock);
/* deliberately unmatchable jobid: only time-based expiry applies */
546 cfs_hash_cond_del(jobid_hash, jobid_should_free_item,
547 "intentionally_bad_jobid");
549 /* first try to find PID in the hash and use that value */
550 pidmap = cfs_hash_lookup(jobid_hash, &pid);
551 if (pidmap == NULL) {
552 struct jobid_pid_map *pidmap2;
554 OBD_ALLOC_PTR(pidmap);
556 GOTO(out, rc = -ENOMEM);
558 pidmap->jp_pid = pid;
560 pidmap->jp_jobid[0] = '\0';
561 spin_lock_init(&pidmap->jp_lock);
562 INIT_HLIST_NODE(&pidmap->jp_hash);
564 * @pidmap might be reclaimed just after it is added into
565 * hash list, init @jp_refcount as 1 to make sure memory
566 * could be not freed during access.
568 kref_init(&pidmap->jp_refcount);
571 * Add the newly created map to the hash, on key collision we
572 * lost a racing addition and must destroy our newly allocated
573 * map. The object which exists in the hash will be returned.
575 pidmap2 = cfs_hash_findadd_unique(jobid_hash, &pid,
577 if (unlikely(pidmap != pidmap2)) {
578 CDEBUG(D_INFO, "jobid: duplicate found for PID=%u\n",
580 OBD_FREE_PTR(pidmap);
586 * If pidmap is old (this is always true for new entries) refresh it.
587 * If obd_jobid_var is not found, cache empty entry and try again
588 * later, to avoid repeat lookups for PID if obd_jobid_var missing.
590 spin_lock(&pidmap->jp_lock);
591 if (pidmap->jp_time + RESCAN_INTERVAL <= now) {
592 char env_jobid[LUSTRE_JOBID_SIZE] = "";
593 int env_len = sizeof(env_jobid);
595 pidmap->jp_time = now;
/* drop the lock around the (sleeping) environment scan */
597 spin_unlock(&pidmap->jp_lock);
598 rc = jobid_get_from_environ(obd_jobid_var, env_jobid, &env_len);
600 CDEBUG(D_INFO, "jobid: PID mapping established: %d->%s\n",
601 pidmap->jp_pid, env_jobid);
602 spin_lock(&pidmap->jp_lock);
604 pidmap->jp_joblen = env_len;
605 strscpy(pidmap->jp_jobid, env_jobid,
606 sizeof(pidmap->jp_jobid));
608 } else if (rc == -ENOENT) {
609 /* It might have been deleted, clear out old entry */
610 pidmap->jp_joblen = 0;
611 pidmap->jp_jobid[0] = '\0';
616 * Regardless of how pidmap was found, if it contains a valid entry
617 * use that for now. If there was a technical error (e.g. -ENOMEM)
618 * use the old cached value until it can be looked up again properly.
619 * If a cached missing entry was found, return -ENOENT.
621 if (pidmap->jp_joblen) {
622 strscpy(jobid, pidmap->jp_jobid, joblen);
623 joblen = pidmap->jp_joblen;
628 spin_unlock(&pidmap->jp_lock);
630 cfs_hash_put(jobid_hash, &pidmap->jp_hash);
634 return rc < 0 ? rc : joblen;
638 * jobid_print_current_comm()
640 * Print current comm name into the provided jobid buffer, and trim names of
641 * kernel threads like "kworker/0:0" to "kworker" or "ll_sa_12345" to "ll_sa"
643 * Return: number of chars printed to jobid
/* Print current->comm into @jobid.  For kernel threads, well-known
 * names are trimmed to their base (e.g. "kworker/0:0" -> "kworker").
 * Returns the number of characters snprintf() would have written. */
645 static int jobid_print_current_comm(char *jobid, ssize_t joblen)
647 const char *const names[] = {"kworker", "kswapd", "ll_sa", "ll_agl",
/* only kernel threads get their names trimmed */
651 if (current->flags & PF_KTHREAD) {
652 for (i = 0; names[i] != NULL; i++) {
653 if (strncmp(current->comm, names[i],
654 strlen(names[i])) == 0)
655 return snprintf(jobid, joblen, "%s", names[i]);
659 return snprintf(jobid, joblen, "%s", current->comm);
663 * jobid_interpret_string()
665 * Interpret the jobfmt string to expand specified fields, like coredumps do:
669 * %H = short hostname
670 * %j = jobid from environment
674 * Unknown escape strings are dropped. Other characters are copied through,
675 * excluding whitespace (to avoid making jobid parsing difficult).
677 * Return: -EOVERFLOW if the expanded string does not fit within @joblen
/* Expand the @jobfmt template into @jobid, coredump-pattern style:
 * %e=comm, %g=gid, %h=hostname, %H=short hostname, %j=env jobid,
 * %p=pid, %u=uid.  Unknown %x escapes are dropped; whitespace in the
 * format is skipped.  Returns 0 on success, -EOVERFLOW if the expanded
 * string did not fit in @joblen. */
680 static int jobid_interpret_string(const char *jobfmt, char *jobid,
/* consume the format while at least one byte + NUL remains */
685 while ((c = *jobfmt++) && joblen > 1) {
689 if (isspace(c)) /* Don't allow embedded spaces */
700 switch ((f = *jobfmt++)) {
701 case 'e': /* executable name */
702 l = jobid_print_current_comm(jobid, joblen);
704 case 'g': /* group ID */
705 l = snprintf(jobid, joblen, "%u",
706 from_kgid(&init_user_ns, current_fsgid()));
708 case 'h': /* hostname */
709 l = snprintf(jobid, joblen, "%s",
710 init_utsname()->nodename);
712 case 'H': /* short hostname. Cut at first dot */
713 l = snprintf(jobid, joblen, "%s",
714 init_utsname()->nodename);
715 p = strnchr(jobid, joblen, '.');
721 case 'j': /* jobid stored in process environment */
722 l = jobid_get_from_cache(jobid, joblen);
726 case 'p': /* process ID */
727 l = snprintf(jobid, joblen, "%u", current->pid);
729 case 'u': /* user ID */
730 l = snprintf(jobid, joblen, "%u",
731 from_kuid(&init_user_ns, current_fsuid()));
733 case '\0': /* '%' at end of format string */
736 default: /* drop unknown %x format strings */
744 * This points at the end of the buffer, so long as jobid is always
745 * incremented the same amount as joblen is decremented.
748 jobid[joblen - 1] = '\0';
750 return joblen < 0 ? -EOVERFLOW : 0;
754 * Hash initialization, copied from server-side job stats bucket sizes
/* Bucket sizing for the PID->JobID hash, matching server-side job stats. */
756 #define HASH_JOBID_BKT_BITS 5
757 #define HASH_JOBID_CUR_BITS 7
758 #define HASH_JOBID_MAX_BITS 12
/* Create the PID->JobID cache hash and the per-session rhashtable.
 * Returns 0 on success or a negative errno; on rhashtable_init()
 * failure the already-created jobid_hash reference is dropped. */
760 int jobid_cache_init(void)
768 spin_lock_init(&jobid_hash_lock);
769 jobid_hash = cfs_hash_create("JOBID_HASH", HASH_JOBID_CUR_BITS,
770 HASH_JOBID_MAX_BITS, HASH_JOBID_BKT_BITS,
771 0, CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
772 &jobid_hash_ops, CFS_HASH_DEFAULT);
776 rc = rhashtable_init(&session_jobids, &jobid_params);
/* undo jobid_hash creation if the session table failed */
778 cfs_hash_putref(jobid_hash);
785 EXPORT_SYMBOL(jobid_cache_init);
/* Tear down both jobid tables: detach jobid_hash under the lock, stop
 * the prune worker, drain and release the PID cache, then destroy the
 * per-session table (jobid_free drops each entry's pid reference). */
787 void jobid_cache_fini(void)
789 struct cfs_hash *tmp_hash;
792 spin_lock(&jobid_hash_lock);
793 tmp_hash = jobid_hash;
795 spin_unlock(&jobid_hash_lock);
/* no prune work may run once the tables are being destroyed */
797 cancel_delayed_work_sync(&jobid_prune_work);
799 if (tmp_hash != NULL) {
/* NULL jobid data: jobid_should_free_item frees every idle entry */
800 cfs_hash_cond_del(tmp_hash, jobid_should_free_item, NULL);
801 cfs_hash_putref(tmp_hash);
803 rhashtable_free_and_destroy(&session_jobids, jobid_free, NULL);
809 EXPORT_SYMBOL(jobid_cache_fini);
812 * Hash operations for pid<->jobid
/* cfs_hash op: hash the pid_t key with djb2 into @bits buckets. */
815 jobid_hashfn(struct cfs_hash *hs, const void *key, const unsigned int bits)
817 return cfs_hash_djb2_hash(key, sizeof(pid_t), bits);
/* cfs_hash op: return a pointer to the entry's pid_t key. */
820 static void *jobid_key(struct hlist_node *hnode)
822 struct jobid_pid_map *pidmap;
824 pidmap = hlist_entry(hnode, struct jobid_pid_map, jp_hash);
825 return &pidmap->jp_pid;
/* cfs_hash op: compare a lookup key against an entry's pid.
 * Returns non-zero on match (cfs_hash "equal" convention). */
828 static int jobid_keycmp(const void *key, struct hlist_node *hnode)
830 const pid_t *pid_key1;
831 const pid_t *pid_key2;
833 LASSERT(key != NULL);
834 pid_key1 = (pid_t *)key;
835 pid_key2 = (pid_t *)jobid_key(hnode);
837 return *pid_key1 == *pid_key2;
/* cfs_hash op: map a hash-list node back to its jobid_pid_map. */
840 static void *jobid_object(struct hlist_node *hnode)
842 return hlist_entry(hnode, struct jobid_pid_map, jp_hash);
/* cfs_hash op: take a reference on an entry returned by lookup. */
845 static void jobid_get(struct cfs_hash *hs, struct hlist_node *hnode)
847 struct jobid_pid_map *pidmap;
849 pidmap = hlist_entry(hnode, struct jobid_pid_map, jp_hash);
851 kref_get(&pidmap->jp_refcount);
/* kref release callback: free the entry once its last reference drops. */
854 static void jobid_put_locked_free(struct kref *kref)
856 struct jobid_pid_map *pidmap = container_of(kref, struct jobid_pid_map,
859 CDEBUG(D_INFO, "Freeing: %d->%s\n", pidmap->jp_pid, pidmap->jp_jobid);
860 OBD_FREE_PTR(pidmap);
/* cfs_hash op: drop a reference; frees via jobid_put_locked_free()
 * when the count reaches zero. */
863 static void jobid_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
865 struct jobid_pid_map *pidmap;
870 pidmap = hlist_entry(hnode, struct jobid_pid_map, jp_hash);
871 LASSERT(kref_read(&pidmap->jp_refcount) > 0);
872 kref_put(&pidmap->jp_refcount, jobid_put_locked_free);
/* Operations table wiring the helpers above into jobid_hash.  The same
 * put routine serves both locked and unlocked contexts. */
875 static struct cfs_hash_ops jobid_hash_ops = {
876 .hs_hash = jobid_hashfn,
877 .hs_keycmp = jobid_keycmp,
879 .hs_object = jobid_object,
881 .hs_put = jobid_put_locked,
882 .hs_put_locked = jobid_put_locked,
886 * Generate the job identifier string for this process for tracking purposes.
888 * Fill in @jobid string based on the value of obd_jobid_var:
889 * JOBSTATS_DISABLE: none
890 * JOBSTATS_NODELOCAL: content of obd_jobid_name (jobid_interpret_string())
891 * JOBSTATS_PROCNAME_UID: process name/UID
892 * JOBSTATS_SESSION per-session value set by
893 * /sys/fs/lustre/jobid_this_session
894 * anything else: look up obd_jobid_var in the processes environment
896 * Return -ve error number, 0 on success.
/* Build the jobid string for the current process into @jobid according
 * to obd_jobid_var (disabled / nodelocal / procname_uid / session /
 * environment variable name).  Returns 0 on success or a negative
 * errno; @jobid is always NUL-handled within @joblen. */
898 int lustre_get_jobid(char *jobid, size_t joblen)
900 char id[LUSTRE_JOBID_SIZE] = "";
901 int len = min_t(int, joblen, LUSTRE_JOBID_SIZE);
/* need room for at least one character plus the NUL */
905 if (unlikely(joblen < 2)) {
911 if (strcmp(obd_jobid_var, JOBSTATS_DISABLE) == 0) {
912 /* Jobstats isn't enabled */
913 memset(jobid, 0, joblen);
917 if (strcmp(obd_jobid_var, JOBSTATS_NODELOCAL) == 0) {
918 /* Whole node dedicated to single job */
919 rc = jobid_interpret_string(obd_jobid_name, id, len);
920 } else if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) {
921 rc = jobid_interpret_string("%e.%u", id, len);
922 } else if (strcmp(obd_jobid_var, JOBSTATS_SESSION) == 0 ||
923 jobid_name_is_valid(current->comm)) {
925 * per-process jobid wanted, either from environment or from
926 * per-session setting.
927 * If obd_jobid_name contains "%j" or if getting the per-process
928 * jobid directly fails, fall back to using obd_jobid_name.
931 if (!strnstr(obd_jobid_name, "%j", joblen))
932 rc = jobid_get_from_cache(id, len);
934 /* fall back to jobid_name if jobid_var not available */
936 int rc2 = jobid_interpret_string(obd_jobid_name,
/* hand the assembled id back to the caller */
943 memcpy(jobid, id, len);
946 EXPORT_SYMBOL(lustre_get_jobid);
951 * Search cache for JobID given by @find_jobid.
952 * If any entries in the hash table match the value, they are removed
/* Remove every PID-cache entry whose jobid matches @find_jobid
 * (trailing newline stripped).  No-op before the cache is initialized. */
954 void lustre_jobid_clear(const char *find_jobid)
956 char jobid[LUSTRE_JOBID_SIZE];
959 if (jobid_hash == NULL)
962 strscpy(jobid, find_jobid, sizeof(jobid));
963 /* trim \n off the end of the incoming jobid */
964 end = strchr(jobid, '\n');
965 if (end && *end == '\n')
968 CDEBUG(D_INFO, "Clearing Jobid: %s\n", jobid);
969 cfs_hash_cond_del(jobid_hash, jobid_should_free_item, jobid)
971 CDEBUG(D_INFO, "%d items remain in jobID table\n",
972 atomic_read(&jobid_hash->hs_count));