[fs/lustre-release.git] / lustre / ptlrpc / lproc_ptlrpc.c
index c78dacf..5c3aaca 100644
@@ -16,8 +16,8 @@
  * in the LICENSE file that accompanied this code).
  *
  * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see [sun.com URL with a
- * copy of GPLv2].
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  *
  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  * CA 95054 USA or visit www.sun.com if you need additional information or
@@ -72,6 +72,7 @@ struct ll_rpc_opcode {
         { OST_SET_INFO,     "ost_set_info" },
         { OST_QUOTACHECK,   "ost_quotacheck" },
         { OST_QUOTACTL,     "ost_quotactl" },
+        { OST_QUOTA_ADJUST_QUNIT, "ost_quota_adjust_qunit" },
         { MDS_GETATTR,      "mds_getattr" },
         { MDS_GETATTR_NAME, "mds_getattr_lock" },
         { MDS_CLOSE,        "mds_close" },
@@ -106,7 +107,7 @@ struct ll_rpc_opcode {
         { MGS_SET_INFO,     "mgs_set_info" },
         { OBD_PING,         "obd_ping" },
         { OBD_LOG_CANCEL,   "llog_origin_handle_cancel" },
-        { OBD_QC_CALLBACK,  "obd_qc_callback" },
+        { OBD_QC_CALLBACK,  "obd_quota_callback" },
         { LLOG_ORIGIN_HANDLE_CREATE,     "llog_origin_handle_create" },
         { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" },
         { LLOG_ORIGIN_HANDLE_READ_HEADER,"llog_origin_handle_read_header" },
@@ -120,7 +121,9 @@ struct ll_rpc_opcode {
         { SEQ_QUERY,        "seq_query" },
         { SEC_CTX_INIT,     "sec_ctx_init" },
         { SEC_CTX_INIT_CONT,"sec_ctx_init_cont" },
-        { SEC_CTX_FINI,     "sec_ctx_fini" }
+        { SEC_CTX_FINI,     "sec_ctx_fini" },
+        { QUOTA_DQACQ,      "quota_acquire" },
+        { QUOTA_DQREL,      "quota_release" }
 };
 
 struct ll_eopcode {
@@ -132,26 +135,33 @@ struct ll_eopcode {
         { LDLM_EXTENT_ENQUEUE,  "ldlm_extent_enqueue" },
         { LDLM_FLOCK_ENQUEUE,   "ldlm_flock_enqueue" },
         { LDLM_IBITS_ENQUEUE,   "ldlm_ibits_enqueue" },
+        { MDS_REINT_SETATTR,    "mds_reint_setattr" },
         { MDS_REINT_CREATE,     "mds_reint_create" },
         { MDS_REINT_LINK,       "mds_reint_link" },
-        { MDS_REINT_OPEN,       "mds_reint_open" },
-        { MDS_REINT_SETATTR,    "mds_reint_setattr" },
+        { MDS_REINT_UNLINK,     "mds_reint_unlink" },
         { MDS_REINT_RENAME,     "mds_reint_rename" },
-        { MDS_REINT_UNLINK,     "mds_reint_unlink" }
+        { MDS_REINT_OPEN,       "mds_reint_open" },
+        { MDS_REINT_SETXATTR,   "mds_reint_setxattr" },
+        { BRW_READ_BYTES,       "read_bytes" },
+        { BRW_WRITE_BYTES,      "write_bytes" },
 };
 
 const char *ll_opcode2str(__u32 opcode)
 {
         /* When one of the assertions below fail, chances are that:
-         *     1) A new opcode was added in lustre_idl.h, but was
-         *        is missing from the table above.
+         *     1) A new opcode was added in include/lustre/lustre_idl.h,
+         *        but is missing from the table above.
          * or  2) The opcode space was renumbered or rearranged,
          *        and the opcode_offset() function in
          *        ptlrpc_internal.h needs to be modified.
          */
         __u32 offset = opcode_offset(opcode);
-        LASSERT(offset < LUSTRE_MAX_OPCODES);
-        LASSERT(ll_rpc_opcode_table[offset].opcode == opcode);
+        LASSERTF(offset < LUSTRE_MAX_OPCODES,
+                 "offset %u >= LUSTRE_MAX_OPCODES %u\n",
+                 offset, LUSTRE_MAX_OPCODES);
+        LASSERTF(ll_rpc_opcode_table[offset].opcode == opcode,
+                 "ll_rpc_opcode_table[%u].opcode %u != opcode %u\n",
+                 offset, ll_rpc_opcode_table[offset].opcode, opcode);
         return ll_rpc_opcode_table[offset].opname;
 }
 
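For readers unfamiliar with the pattern, ll_opcode2str() is a direct table lookup: opcode_offset() maps an RPC opcode to a slot in ll_rpc_opcode_table[], and the two assertions catch a table that has fallen out of sync with the opcode list. A minimal standalone sketch of the same technique follows; the opcodes, table and offset math below are simplified stand-ins, not the Lustre definitions.

#include <assert.h>
#include <stdio.h>

/* Simplified stand-ins for the real opcode space. */
enum { OST_READ = 3, OST_WRITE = 4, MDS_GETATTR = 33 };

struct rpc_opcode {
        unsigned int opcode;
        const char  *opname;
};

/* Table entries must stay in the order the offset function assumes. */
static const struct rpc_opcode opcode_table[] = {
        { OST_READ,     "ost_read"    },
        { OST_WRITE,    "ost_write"   },
        { MDS_GETATTR,  "mds_getattr" },
};
#define MAX_OPCODES (sizeof(opcode_table) / sizeof(opcode_table[0]))

/* Map an opcode to its table slot; a gap here is what the assertions catch. */
static unsigned int opcode_offset(unsigned int opcode)
{
        if (opcode >= MDS_GETATTR)
                return 2 + (opcode - MDS_GETATTR);
        return opcode - OST_READ;
}

static const char *opcode2str(unsigned int opcode)
{
        unsigned int offset = opcode_offset(opcode);

        /* Report the offending values before failing, like LASSERTF does. */
        if (offset >= MAX_OPCODES || opcode_table[offset].opcode != opcode) {
                fprintf(stderr, "bad opcode %u (offset %u)\n", opcode, offset);
                assert(0);
        }
        return opcode_table[offset].opname;
}

int main(void)
{
        printf("%s\n", opcode2str(OST_WRITE));   /* prints "ost_write" */
        return 0;
}

The LASSERTF variant introduced by the patch does the same thing in kernel context: it prints the offending offset and opcode values in the assertion message rather than failing silently.
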
@@ -174,7 +184,7 @@ void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
         LASSERT(*procroot_ret == NULL);
         LASSERT(*stats_ret == NULL);
 
-        svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES + LUSTRE_MAX_OPCODES, 0);
+        svc_stats = lprocfs_alloc_stats(EXTRA_MAX_OPCODES+LUSTRE_MAX_OPCODES,0);
         if (svc_stats == NULL)
                 return;
 
@@ -199,9 +209,20 @@ void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir,
         lprocfs_counter_init(svc_stats, PTLRPC_REQBUF_AVAIL_CNTR,
                              svc_counter_config, "reqbuf_avail", "bufs");
         for (i = 0; i < EXTRA_LAST_OPC; i++) {
+                char *units;
+
+                switch(i) {
+                case BRW_WRITE_BYTES:
+                case BRW_READ_BYTES:
+                        units = "bytes";
+                        break;
+                default:
+                        units = "reqs";
+                        break;
+                }
                 lprocfs_counter_init(svc_stats, PTLRPC_LAST_CNTR + i,
                                      svc_counter_config,
-                                     ll_eopcode2str(i), "reqs");
+                                     ll_eopcode2str(i), units);
         }
         for (i = 0; i < LUSTRE_MAX_OPCODES; i++) {
                 __u32 opcode = ll_rpc_opcode_table[i].opcode;
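The loop above now picks per-counter units: the two bulk-I/O counters are registered as "bytes", while every other extra opcode stays in "reqs". A compact standalone illustration of that unit selection, with stand-in constants rather than the real EXTRA_LAST_OPC indices:

#include <stdio.h>

/* Stand-ins for the extra-opcode indices walked by the loop above. */
enum { BRW_READ_BYTES = 0, BRW_WRITE_BYTES = 1, LDLM_GLIMPSE = 2, EXTRA_LAST = 3 };

/* Bulk read/write counters accumulate byte counts; everything else counts RPCs. */
static const char *counter_units(int idx)
{
        switch (idx) {
        case BRW_READ_BYTES:
        case BRW_WRITE_BYTES:
                return "bytes";
        default:
                return "reqs";
        }
}

int main(void)
{
        int i;

        for (i = 0; i < EXTRA_LAST; i++)
                printf("counter %d uses units \"%s\"\n", i, counter_units(i));
        return 0;
}
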
@@ -493,6 +514,32 @@ static int ptlrpc_lprocfs_rd_timeouts(char *page, char **start, off_t off,
         return rc;
 }
 
+static int ptlrpc_lprocfs_rd_hp_ratio(char *page, char **start, off_t off,
+                                      int count, int *eof, void *data)
+{
+        struct ptlrpc_service *svc = data;
+        int rc = snprintf(page, count, "%d", svc->srv_hpreq_ratio);
+        return rc;
+}
+
+static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer,
+                                      unsigned long count, void *data)
+{
+        struct ptlrpc_service *svc = data;
+        int rc, val;
+
+        rc = lprocfs_write_helper(buffer, count, &val);
+        if (rc < 0)
+                return rc;
+        if (val < 0)
+                return -ERANGE;
+
+        spin_lock(&svc->srv_lock);
+        svc->srv_hpreq_ratio = val;
+        spin_unlock(&svc->srv_lock);
+        return count;
+}
+
 void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
                                      struct ptlrpc_service *svc)
 {
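The new high_priority_ratio entry follows the usual lprocfs pattern: a trivial read handler that prints the current value, and a write handler that parses the user buffer, rejects negative values, and updates the field under the service lock. A rough userspace analogue of the write path is sketched below; the pthread mutex and strtol stand in for the service spinlock and lprocfs_write_helper and are assumptions for the sketch only.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Minimal stand-in for the service state touched by the handler. */
struct service {
        pthread_mutex_t lock;
        int             hpreq_ratio;
};

/* Parse a decimal value from the user buffer and store it if it is sane;
 * return the number of bytes consumed on success, negative errno on error. */
static long write_hp_ratio(struct service *svc, const char *buffer, size_t count)
{
        char *end;
        long  val;

        errno = 0;
        val = strtol(buffer, &end, 10);
        if (errno != 0 || end == buffer)
                return -EINVAL;
        if (val < 0)
                return -ERANGE;         /* negative ratios make no sense */

        pthread_mutex_lock(&svc->lock);
        svc->hpreq_ratio = (int)val;
        pthread_mutex_unlock(&svc->lock);
        return (long)count;
}

int main(void)
{
        struct service svc = { PTHREAD_MUTEX_INITIALIZER, 2 };
        const char *input = "10\n";

        if (write_hp_ratio(&svc, input, strlen(input)) > 0)
                printf("hpreq_ratio is now %d\n", svc.hpreq_ratio);
        return 0;
}

On a live system the value is read and set through the high_priority_ratio file registered under the service's proc directory, as wired up in the lproc_vars table below.
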
@@ -508,6 +555,10 @@ void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry,
                 {.name       = "timeouts",
                  .read_fptr  = ptlrpc_lprocfs_rd_timeouts,
                  .data       = svc},
+                {.name       = "high_priority_ratio",
+                 .read_fptr  = ptlrpc_lprocfs_rd_hp_ratio,
+                 .write_fptr = ptlrpc_lprocfs_wr_hp_ratio,
+                 .data       = svc},
                 {NULL}
         };
         static struct file_operations req_history_fops = {
@@ -553,20 +604,36 @@ void ptlrpc_lprocfs_rpc_sent(struct ptlrpc_request *req)
         if (svc_stats == NULL || opc <= 0)
                 return;
         LASSERT(opc < LUSTRE_MAX_OPCODES);
-        /* These two use the ptlrpc_lprocfs_brw below */
-        if (!(opc == OST_WRITE || opc == OST_READ || op == LDLM_ENQUEUE
-              || op == MDS_REINT))
+        if (!(op == LDLM_ENQUEUE || op == MDS_REINT))
                 lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, 0);
 }
 
-void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int opc, int bytes)
+void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes)
 {
         struct lprocfs_stats *svc_stats;
+        int idx;
+
+        if (!req->rq_import)
+                return;
         svc_stats = req->rq_import->imp_obd->obd_svc_stats;
-        if (!svc_stats) 
+        if (!svc_stats)
                 return;
-        lprocfs_counter_add(svc_stats, opc + EXTRA_MAX_OPCODES, bytes);
+        idx = lustre_msg_get_opc(req->rq_reqmsg);
+        switch (idx) {
+        case OST_READ:
+                idx = BRW_READ_BYTES + PTLRPC_LAST_CNTR;
+                break;
+        case OST_WRITE:
+                idx = BRW_WRITE_BYTES + PTLRPC_LAST_CNTR;
+                break;
+        default:
+                LASSERTF(0, "unsupported opcode %u\n", idx);
+                break;
+        }
+
+        lprocfs_counter_add(svc_stats, idx, bytes);
 }
+
 EXPORT_SYMBOL(ptlrpc_lprocfs_brw);
 
 void ptlrpc_lprocfs_unregister_service(struct ptlrpc_service *svc)
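The reworked ptlrpc_lprocfs_brw() no longer takes the counter index from the caller; it derives it from the request opcode, so only OST_READ and OST_WRITE feed the two byte counters. A standalone sketch of that mapping, with simplified stand-in constants and a plain array in place of the lprocfs stats:

#include <assert.h>
#include <stdio.h>

/* Stand-in opcodes and counter layout. */
enum { OST_READ = 3, OST_WRITE = 4 };
enum { BRW_READ_BYTES = 0, BRW_WRITE_BYTES = 1, NUM_EXTRA = 2 };

static unsigned long long extra_counters[NUM_EXTRA];

/* Account transferred bytes against the counter matching the opcode. */
static void account_brw(unsigned int opcode, unsigned long bytes)
{
        int idx;

        switch (opcode) {
        case OST_READ:
                idx = BRW_READ_BYTES;
                break;
        case OST_WRITE:
                idx = BRW_WRITE_BYTES;
                break;
        default:
                assert(!"unsupported opcode");
                return;
        }
        extra_counters[idx] += bytes;
}

int main(void)
{
        account_brw(OST_WRITE, 1048576);
        account_brw(OST_READ, 4096);
        printf("read %llu bytes, wrote %llu bytes\n",
               extra_counters[BRW_READ_BYTES], extra_counters[BRW_WRITE_BYTES]);
        return 0;
}

In the patch itself, a request with any other opcode trips the LASSERTF in the default case rather than being silently miscounted.
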
@@ -595,16 +662,16 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer,
         struct obd_device *obd = data;
         char tmpbuf[sizeof(struct obd_uuid)];
 
-        /* Kludge code(deadlock situation): the lprocfs lock has been held 
+        /* Kludge code(deadlock situation): the lprocfs lock has been held
          * since the client is evicted by writting client's
-         * uuid/nid to procfs "evict_client" entry. However, 
+         * uuid/nid to procfs "evict_client" entry. However,
          * obd_export_evict_by_uuid() will call lprocfs_remove() to destroy
          * the proc entries under the being destroyed export{}, so I have
-         * to drop the lock at first here. 
+         * to drop the lock at first here.
          * - jay, jxiong@clusterfs.com */
-        class_incref(obd);
+        class_incref(obd, __FUNCTION__, cfs_current());
         LPROCFS_EXIT();
+
         sscanf(buffer, "%40s", tmpbuf);
         if (strncmp(tmpbuf, "nid:", 4) == 0)
                 obd_export_evict_by_nid(obd, tmpbuf + 4);
@@ -614,7 +681,7 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer,
                 obd_export_evict_by_uuid(obd, tmpbuf);
 
         LPROCFS_ENTRY();
-        class_decref(obd);
+        class_decref(obd, __FUNCTION__, cfs_current());
 
         return count;
 }
@@ -629,17 +696,12 @@ int lprocfs_wr_ping(struct file *file, const char *buffer,
         ENTRY;
 
         LPROCFS_CLIMP_CHECK(obd);
-        req = ptlrpc_request_alloc_pack(obd->u.cli.cl_import, &RQF_OBD_PING,
-                                        LUSTRE_OBD_VERSION, OBD_PING);
-
+        req = ptlrpc_prep_ping(obd->u.cli.cl_import);
         LPROCFS_CLIMP_EXIT(obd);
         if (req == NULL)
                 RETURN(-ENOMEM);
 
-        ptlrpc_request_set_replen(req);
         req->rq_send_state = LUSTRE_IMP_FULL;
-        req->rq_no_resend = 1;
-        req->rq_no_delay = 1;
 
         rc = ptlrpc_queue_wait(req);