return desc;
}
+EXPORT_SYMBOL(ptlrpc_prep_bulk_exp);
/**
* Starts bulk transfer for descriptor \a desc
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_start_bulk_transfer);
/**
* Server side bulk abort. Idempotent. Not thread-safe (i.e. only
CWARN("Unexpectedly long timeout: desc %p\n", desc);
}
}
+EXPORT_SYMBOL(ptlrpc_abort_bulk);
#endif /* HAVE_SERVER_SUPPORT */
/**
req->rq_xid, desc->bd_portal);
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_register_bulk);
/**
* Disconnect a bulk desc from the network. Idempotent. Not
}
RETURN(0);
}
+EXPORT_SYMBOL(ptlrpc_unregister_bulk);
static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags)
{
ptlrpc_connection_put(conn);
return rc;
}
+EXPORT_SYMBOL(ptlrpc_send_reply);
int ptlrpc_reply (struct ptlrpc_request *req)
{
else
return (ptlrpc_send_reply(req, 0));
}
+EXPORT_SYMBOL(ptlrpc_reply);
/**
* For request \a req send an error reply back. Create empty
rc = ptlrpc_send_reply(req, may_be_difficult);
RETURN(rc);
}
+EXPORT_SYMBOL(ptlrpc_send_error);
int ptlrpc_error(struct ptlrpc_request *req)
{
	int rc;

	/* Plain error reply: the "may be difficult" path is not needed here,
	 * so delegate with may_be_difficult = 0. */
	rc = ptlrpc_send_error(req, 0);
	return rc;
}
+EXPORT_SYMBOL(ptlrpc_error);
/**
* Send request \a request.
cfs_memory_pressure_restore(mpflag);
return rc;
}
+EXPORT_SYMBOL(ptl_send_rpc);
/**
* Register request buffer descriptor for request receiving.