+/*
+ * Allocate a struct cl_page for object @o.  The allocation size comes
+ * from the object header's coh_page_bufsize; pages of a given size are
+ * served from a per-size kmem cache in cl_page_kmem_array[], creating
+ * a new cache the first time a size is seen.  If every cache slot is
+ * already taken by other sizes, fall back to a plain allocation.
+ *
+ * cl_page_kmem_size_array[] and cl_page_kmem_array[] are parallel
+ * arrays: entry i holds the buffer size served by cache i, and a size
+ * of 0 marks the first unused slot.  Writers set the cache pointer
+ * first and then publish the size with smp_store_release() (under
+ * cl_page_kmem_mutex); readers use smp_load_acquire() on the size, so
+ * observing a nonzero size guarantees the cache pointer is visible.
+ * This makes the common lookup path lockless.
+ *
+ * Returns the new page with cp_kmem_index set to the cache index used,
+ * or -1 for the plain-allocation fallback (NOTE(review): presumably
+ * consulted by the free path to pick the matching release method —
+ * confirm against the corresponding free routine).  Returns NULL on
+ * allocation or cache-creation failure.
+ */
+static struct cl_page *__cl_page_alloc(struct cl_object *o)
+{
+ int i = 0;
+ struct cl_page *cl_page = NULL;
+ unsigned short bufsize = cl_object_header(o)->coh_page_bufsize;
+
+check:
+ /* the number of entries in cl_page_kmem_array is expected to
+ * only be 2-3 entries, so the lookup overhead should be low.
+ */
+ for ( ; i < ARRAY_SIZE(cl_page_kmem_array); i++) {
+ if (smp_load_acquire(&cl_page_kmem_size_array[i])
+ == bufsize) {
+ /* Matching cache found; the acquire load above pairs
+ * with the release store below, so the cache pointer
+ * read here is fully initialized.
+ */
+ OBD_SLAB_ALLOC_GFP(cl_page, cl_page_kmem_array[i],
+ bufsize, GFP_NOFS);
+ if (cl_page)
+ cl_page->cp_kmem_index = i;
+ return cl_page;
+ }
+ /* A zero size marks the first unused slot: no cache for
+ * bufsize exists yet, so go create one at index i.
+ */
+ if (cl_page_kmem_size_array[i] == 0)
+ break;
+ }
+
+ if (i < ARRAY_SIZE(cl_page_kmem_array)) {
+ char cache_name[32];
+
+ mutex_lock(&cl_page_kmem_mutex);
+ if (cl_page_kmem_size_array[i]) {
+ /* Lost the race: another thread filled slot i while we
+ * waited for the mutex.  Rescan starting from i —
+ * earlier slots are immutable once published, and the
+ * new occupant may even be our size.
+ */
+ mutex_unlock(&cl_page_kmem_mutex);
+ goto check;
+ }
+ snprintf(cache_name, sizeof(cache_name),
+ "cl_page_kmem-%u", bufsize);
+ cl_page_kmem_array[i] =
+ kmem_cache_create(cache_name, bufsize,
+ 0, 0, NULL);
+ if (cl_page_kmem_array[i] == NULL) {
+ mutex_unlock(&cl_page_kmem_mutex);
+ return NULL;
+ }
+ /* Publish the size only after the cache pointer is in
+ * place; pairs with the smp_load_acquire() in the lockless
+ * lookup loop above.
+ */
+ smp_store_release(&cl_page_kmem_size_array[i],
+ bufsize);
+ mutex_unlock(&cl_page_kmem_mutex);
+ /* Retry the lookup; it will now hit the slot just filled. */
+ goto check;
+ } else {
+ /* All cache slots are occupied by other sizes: allocate the
+ * page directly and mark it with cp_kmem_index == -1 so it
+ * is not returned to any kmem cache later.
+ */
+ OBD_ALLOC_GFP(cl_page, bufsize, GFP_NOFS);
+ if (cl_page)
+ cl_page->cp_kmem_index = -1;
+ }
+
+ return cl_page;
+}
+