/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices;

static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

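/*
 * A note on locking (an inference from the code, not from the original
 * comments): the u64 counters below take zram->stat64_lock because a
 * 64-bit load/store is not atomic on 32-bit architectures, so a reader
 * could otherwise observe a torn value. The plain u32 helpers above
 * appear to rely on callers already serializing updates, e.g. via
 * zram->lock in the write path.
 */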
static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}
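
/*
 * Worked example (illustrative): with 4 KiB pages on a 64-bit machine,
 * the loop above scans PAGE_SIZE / sizeof(unsigned long) = 4096 / 8 = 512
 * words and stops at the first non-zero one, so detecting an all-zero
 * page costs at most 512 word compares instead of a compression pass.
 */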

static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize >> 10
		);
	}

	zram->disksize &= PAGE_MASK;
}
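
/*
 * Worked example (assuming default_disksize_perc_ram is 25, as in this
 * driver's defaults): on a machine with 1 GiB of RAM and no disksize
 * given, the default becomes 25 * (1073741824 / 100) bytes, roughly
 * 256 MiB, and the final "&= PAGE_MASK" rounds the value down to a
 * whole number of pages.
 */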

static void zram_free_page(struct zram *zram, size_t index)
{
	void *handle = zram->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		__free_page(handle);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	zs_free(zram->mem_pool, handle);

	if (zram->table[index].size <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size,
			zram->table[index].size);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].handle = NULL;
	zram->table[index].size = 0;
}

static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}
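
/*
 * A note on the pattern above: kmap_atomic() is required because bio
 * pages may live in highmem on 32-bit configurations, and
 * flush_dcache_page() keeps the kernel and user views of the page
 * coherent on architectures with aliasing data caches. The same
 * map/fill/flush sequence recurs in the read paths below.
 */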

static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
				     u32 index, int offset)
{
	struct page *page = bvec->bv_page;
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page);
	cmem = kmap_atomic(zram->table[index].handle);

	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
	kunmap_atomic(cmem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}
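
/*
 * Example (illustrative, assuming 4 KiB pages): a bvec with
 * bv_len == 4096 covers a whole zram page and can be handled in place,
 * while one with bv_len == 1024 is partial and forces the temporary
 * buffer / read-modify-write handling in the bvec read and write paths.
 */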

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].handle)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			 (ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				    zram->table[index].size,
				    uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);
		kfree(uncmem);
	}

	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
	kunmap_atomic(user_mem);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}

static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	struct zobj_header *zheader;
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
	    !zram->table[index].handle) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = zs_map_object(zram->mem_pool, zram->table[index].handle);

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		memcpy(mem, cmem, PAGE_SIZE);
		/* unmap must pair with zs_map_object() above */
		zs_unmap_object(zram->mem_pool, zram->table[index].handle);
		return 0;
	}

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				    zram->table[index].size,
				    mem, &clen);
	zs_unmap_object(zram->mem_pool, zram->table[index].handle);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret;
	u32 store_offset;
	size_t clen;
	void *handle;
	struct zobj_header *zheader;
	struct page *page, *page_store;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].handle ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);

	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	/*
	 * Page is incompressible. Store it as-is (uncompressed)
	 * since we do not want to return too many disk write
	 * errors which have the side effect of hanging the system.
	 */
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
		if (unlikely(!page_store)) {
			pr_info("Error allocating memory for "
				"incompressible page: %u\n", index);
			ret = -ENOMEM;
			goto out;
		}

		store_offset = 0;
		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_inc(&zram->stats.pages_expand);
		handle = page_store;
		src = kmap_atomic(page);
		cmem = kmap_atomic(page_store);
		goto memstore;
	}

	handle = zs_malloc(zram->mem_pool, clen + sizeof(*zheader));
	if (!handle) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(zram->mem_pool, handle);

memstore:
#if 0
	/* Back-reference needed for memory defragmentation */
	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
		zheader = (struct zobj_header *)cmem;
		zheader->table_idx = index;
		cmem += sizeof(*zheader);
	}
#endif

	memcpy(cmem, src, clen);

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		kunmap_atomic(cmem);
		kunmap_atomic(src);
	} else {
		zs_unmap_object(zram->mem_pool, handle);
	}

	zram->table[index].handle = handle;
	zram->table[index].size = clen;

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}
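
/*
 * Design note: zram->lock is a rwsem, so concurrent readers may
 * decompress in parallel, while a writer gets exclusive access because
 * it may free and reallocate the very table entry it is updating.
 */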

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
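
/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * *offset == 3584 and bv_len == 512, 3584 + 512 >= 4096 so the page
 * index advances, and the new offset is (3584 + 512) % 4096 == 0,
 * i.e. the next segment starts on a fresh zram page.
 */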

static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
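
	/*
	 * Example (illustrative, assuming 512-byte sectors and 4 KiB
	 * pages, i.e. SECTORS_PER_PAGE == 8): bi_sector == 13 maps to
	 * zram page index 13 >> 3 == 1, at byte offset
	 * (13 & 7) << 9 == 2560 within that page.
	 */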

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}
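
/*
 * Example (illustrative, assuming ZRAM_LOGICAL_BLOCK_SIZE == 4096 and
 * hence ZRAM_SECTOR_PER_LOGICAL_BLOCK == 8): a bio starting at sector
 * 10 is rejected because 10 is not a multiple of 8, and a 512-byte bio
 * is rejected because its size is not a multiple of the logical block.
 */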

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done) && zram_init_device(zram))
		goto error;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error_unlock;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		goto error_unlock;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error_unlock:
	up_read(&zram->init_lock);
error:
	bio_io_error(bio);
}

void __zram_reset_device(struct zram *zram)
{
	size_t index;

	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		void *handle = zram->table[index].handle;
		if (!handle)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(handle);
		else
			zs_free(zram->mem_pool, handle);
	}

	vfree(zram->table);
	zram->table = NULL;

	zs_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	down_write(&zram->init_lock);

	if (zram->init_done) {
		up_write(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		ret = -ENOMEM;
		goto fail_no_table;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = zs_create_pool("zram", GFP_NOIO | __GFP_HIGHMEM);
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	up_write(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

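/*
 * Called by the swap subsystem when a swap slot on this device is
 * freed, so zram can drop the compressed copy immediately instead of
 * waiting for the slot to be overwritten.
 */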
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group\n");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

unsigned int zram_get_num_devices(void)
{
	return num_devices;
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
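
/*
 * Usage sketch (illustrative): loading the module with
 *
 *   modprobe zram num_devices=4
 *
 * creates /dev/zram0 through /dev/zram3. Each device is then sized and
 * used like any other block device, e.g. as a swap target via mkswap
 * and swapon.
 */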

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");