commit 523de3019f56ab52b857e7b4ae6f03a605ef96ba
Author: zhaoxc0502 <zhaoxc0502@thundersoft.com>
Date:   Thu Jun 16 17:11:53 2022 +0800

    linux_block

    Change-Id: I38ecc75058b0e884b1a7b85a700d3ec794b93c75

diff --git a/block/blk-map.c b/block/blk-map.c
index ede73f4f7..61e94a5a6 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -488,7 +488,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
 		if (bytes > len)
 			bytes = len;

-		page = alloc_page(q->bounce_gfp | __GFP_ZERO | gfp_mask);
+		page = alloc_page(q->bounce_gfp | gfp_mask);
 		if (!page)
 			goto cleanup;

@@ -668,6 +668,12 @@ int blk_rq_unmap_user(struct bio *bio)
 }
 EXPORT_SYMBOL(blk_rq_unmap_user);

+#ifdef CONFIG_AHCI_IMX
+extern void *sg_io_buffer_hack;
+#else
+#define sg_io_buffer_hack NULL
+#endif
+
 /**
  * blk_rq_map_kern - map kernel data to a request, for passthrough requests
  * @q:		request queue where request should be inserted
@@ -694,7 +700,12 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
 	if (!len || !kbuf)
 		return -EINVAL;

+#ifdef CONFIG_AHCI_IMX
+	if ((kbuf != sg_io_buffer_hack) && (!blk_rq_aligned(q, addr, len)
+			|| object_is_on_stack(kbuf)))
+#else
 	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf))
+#endif
 		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
 	else
 		bio = bio_map_kern(q, kbuf, len, gfp_mask);
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c9f009cc0..bc596b8f9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -243,6 +243,12 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
 	return 0;
 }

+#ifdef CONFIG_AHCI_IMX
+extern void *sg_io_buffer_hack;
+#else
+#define sg_io_buffer_hack NULL
+#endif
+
 static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 				 struct bio *bio)
 {
@@ -272,7 +278,12 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 			ret = -EFAULT;
 	}

-	r = blk_rq_unmap_user(bio);
+	if (sg_io_buffer_hack && !hdr->iovec_count)
+		r = copy_to_user(hdr->dxferp, sg_io_buffer_hack,
+				hdr->dxfer_len);
+	else
+		r = blk_rq_unmap_user(bio);
+
 	if (!ret)
 		ret = r;

@@ -296,6 +307,9 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 	if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
 		return -EIO;

+	if (sg_io_buffer_hack && hdr->dxfer_len > 0x10000)
+		return -EIO;
+
 	if (hdr->dxfer_len)
 		switch (hdr->dxfer_direction) {
 		default:
@@ -341,9 +355,14 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,

 		ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL);
 		kfree(iov);
-	} else if (hdr->dxfer_len)
-		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
-				      GFP_KERNEL);
+	} else if (hdr->dxfer_len) {
+		if (sg_io_buffer_hack)
+			ret = blk_rq_map_kern(q, rq, sg_io_buffer_hack,
+					hdr->dxfer_len, GFP_KERNEL);
+		else
+			ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp,
+					hdr->dxfer_len, GFP_KERNEL);
+	}

 	if (ret)
 		goto out_free_cdb;
