• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/init.h>
4 #include <linux/module.h>
5 #include <linux/kernel.h>
6 #include <linux/wait.h>
7 #include <linux/fs.h>
8 #include <linux/io.h>
9 #include <linux/uaccess.h>
10 #include <linux/miscdevice.h>
11 
12 #include <linux/device.h>
13 #include <linux/pci_regs.h>
14 #include <linux/pci_ids.h>
15 #include <linux/pci.h>
16 
17 #include <uapi/linux/goldfish/goldfish_address_space.h>
18 
19 MODULE_DESCRIPTION("A Goldfish driver that allocates address space ranges in "
20 		   "the guest to populate them later in the host. This allows "
21 		   "sharing host's memory with the guest.");
22 MODULE_AUTHOR("Roman Kiryanov <rkir@google.com>");
23 MODULE_LICENSE("GPL v2");
24 
/* Set to 1 for verbose debug logging via AS_DPRINT(). */
#define AS_DEBUG 0

#if AS_DEBUG
	/*
	 * Debug print with function/line prefix.  No trailing semicolon in
	 * the expansion: callers supply it, so `if (x) AS_DPRINT(...);` is
	 * a single well-formed statement.
	 */
	#define AS_DPRINT(fmt, ...) \
		printk(KERN_ERR "%s:%d " fmt "\n", \
		       __func__, __LINE__, ##__VA_ARGS__)
#else
	/*
	 * Expand to an empty statement (not nothing) so the macro remains
	 * safe in unbraced if/else bodies when debugging is disabled.
	 */
	#define AS_DPRINT(fmt, ...) do {} while (0)
#endif
34 
/* MMIO register offsets within the control BAR (AS_PCI_CONTROL_BAR_ID). */
enum as_register_id {
	AS_REGISTER_COMMAND = 0,		/* write: run an as_command_id */
	AS_REGISTER_STATUS = 4,			/* read: command result (positive errno) */
	AS_REGISTER_GUEST_PAGE_SIZE = 8,	/* write: guest PAGE_SIZE */
	AS_REGISTER_BLOCK_SIZE_LOW = 12,	/* block size, low 32 bits */
	AS_REGISTER_BLOCK_SIZE_HIGH = 16,	/* block size, high 32 bits */
	AS_REGISTER_BLOCK_OFFSET_LOW = 20,	/* block offset, low 32 bits */
	AS_REGISTER_BLOCK_OFFSET_HIGH = 24,	/* block offset, high 32 bits */
	AS_REGISTER_PING = 28,			/* write: notify host; value is the handle */
	AS_REGISTER_PING_INFO_ADDR_LOW = 32,	/* ping_info page phys addr, low 32 bits */
	AS_REGISTER_PING_INFO_ADDR_HIGH = 36,	/* ping_info page phys addr, high 32 bits */
	AS_REGISTER_HANDLE = 40,		/* host-generated per-open-file handle */
	AS_REGISTER_PHYS_START_LOW = 44,	/* area BAR phys start, low 32 bits */
	AS_REGISTER_PHYS_START_HIGH = 48,	/* area BAR phys start, high 32 bits */
};

/* Commands written to AS_REGISTER_COMMAND; status read back afterwards. */
enum as_command_id {
	AS_COMMAND_ALLOCATE_BLOCK = 1,
	AS_COMMAND_DEALLOCATE_BLOCK = 2,
	AS_COMMAND_GEN_HANDLE = 3,
	AS_COMMAND_DESTROY_HANDLE = 4,
	AS_COMMAND_TELL_PING_INFO_ADDR = 5,
};

#define AS_PCI_VENDOR_ID	0x607D
#define AS_PCI_DEVICE_ID	0xF153
/* Initial capacity of a file's block array; doubled each time it fills. */
#define AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY 32
/* Value read from AS_REGISTER_HANDLE when handle generation failed. */
#define AS_INVALID_HANDLE (~(0))

/* BAR0 holds the control registers; BAR1 is the shared address area. */
enum as_pci_bar_id {
	AS_PCI_CONTROL_BAR_ID = 0,
	AS_PCI_AREA_BAR_ID = 1,
};
68 
/* Per-PCI-device state, created in create_as_device(). */
struct as_device_state {
	struct miscdevice	miscdevice;	/* /dev node; also open()'s private_data */
	struct pci_dev		*dev;
	struct as_driver_state	*driver_state;

	void __iomem		*io_registers;	/* mapped control BAR */

	void			*address_area;	/* to claim the address space */

	/* physical address to allocate from */
	unsigned long		address_area_phys_address;

	struct mutex		registers_lock;	/* protects registers */
};

/* One allocated/claimed range inside the address area. */
struct as_block {
	u64 offset;	/* byte offset from address_area_phys_address */
	u64 size;	/* length in bytes */
};

/* Growable array of blocks owned by one open file. */
struct as_allocated_blocks {
	struct as_block *blocks;  /* a dynamic array of allocated blocks */
	int blocks_size;	  /* number of entries in use */
	int blocks_capacity;	  /* allocated entries */
	struct mutex blocks_lock; /* protects operations with blocks */
};

/* Per-open-file state, created in as_open(). */
struct as_file_state {
	struct as_device_state *device_state;
	struct as_allocated_blocks allocated_blocks;	    /* guest-allocated */
	struct as_allocated_blocks shared_allocated_blocks; /* host-owned, claimed */
	struct goldfish_address_space_ping *ping_info;	    /* one page shared with host */
	struct mutex ping_info_lock;	/* protects ping_info */
	u32 handle; /* handle generated by the host */
};
104 
as_register_address(void __iomem * base,int offset)105 static void __iomem *as_register_address(void __iomem *base,
106 					 int offset)
107 {
108 	WARN_ON(!base);
109 
110 	return ((char __iomem *)base) + offset;
111 }
112 
/* Write a 32-bit @value to the register at @offset. */
static void as_write_register(void __iomem *registers,
			      int offset,
			      u32 value)
{
	void __iomem *addr = as_register_address(registers, offset);

	writel(value, addr);
}
119 
as_read_register(void __iomem * registers,int offset)120 static u32 as_read_register(void __iomem *registers, int offset)
121 {
122 	return readl(as_register_address(registers, offset));
123 }
124 
as_run_command(struct as_device_state * state,enum as_command_id cmd)125 static int as_run_command(struct as_device_state *state, enum as_command_id cmd)
126 {
127 	WARN_ON(!state);
128 
129 	as_write_register(state->io_registers, AS_REGISTER_COMMAND, cmd);
130 	return -as_read_register(state->io_registers, AS_REGISTER_STATUS);
131 }
132 
/* Notify the host: write @handle into the PING register. */
static void as_ping_impl(struct as_device_state *state, u32 handle)
{
	void __iomem *regs = state->io_registers;

	as_write_register(regs, AS_REGISTER_PING, handle);
}
137 
138 static long
as_ioctl_allocate_block_locked_impl(struct as_device_state * state,u64 * size,u64 * offset)139 as_ioctl_allocate_block_locked_impl(struct as_device_state *state,
140 				    u64 *size, u64 *offset)
141 {
142 	long res;
143 
144 	as_write_register(state->io_registers,
145 			  AS_REGISTER_BLOCK_SIZE_LOW,
146 			  lower_32_bits(*size));
147 	as_write_register(state->io_registers,
148 			  AS_REGISTER_BLOCK_SIZE_HIGH,
149 			  upper_32_bits(*size));
150 
151 	res = as_run_command(state, AS_COMMAND_ALLOCATE_BLOCK);
152 	if (!res) {
153 		u64 low = as_read_register(state->io_registers,
154 					   AS_REGISTER_BLOCK_OFFSET_LOW);
155 		u64 high = as_read_register(state->io_registers,
156 					    AS_REGISTER_BLOCK_OFFSET_HIGH);
157 		*offset = low | (high << 32);
158 
159 		low = as_read_register(state->io_registers,
160 				       AS_REGISTER_BLOCK_SIZE_LOW);
161 		high = as_read_register(state->io_registers,
162 					AS_REGISTER_BLOCK_SIZE_HIGH);
163 		*size = low | (high << 32);
164 	}
165 
166 	return res;
167 }
168 
169 static long
as_ioctl_unallocate_block_locked_impl(struct as_device_state * state,u64 offset)170 as_ioctl_unallocate_block_locked_impl(struct as_device_state *state, u64 offset)
171 {
172 	as_write_register(state->io_registers,
173 			  AS_REGISTER_BLOCK_OFFSET_LOW,
174 			  lower_32_bits(offset));
175 	as_write_register(state->io_registers,
176 			  AS_REGISTER_BLOCK_OFFSET_HIGH,
177 			  upper_32_bits(offset));
178 
179 	return as_run_command(state, AS_COMMAND_DEALLOCATE_BLOCK);
180 }
181 
/* Growth policy for the block array: double the capacity. */
static int as_blocks_grow_capacity(int old_capacity)
{
	WARN_ON(old_capacity < 0);

	return 2 * old_capacity;
}
188 
/*
 * Record an (offset, size) block as owned by this file, growing the
 * backing array (doubling) when it is full.
 * Returns 0 on success, -ERESTARTSYS if interrupted while waiting for
 * the lock, or -ENOMEM if the grown array cannot be allocated (the
 * existing array is left intact in that case).
 */
static int
as_blocks_insert(struct as_allocated_blocks *allocated_blocks,
		 u64 offset,
		 u64 size)
{
	int blocks_size;

	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
		return -ERESTARTSYS;

	blocks_size = allocated_blocks->blocks_size;

	WARN_ON(allocated_blocks->blocks_capacity < 1);
	WARN_ON(allocated_blocks->blocks_capacity <
		allocated_blocks->blocks_size);
	WARN_ON(!allocated_blocks->blocks);

	/* Array full: allocate a larger one, copy the entries over, swap. */
	if (allocated_blocks->blocks_capacity == blocks_size) {
		int new_capacity =
			as_blocks_grow_capacity(
				allocated_blocks->blocks_capacity);
		struct as_block *new_blocks =
			kcalloc(new_capacity,
				sizeof(allocated_blocks->blocks[0]),
				GFP_KERNEL);

		if (!new_blocks) {
			mutex_unlock(&allocated_blocks->blocks_lock);
			return -ENOMEM;
		}

		memcpy(new_blocks, allocated_blocks->blocks,
		       blocks_size * sizeof(allocated_blocks->blocks[0]));

		kfree(allocated_blocks->blocks);
		allocated_blocks->blocks = new_blocks;
		allocated_blocks->blocks_capacity = new_capacity;
	}

	WARN_ON(blocks_size >= allocated_blocks->blocks_capacity);

	allocated_blocks->blocks[blocks_size] =
		(struct as_block){ .offset = offset, .size = size };
	allocated_blocks->blocks_size = blocks_size + 1;

	mutex_unlock(&allocated_blocks->blocks_lock);
	return 0;
}
237 
238 static int
as_blocks_remove(struct as_allocated_blocks * allocated_blocks,u64 offset)239 as_blocks_remove(struct as_allocated_blocks *allocated_blocks, u64 offset)
240 {
241 	long res = -ENXIO;
242 	struct as_block *blocks;
243 	int blocks_size;
244 	int i;
245 
246 	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
247 		return -ERESTARTSYS;
248 
249 	blocks = allocated_blocks->blocks;
250 	WARN_ON(!blocks);
251 
252 	blocks_size = allocated_blocks->blocks_size;
253 	WARN_ON(blocks_size < 0);
254 
255 	for (i = 0; i < blocks_size; ++i) {
256 		if (offset == blocks[i].offset) {
257 			int last = blocks_size - 1;
258 
259 			if (last > i)
260 				blocks[i] = blocks[last];
261 
262 			--allocated_blocks->blocks_size;
263 			res = 0;
264 			break;
265 		}
266 	}
267 
268 	if (res)
269 		pr_err("%s: Block not found atoffset: 0x%llx\n",
270 			__func__, offset);
271 
272 	mutex_unlock(&allocated_blocks->blocks_lock);
273 	return res;
274 }
275 
276 static int
as_blocks_check_if_mine(struct as_allocated_blocks * allocated_blocks,u64 offset,u64 size)277 as_blocks_check_if_mine(struct as_allocated_blocks *allocated_blocks,
278 			u64 offset,
279 			u64 size)
280 {
281 	const u64 end = offset + size;
282 	int res = -EPERM;
283 	struct as_block *block;
284 	int blocks_size;
285 
286 	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
287 		return -ERESTARTSYS;
288 
289 	block = allocated_blocks->blocks;
290 	WARN_ON(!block);
291 
292 	blocks_size = allocated_blocks->blocks_size;
293 	WARN_ON(blocks_size < 0);
294 
295 	for (; blocks_size > 0; --blocks_size, ++block) {
296 		u64 block_offset = block->offset;
297 		u64 block_end = block_offset + block->size;
298 
299 		if (offset >= block_offset && end <= block_end) {
300 			res = 0;
301 			break;
302 		}
303 	}
304 
305 	mutex_unlock(&allocated_blocks->blocks_lock);
306 	return res;
307 }
308 
as_open(struct inode * inode,struct file * filp)309 static int as_open(struct inode *inode, struct file *filp)
310 {
311 	struct as_file_state *file_state;
312 	struct as_device_state *device_state;
313 	struct goldfish_address_space_ping *ping_info;
314 	u64 ping_info_phys;
315 	u64 ping_info_phys_returned;
316 	int err;
317 
318 	AS_DPRINT("Get free page");
319 	ping_info =
320 		(struct goldfish_address_space_ping *)
321 		__get_free_page(GFP_KERNEL);
322 	ping_info_phys = virt_to_phys(ping_info);
323 	AS_DPRINT("Got free page: %p 0x%llx", ping_info,
324 		  (unsigned long long)ping_info_phys);
325 
326 	if (!ping_info) {
327 		printk(KERN_ERR "Could not alloc goldfish_address_space command buffer!\n");
328 		err = -ENOMEM;
329 		goto err_ping_info_alloc_failed;
330 	}
331 
332 	file_state = kzalloc(sizeof(*file_state), GFP_KERNEL);
333 	if (!file_state) {
334 		err = -ENOMEM;
335 		goto err_file_state_alloc_failed;
336 	}
337 
338 	file_state->device_state =
339 		container_of(filp->private_data,
340 			     struct as_device_state,
341 			     miscdevice);
342 	device_state = file_state->device_state;
343 
344 	file_state->allocated_blocks.blocks =
345 		kcalloc(AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY,
346 				sizeof(file_state->allocated_blocks.blocks[0]),
347 				GFP_KERNEL);
348 
349 	if (!file_state->allocated_blocks.blocks) {
350 		err = -ENOMEM;
351 		goto err_file_state_blocks_alloc_failed;
352 	}
353 
354 	file_state->shared_allocated_blocks.blocks =
355 		kcalloc(
356 			AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY,
357 			sizeof(file_state->shared_allocated_blocks.blocks[0]),
358 			GFP_KERNEL);
359 
360 	if (!file_state->shared_allocated_blocks.blocks) {
361 		err = -ENOMEM;
362 		goto err_file_state_blocks_alloc_failed;
363 	}
364 
365 	file_state->allocated_blocks.blocks_size = 0;
366 	file_state->allocated_blocks.blocks_capacity =
367 		AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY;
368 	mutex_init(&file_state->allocated_blocks.blocks_lock);
369 
370 	file_state->shared_allocated_blocks.blocks_size = 0;
371 	file_state->shared_allocated_blocks.blocks_capacity =
372 		AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY;
373 	mutex_init(&file_state->shared_allocated_blocks.blocks_lock);
374 
375 	mutex_init(&file_state->ping_info_lock);
376 	file_state->ping_info = ping_info;
377 
378 	AS_DPRINT("Acq regs lock");
379 	mutex_lock(&device_state->registers_lock);
380 	AS_DPRINT("Got regs lock, gen handle");
381 	as_run_command(device_state, AS_COMMAND_GEN_HANDLE);
382 	file_state->handle = as_read_register(
383 		device_state->io_registers,
384 		AS_REGISTER_HANDLE);
385 	AS_DPRINT("Got regs lock, read handle: %u", file_state->handle);
386 	mutex_unlock(&device_state->registers_lock);
387 
388 	if (file_state->handle == AS_INVALID_HANDLE) {
389 		err = -EINVAL;
390 		goto err_gen_handle_failed;
391 	}
392 
393 	AS_DPRINT("Acq regs lock 2");
394 	mutex_lock(&device_state->registers_lock);
395 	AS_DPRINT("Acqd regs lock 2, write handle and ping info addr");
396 	as_write_register(
397 		device_state->io_registers,
398 		AS_REGISTER_HANDLE,
399 		file_state->handle);
400 	as_write_register(
401 		device_state->io_registers,
402 		AS_REGISTER_PING_INFO_ADDR_LOW,
403 		lower_32_bits(ping_info_phys));
404 	as_write_register(
405 		device_state->io_registers,
406 		AS_REGISTER_PING_INFO_ADDR_HIGH,
407 		upper_32_bits(ping_info_phys));
408 	AS_DPRINT("Do tell ping info addr");
409 	as_run_command(device_state, AS_COMMAND_TELL_PING_INFO_ADDR);
410 	ping_info_phys_returned =
411 		((u64)as_read_register(device_state->io_registers,
412 				       AS_REGISTER_PING_INFO_ADDR_LOW)) |
413 		((u64)as_read_register(device_state->io_registers,
414 				       AS_REGISTER_PING_INFO_ADDR_HIGH) << 32);
415 	AS_DPRINT("Read back");
416 
417 	if (ping_info_phys != ping_info_phys_returned) {
418 		printk(KERN_ERR "%s: Invalid result for ping info phys addr: expected 0x%llx, got 0x%llx\n",
419 		       __func__,
420 		       ping_info_phys, ping_info_phys_returned);
421 		err = -EINVAL;
422 		goto err_ping_info_failed;
423 	}
424 
425 	mutex_unlock(&device_state->registers_lock);
426 
427 	filp->private_data = file_state;
428 	return 0;
429 
430 err_ping_info_failed:
431 err_gen_handle_failed:
432 	kfree(file_state->allocated_blocks.blocks);
433 	kfree(file_state->shared_allocated_blocks.blocks);
434 err_file_state_blocks_alloc_failed:
435 	kfree(file_state);
436 err_file_state_alloc_failed:
437 	free_page((unsigned long)ping_info);
438 err_ping_info_alloc_failed:
439 	return err;
440 }
441 
/*
 * release() handler: destroy the host-side handle, return every block
 * this file allocated to the host, and free all per-file memory.
 */
static int as_release(struct inode *inode, struct file *filp)
{
	struct as_file_state *file_state = filp->private_data;
	struct as_allocated_blocks *allocated_blocks =
		&file_state->allocated_blocks;
	struct as_allocated_blocks *shared_allocated_blocks =
		&file_state->shared_allocated_blocks;
	struct goldfish_address_space_ping *ping_info = file_state->ping_info;
	struct as_device_state *state = file_state->device_state;
	int blocks_size, shared_blocks_size;
	int i;

	WARN_ON(!state);
	WARN_ON(!allocated_blocks);
	WARN_ON(!allocated_blocks->blocks);
	WARN_ON(allocated_blocks->blocks_size < 0);
	WARN_ON(!shared_allocated_blocks);
	WARN_ON(!shared_allocated_blocks->blocks);
	WARN_ON(shared_allocated_blocks->blocks_size < 0);
	WARN_ON(!ping_info);

	/* No further userspace access: sizes can be read without the locks. */
	blocks_size = allocated_blocks->blocks_size;
	shared_blocks_size = shared_allocated_blocks->blocks_size;

	mutex_lock(&state->registers_lock);

	as_write_register(state->io_registers, AS_REGISTER_HANDLE,
			  file_state->handle);
	as_run_command(state, AS_COMMAND_DESTROY_HANDLE);

	for (i = 0; i < blocks_size; ++i) {
		WARN_ON(as_ioctl_unallocate_block_locked_impl(
				state, allocated_blocks->blocks[i].offset));
	}

	/* Do not unalloc shared blocks as they are host-owned */

	mutex_unlock(&state->registers_lock);

	kfree(allocated_blocks->blocks);
	kfree(shared_allocated_blocks->blocks);
	free_page((unsigned long)ping_info);
	kfree(file_state);
	return 0;
}
487 
/*
 * Map @size bytes of the address area, starting vm_pgoff pages into it,
 * at vma->vm_start.  Ownership of the range was already verified by the
 * caller (as_mmap).
 */
static int as_mmap_impl(struct as_device_state *state,
			size_t size,
			struct vm_area_struct *vma)
{
	const unsigned long area_base_pfn =
		state->address_area_phys_address >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start,
			       area_base_pfn + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
501 
/*
 * mmap() handler: the requested range (vm_pgoff is an offset into the
 * address area) must lie entirely inside one block this file owns —
 * either one it allocated or a host-shared one it claimed — before it
 * is remapped into the caller's address space.
 */
static int as_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct as_file_state *file_state = filp->private_data;
	struct as_allocated_blocks *allocated_blocks =
		&file_state->allocated_blocks;
	struct as_allocated_blocks *shared_allocated_blocks =
		&file_state->shared_allocated_blocks;
	size_t size = PAGE_ALIGN(vma->vm_end - vma->vm_start);
	int res_check_nonshared, res_check_shared;

	WARN_ON(!allocated_blocks);

	res_check_nonshared =
		as_blocks_check_if_mine(allocated_blocks,
			vma->vm_pgoff << PAGE_SHIFT,
			size);

	res_check_shared =
		as_blocks_check_if_mine(shared_allocated_blocks,
			vma->vm_pgoff << PAGE_SHIFT,
			size);

	/* Map if either ownership check passed; otherwise report the
	 * non-shared check's error.
	 */
	if (res_check_nonshared && res_check_shared)
		return res_check_nonshared;
	else
		return as_mmap_impl(file_state->device_state, size, vma);
}
529 
as_ioctl_allocate_block_impl(struct as_device_state * state,struct goldfish_address_space_allocate_block * request)530 static long as_ioctl_allocate_block_impl(
531 	struct as_device_state *state,
532 	struct goldfish_address_space_allocate_block *request)
533 {
534 	long res;
535 
536 	if (mutex_lock_interruptible(&state->registers_lock))
537 		return -ERESTARTSYS;
538 
539 	res = as_ioctl_allocate_block_locked_impl(state,
540 						  &request->size,
541 						  &request->offset);
542 	if (!res) {
543 		request->phys_addr =
544 			state->address_area_phys_address + request->offset;
545 	}
546 
547 	mutex_unlock(&state->registers_lock);
548 	return res;
549 }
550 
551 static void
as_ioctl_unallocate_block_impl(struct as_device_state * state,u64 offset)552 as_ioctl_unallocate_block_impl(struct as_device_state *state, u64 offset)
553 {
554 	mutex_lock(&state->registers_lock);
555 	WARN_ON(as_ioctl_unallocate_block_locked_impl(state, offset));
556 	mutex_unlock(&state->registers_lock);
557 }
558 
/*
 * GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK: ask the host for a block,
 * record it as owned by this file, and copy the resulting offset/size/
 * phys_addr back to userspace.  If bookkeeping or the copy-out fails,
 * the freshly allocated block is returned to the host.
 */
static long
as_ioctl_allocate_block(struct as_allocated_blocks *allocated_blocks,
			struct as_device_state *state,
			void __user *ptr)
{
	long res;
	struct goldfish_address_space_allocate_block request;

	if (copy_from_user(&request, ptr, sizeof(request)))
		return -EFAULT;

	res = as_ioctl_allocate_block_impl(state, &request);
	if (!res) {
		res = as_blocks_insert(allocated_blocks,
				       request.offset,
				       request.size);

		if (res) {
			/* Could not record it; give the block back. */
			as_ioctl_unallocate_block_impl(state, request.offset);
		} else if (copy_to_user(ptr, &request, sizeof(request))) {
			/* Userspace never saw the block; give it back. */
			as_ioctl_unallocate_block_impl(state, request.offset);
			res = -EFAULT;
		}
	}

	return res;
}
586 
587 static long
as_ioctl_unallocate_block(struct as_allocated_blocks * allocated_blocks,struct as_device_state * state,void __user * ptr)588 as_ioctl_unallocate_block(struct as_allocated_blocks *allocated_blocks,
589 			  struct as_device_state *state,
590 			  void __user *ptr)
591 {
592 	long res;
593 	u64 offset;
594 
595 	if (copy_from_user(&offset, ptr, sizeof(offset)))
596 		return -EFAULT;
597 
598 	res = as_blocks_remove(allocated_blocks, offset);
599 	if (!res)
600 		as_ioctl_unallocate_block_impl(state, offset);
601 
602 	return res;
603 }
604 
605 static long
as_ioctl_claim_block(struct as_allocated_blocks * allocated_blocks,struct as_device_state * state,void __user * ptr)606 as_ioctl_claim_block(struct as_allocated_blocks *allocated_blocks,
607 			struct as_device_state *state,
608 			void __user *ptr)
609 {
610 	long res;
611 	struct goldfish_address_space_claim_shared request;
612 
613 	if (copy_from_user(&request, ptr, sizeof(request)))
614 		return -EFAULT;
615 
616 	res = as_blocks_insert(allocated_blocks,
617 				   request.offset,
618 				   request.size);
619 
620 	if (res)
621 		return res;
622 	else if (copy_to_user(ptr, &request, sizeof(request)))
623 		return -EFAULT;
624 
625 	return 0;
626 }
627 
628 static long
as_ioctl_unclaim_block(struct as_allocated_blocks * allocated_blocks,struct as_device_state * state,void __user * ptr)629 as_ioctl_unclaim_block(struct as_allocated_blocks *allocated_blocks,
630 			  struct as_device_state *state,
631 			  void __user *ptr)
632 {
633 	long res;
634 	u64 offset;
635 
636 	if (copy_from_user(&offset, ptr, sizeof(offset)))
637 		return -EFAULT;
638 
639 	res = as_blocks_remove(allocated_blocks, offset);
640 	if (res)
641 		pr_err("%s: as_blocks_remove failed (%ld)\n", __func__, res);
642 
643 	return res;
644 }
645 
/*
 * Copy the caller's ping request into the page shared with the host,
 * convert its offset into a device physical address, notify the host,
 * then copy the (possibly host-modified) page contents back out.
 * Caller must hold ping_info_lock.
 */
static long
as_ioctl_ping_impl(struct goldfish_address_space_ping *ping_info,
		   struct as_device_state *state,
		   u32 handle,
		   void __user *ptr)
{
	struct goldfish_address_space_ping user_copy;

	if (copy_from_user(&user_copy, ptr, sizeof(user_copy)))
		return -EFAULT;

	*ping_info = user_copy;

	/* Convert to phys addrs */
	ping_info->offset += state->address_area_phys_address;
	/* NOTE(review): this conversion is not undone before the copy-out
	 * below — presumably the host rewrites ping_info during the ping;
	 * confirm against the device model.
	 */

	mutex_lock(&state->registers_lock);
	as_ping_impl(state, handle);
	mutex_unlock(&state->registers_lock);

	memcpy(&user_copy, ping_info, sizeof(user_copy));
	if (copy_to_user(ptr, &user_copy, sizeof(user_copy)))
		return -EFAULT;

	return 0;
}
672 
/* GOLDFISH_ADDRESS_SPACE_IOCTL_PING, serialized on ping_info_lock. */
static long as_ioctl_ping(struct as_file_state *file_state, void __user *ptr)
{
	long res;

	mutex_lock(&file_state->ping_info_lock);
	res = as_ioctl_ping_impl(file_state->ping_info,
				 file_state->device_state,
				 file_state->handle,
				 ptr);
	mutex_unlock(&file_state->ping_info_lock);

	return res;
}
686 
as_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)687 static long as_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
688 {
689 	struct as_file_state *file_state = filp->private_data;
690 	long res = -ENOTTY;
691 
692 	switch (cmd) {
693 	case GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK:
694 		res = as_ioctl_allocate_block(&file_state->allocated_blocks,
695 						   file_state->device_state,
696 						   (void __user *)arg);
697 		break;
698 
699 	case GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK:
700 		res = as_ioctl_unallocate_block(&file_state->allocated_blocks,
701 						 file_state->device_state,
702 						 (void __user *)arg);
703 		break;
704 
705 	case GOLDFISH_ADDRESS_SPACE_IOCTL_PING:
706 		res = as_ioctl_ping(file_state, (void __user *)arg);
707 		break;
708 
709 	case GOLDFISH_ADDRESS_SPACE_IOCTL_CLAIM_SHARED:
710 		res = as_ioctl_claim_block(
711 			&file_state->shared_allocated_blocks,
712 			file_state->device_state,
713 			(void __user *)arg);
714 		break;
715 
716 	case GOLDFISH_ADDRESS_SPACE_IOCTL_UNCLAIM_SHARED:
717 		res = as_ioctl_unclaim_block(
718 			&file_state->shared_allocated_blocks,
719 			file_state->device_state,
720 			(void __user *)arg);
721 		break;
722 
723 	default:
724 		res = -ENOTTY;
725 	}
726 
727 	return res;
728 }
729 
/* fops for the misc device node exposed to userspace. */
static const struct file_operations userspace_file_operations = {
	.owner = THIS_MODULE,
	.open = as_open,
	.release = as_release,
	.mmap = as_mmap,
	.unlocked_ioctl = as_ioctl,
	.compat_ioctl = as_ioctl,
};
738 
ioremap_pci_bar(struct pci_dev * dev,int bar_id)739 static void __iomem __must_check *ioremap_pci_bar(struct pci_dev *dev,
740 						  int bar_id)
741 {
742 	void __iomem *io;
743 	unsigned long size = pci_resource_len(dev, bar_id);
744 
745 	if (!size)
746 		return IOMEM_ERR_PTR(-ENXIO);
747 
748 	io = ioremap(pci_resource_start(dev, bar_id), size);
749 	if (!io)
750 		return IOMEM_ERR_PTR(-ENOMEM);
751 
752 	return io;
753 }
754 
memremap_pci_bar(struct pci_dev * dev,int bar_id,unsigned long flags)755 static void __must_check *memremap_pci_bar(struct pci_dev *dev,
756 					   int bar_id,
757 					   unsigned long flags)
758 {
759 	void *mem;
760 	unsigned long size = pci_resource_len(dev, bar_id);
761 
762 	if (!size)
763 		return ERR_PTR(-ENXIO);
764 
765 	mem = memremap(pci_resource_start(dev, bar_id), size, flags);
766 	if (!mem)
767 		return ERR_PTR(-ENOMEM);
768 
769 	return mem;
770 }
771 
772 
fill_miscdevice(struct miscdevice * miscdev)773 static void fill_miscdevice(struct miscdevice *miscdev)
774 {
775 	memset(miscdev, 0, sizeof(*miscdev));
776 
777 	miscdev->minor = MISC_DYNAMIC_MINOR;
778 	miscdev->name = GOLDFISH_ADDRESS_SPACE_DEVICE_NAME;
779 	miscdev->fops = &userspace_file_operations;
780 }
781 
782 static int __must_check
create_as_device(struct pci_dev * dev,const struct pci_device_id * id)783 create_as_device(struct pci_dev *dev, const struct pci_device_id *id)
784 {
785 	int res;
786 	struct as_device_state *state;
787 
788 	state = kzalloc(sizeof(*state), GFP_KERNEL);
789 	if (!state)
790 		return -ENOMEM;
791 
792 	res = pci_request_region(dev,
793 				 AS_PCI_CONTROL_BAR_ID,
794 				 "Address space control");
795 	if (res) {
796 		pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
797 		       dev->bus->number,
798 		       dev->devfn,
799 		       AS_PCI_CONTROL_BAR_ID);
800 		goto out_free_device_state;
801 	}
802 
803 	res = pci_request_region(dev,
804 				 AS_PCI_AREA_BAR_ID,
805 				 "Address space area");
806 	if (res) {
807 		pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
808 		       dev->bus->number,
809 		       dev->devfn,
810 		       AS_PCI_AREA_BAR_ID);
811 		goto out_release_control_bar;
812 	}
813 
814 	fill_miscdevice(&state->miscdevice);
815 	res = misc_register(&state->miscdevice);
816 	if (res)
817 		goto out_release_area_bar;
818 
819 	state->io_registers = ioremap_pci_bar(dev,
820 					      AS_PCI_CONTROL_BAR_ID);
821 	if (IS_ERR(state->io_registers)) {
822 		res = PTR_ERR(state->io_registers);
823 		goto out_misc_deregister;
824 	}
825 
826 	state->address_area = memremap_pci_bar(dev,
827 					       AS_PCI_AREA_BAR_ID,
828 					       MEMREMAP_WB);
829 	if (IS_ERR(state->address_area)) {
830 		res = PTR_ERR(state->address_area);
831 		goto out_iounmap;
832 	}
833 
834 	state->address_area_phys_address =
835 		pci_resource_start(dev, AS_PCI_AREA_BAR_ID);
836 
837 	as_write_register(state->io_registers,
838 			  AS_REGISTER_GUEST_PAGE_SIZE,
839 			  PAGE_SIZE);
840 	as_write_register(state->io_registers,
841 			  AS_REGISTER_PHYS_START_LOW,
842 			  lower_32_bits(state->address_area_phys_address));
843 	as_write_register(state->io_registers,
844 			  AS_REGISTER_PHYS_START_HIGH,
845 			  upper_32_bits(state->address_area_phys_address));
846 
847 	state->dev = dev;
848 	mutex_init(&state->registers_lock);
849 
850 	pci_set_drvdata(dev, state);
851 	return 0;
852 
853 out_iounmap:
854 	iounmap(state->io_registers);
855 out_misc_deregister:
856 	misc_deregister(&state->miscdevice);
857 out_release_area_bar:
858 	pci_release_region(dev, AS_PCI_AREA_BAR_ID);
859 out_release_control_bar:
860 	pci_release_region(dev, AS_PCI_CONTROL_BAR_ID);
861 out_free_device_state:
862 	kzfree(state);
863 
864 	return res;
865 }
866 
/* Undo create_as_device(), releasing resources in reverse order. */
static void as_pci_destroy_device(struct as_device_state *state)
{
	memunmap(state->address_area);
	iounmap(state->io_registers);
	misc_deregister(&state->miscdevice);
	pci_release_region(state->dev, AS_PCI_AREA_BAR_ID);
	pci_release_region(state->dev, AS_PCI_CONTROL_BAR_ID);
	kfree(state);
}
876 
877 static int __must_check
as_pci_probe(struct pci_dev * dev,const struct pci_device_id * id)878 as_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
879 {
880 	int res;
881 	u8 hardware_revision;
882 
883 	res = pci_enable_device(dev);
884 	if (res)
885 		return res;
886 
887 	res = pci_read_config_byte(dev, PCI_REVISION_ID, &hardware_revision);
888 	if (res)
889 		goto out_disable_pci;
890 
891 	switch (hardware_revision) {
892 	case 1:
893 		res = create_as_device(dev, id);
894 		break;
895 
896 	default:
897 		res = -ENODEV;
898 		goto out_disable_pci;
899 	}
900 
901 	return 0;
902 
903 out_disable_pci:
904 	pci_disable_device(dev);
905 
906 	return res;
907 }
908 
/* PCI remove: tear down the device state and disable the PCI device. */
static void as_pci_remove(struct pci_dev *dev)
{
	struct as_device_state *state = pci_get_drvdata(dev);

	as_pci_destroy_device(state);
	pci_disable_device(dev);
}
916 
/* PCI IDs this driver binds to: the Goldfish address space device. */
static const struct pci_device_id as_pci_tbl[] = {
	{ PCI_DEVICE(AS_PCI_VENDOR_ID, AS_PCI_DEVICE_ID), },
	{ }
};
MODULE_DEVICE_TABLE(pci, as_pci_tbl);
922 
/* PCI driver glue; userspace talks to the misc device node instead. */
static struct pci_driver goldfish_address_space_driver = {
	.name		= GOLDFISH_ADDRESS_SPACE_DEVICE_NAME,
	.id_table	= as_pci_tbl,
	.probe		= as_pci_probe,
	.remove		= as_pci_remove,
};
929 
930 module_pci_driver(goldfish_address_space_driver);
931