1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <linux/init.h>
4 #include <linux/module.h>
5 #include <linux/kernel.h>
6 #include <linux/wait.h>
7 #include <linux/fs.h>
8 #include <linux/io.h>
9 #include <linux/uaccess.h>
10 #include <linux/miscdevice.h>
11 
12 #include <linux/device.h>
13 #include <linux/pci_regs.h>
14 #include <linux/pci_ids.h>
15 #include <linux/pci.h>
16 
17 #include <uapi/linux/goldfish/goldfish_address_space.h>
18 
19 MODULE_DESCRIPTION("A Goldfish driver that allocates address space ranges in "
20 		   "the guest to populate them later in the host. This allows "
21 		   "sharing host's memory with the guest.");
22 MODULE_AUTHOR("Roman Kiryanov <rkir@google.com>");
23 MODULE_LICENSE("GPL v2");
24 
25 #define AS_DEBUG 0
26 
27 #if AS_DEBUG
28 	#define AS_DPRINT(fmt, ...) \
29 		printk(KERN_ERR "%s:%d " fmt "\n", \
30 		       __func__, __LINE__, ##__VA_ARGS__);
31 #else
32 	#define AS_DPRINT(fmt, ...)
33 #endif
34 
/* Byte offsets of the 32-bit MMIO control registers in the control BAR. */
enum as_register_id {
	AS_REGISTER_COMMAND = 0,
	AS_REGISTER_STATUS = 4,
	AS_REGISTER_GUEST_PAGE_SIZE = 8,
	AS_REGISTER_BLOCK_SIZE_LOW = 12,
	AS_REGISTER_BLOCK_SIZE_HIGH = 16,
	AS_REGISTER_BLOCK_OFFSET_LOW = 20,
	AS_REGISTER_BLOCK_OFFSET_HIGH = 24,
	AS_REGISTER_PING = 28,
	AS_REGISTER_PING_INFO_ADDR_LOW = 32,
	AS_REGISTER_PING_INFO_ADDR_HIGH = 36,
	AS_REGISTER_HANDLE = 40,
};
48 
/* Commands written to AS_REGISTER_COMMAND; the result is read back from
 * AS_REGISTER_STATUS (see as_run_command()).
 */
enum as_command_id {
	AS_COMMAND_ALLOCATE_BLOCK = 1,
	AS_COMMAND_DEALLOCATE_BLOCK = 2,
	AS_COMMAND_GEN_HANDLE = 3,
	AS_COMMAND_DESTROY_HANDLE = 4,
	AS_COMMAND_TELL_PING_INFO_ADDR = 5,
};
56 
57 #define AS_PCI_VENDOR_ID	0x607D
58 #define AS_PCI_DEVICE_ID	0xF153
59 #define AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY 32
60 #define AS_INVALID_HANDLE (~(0))
61 
/* BAR 0 holds the control registers; BAR 1 is the shareable address area. */
enum as_pci_bar_id {
	AS_PCI_CONTROL_BAR_ID = 0,
	AS_PCI_AREA_BAR_ID = 1,
};
66 
/* Per-PCI-device state, allocated in create_as_device() and stored as
 * the device's drvdata.
 */
struct as_device_state {
	struct miscdevice	miscdevice;	/* char device userspace opens */
	struct pci_dev		*dev;
	struct as_driver_state	*driver_state;

	void __iomem		*io_registers;	/* mapped control BAR */

	void			*address_area;	/* to claim the address space */

	/* physical address to allocate from */
	unsigned long		address_area_phys_address;

	struct mutex		registers_lock;	/* protects registers */
};
81 
/* One allocated block: byte offset into the address area and its size. */
struct as_block {
	u64 offset;
	u64 size;
};
86 
/* Dynamic array tracking the blocks a single open file has allocated,
 * so mmap requests can be validated and release can free leftovers.
 */
struct as_allocated_blocks {
	struct as_block *blocks;  /* a dynamic array of allocated blocks */
	int blocks_size;	  /* number of entries in use */
	int blocks_capacity;	  /* allocated entries in the array */
	struct mutex blocks_lock; /* protects operations with blocks */
};
93 
/* Per-open-file state, created in as_open() and kept in filp->private_data. */
struct as_file_state {
	struct as_device_state *device_state;
	struct as_allocated_blocks allocated_blocks;
	struct goldfish_address_space_ping *ping_info; /* page shared with host */
	struct mutex ping_info_lock;	/* protects ping_info */
	u32 handle; /* handle generated by the host */
};
101 
as_register_address(void __iomem * base,int offset)102 static void __iomem *as_register_address(void __iomem *base,
103 					 int offset)
104 {
105 	WARN_ON(!base);
106 
107 	return ((char __iomem *)base) + offset;
108 }
109 
/* 32-bit MMIO write to a single control register. */
static void as_write_register(void __iomem *registers,
			      int offset,
			      u32 value)
{
	void __iomem *addr = as_register_address(registers, offset);

	writel(value, addr);
}
116 
as_read_register(void __iomem * registers,int offset)117 static u32 as_read_register(void __iomem *registers, int offset)
118 {
119 	return readl(as_register_address(registers, offset));
120 }
121 
as_run_command(struct as_device_state * state,enum as_command_id cmd)122 static int as_run_command(struct as_device_state *state, enum as_command_id cmd)
123 {
124 	WARN_ON(!state);
125 
126 	as_write_register(state->io_registers, AS_REGISTER_COMMAND, cmd);
127 	return -as_read_register(state->io_registers, AS_REGISTER_STATUS);
128 }
129 
/* Notify the host via the PING register for @handle's ping_info buffer;
 * caller must hold registers_lock.
 */
static void as_ping_impl(struct as_device_state *state, u32 handle)
{
	as_write_register(state->io_registers, AS_REGISTER_PING, handle);
}
134 
135 static long
as_ioctl_allocate_block_locked_impl(struct as_device_state * state,u64 * size,u64 * offset)136 as_ioctl_allocate_block_locked_impl(struct as_device_state *state,
137 				    u64 *size, u64 *offset)
138 {
139 	long res;
140 
141 	as_write_register(state->io_registers,
142 			  AS_REGISTER_BLOCK_SIZE_LOW,
143 			  lower_32_bits(*size));
144 	as_write_register(state->io_registers,
145 			  AS_REGISTER_BLOCK_SIZE_HIGH,
146 			  upper_32_bits(*size));
147 
148 	res = as_run_command(state, AS_COMMAND_ALLOCATE_BLOCK);
149 	if (!res) {
150 		u64 low = as_read_register(state->io_registers,
151 					   AS_REGISTER_BLOCK_OFFSET_LOW);
152 		u64 high = as_read_register(state->io_registers,
153 					    AS_REGISTER_BLOCK_OFFSET_HIGH);
154 		*offset = low | (high << 32);
155 
156 		low = as_read_register(state->io_registers,
157 				       AS_REGISTER_BLOCK_SIZE_LOW);
158 		high = as_read_register(state->io_registers,
159 					AS_REGISTER_BLOCK_SIZE_HIGH);
160 		*size = low | (high << 32);
161 	}
162 
163 	return res;
164 }
165 
166 static long
as_ioctl_unallocate_block_locked_impl(struct as_device_state * state,u64 offset)167 as_ioctl_unallocate_block_locked_impl(struct as_device_state *state, u64 offset)
168 {
169 	as_write_register(state->io_registers,
170 			  AS_REGISTER_BLOCK_OFFSET_LOW,
171 			  lower_32_bits(offset));
172 	as_write_register(state->io_registers,
173 			  AS_REGISTER_BLOCK_OFFSET_HIGH,
174 			  upper_32_bits(offset));
175 
176 	return as_run_command(state, AS_COMMAND_DEALLOCATE_BLOCK);
177 }
178 
/* Growth policy for the dynamic block array: double the capacity. */
static int as_blocks_grow_capacity(int old_capacity)
{
	WARN_ON(old_capacity < 0);

	return 2 * old_capacity;
}
185 
186 static int
as_blocks_insert(struct as_allocated_blocks * allocated_blocks,u64 offset,u64 size)187 as_blocks_insert(struct as_allocated_blocks *allocated_blocks,
188 		 u64 offset,
189 		 u64 size)
190 {
191 	int blocks_size;
192 
193 	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
194 		return -ERESTARTSYS;
195 
196 	blocks_size = allocated_blocks->blocks_size;
197 
198 	WARN_ON(allocated_blocks->blocks_capacity < 1);
199 	WARN_ON(allocated_blocks->blocks_capacity <
200 		allocated_blocks->blocks_size);
201 	WARN_ON(!allocated_blocks->blocks);
202 
203 	if (allocated_blocks->blocks_capacity == blocks_size) {
204 		int new_capacity =
205 			as_blocks_grow_capacity(
206 				allocated_blocks->blocks_capacity);
207 		struct as_block *new_blocks =
208 			kcalloc(new_capacity,
209 				sizeof(allocated_blocks->blocks[0]),
210 				GFP_KERNEL);
211 
212 		if (!new_blocks) {
213 			mutex_unlock(&allocated_blocks->blocks_lock);
214 			return -ENOMEM;
215 		}
216 
217 		memcpy(new_blocks, allocated_blocks->blocks,
218 		       blocks_size * sizeof(allocated_blocks->blocks[0]));
219 
220 		kfree(allocated_blocks->blocks);
221 		allocated_blocks->blocks = new_blocks;
222 		allocated_blocks->blocks_capacity = new_capacity;
223 	}
224 
225 	WARN_ON(blocks_size >= allocated_blocks->blocks_capacity);
226 
227 	allocated_blocks->blocks[blocks_size] =
228 		(struct as_block){ .offset = offset, .size = size };
229 	allocated_blocks->blocks_size = blocks_size + 1;
230 
231 	mutex_unlock(&allocated_blocks->blocks_lock);
232 	return 0;
233 }
234 
235 static int
as_blocks_remove(struct as_allocated_blocks * allocated_blocks,u64 offset)236 as_blocks_remove(struct as_allocated_blocks *allocated_blocks, u64 offset)
237 {
238 	long res = -ENXIO;
239 	struct as_block *blocks;
240 	int blocks_size;
241 	int i;
242 
243 	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
244 		return -ERESTARTSYS;
245 
246 	blocks = allocated_blocks->blocks;
247 	WARN_ON(!blocks);
248 
249 	blocks_size = allocated_blocks->blocks_size;
250 	WARN_ON(blocks_size < 0);
251 
252 	for (i = 0; i < blocks_size; ++i) {
253 		if (offset == blocks[i].offset) {
254 			int last = blocks_size - 1;
255 
256 			if (last > i)
257 				blocks[i] = blocks[last];
258 
259 			--allocated_blocks->blocks_size;
260 			res = 0;
261 			break;
262 		}
263 	}
264 
265 	mutex_unlock(&allocated_blocks->blocks_lock);
266 	return res;
267 }
268 
269 static int
as_blocks_check_if_mine(struct as_allocated_blocks * allocated_blocks,u64 offset,u64 size)270 as_blocks_check_if_mine(struct as_allocated_blocks *allocated_blocks,
271 			u64 offset,
272 			u64 size)
273 {
274 	const u64 end = offset + size;
275 	int res = -EPERM;
276 	struct as_block *block;
277 	int blocks_size;
278 
279 	if (mutex_lock_interruptible(&allocated_blocks->blocks_lock))
280 		return -ERESTARTSYS;
281 
282 	block = allocated_blocks->blocks;
283 	WARN_ON(!block);
284 
285 	blocks_size = allocated_blocks->blocks_size;
286 	WARN_ON(blocks_size < 0);
287 
288 	for (; blocks_size > 0; --blocks_size, ++block) {
289 		u64 block_offset = block->offset;
290 		u64 block_end = block_offset + block->size;
291 
292 		if (offset >= block_offset && end <= block_end) {
293 			res = 0;
294 			break;
295 		}
296 	}
297 
298 	mutex_unlock(&allocated_blocks->blocks_lock);
299 	return res;
300 }
301 
as_open(struct inode * inode,struct file * filp)302 static int as_open(struct inode *inode, struct file *filp)
303 {
304 	struct as_file_state *file_state;
305 	struct as_device_state *device_state;
306 	struct goldfish_address_space_ping *ping_info;
307 	u64 ping_info_phys;
308 	u64 ping_info_phys_returned;
309 	int err;
310 
311 	AS_DPRINT("Get free page");
312 	ping_info =
313 		(struct goldfish_address_space_ping *)
314 		__get_free_page(GFP_KERNEL);
315 	ping_info_phys = virt_to_phys(ping_info);
316 	AS_DPRINT("Got free page: %p 0x%llx", ping_info,
317 		  (unsigned long long)ping_info_phys);
318 
319 	if (!ping_info) {
320 		printk(KERN_ERR "Could not alloc goldfish_address_space command buffer!\n");
321 		err = -ENOMEM;
322 		goto err_ping_info_alloc_failed;
323 	}
324 
325 	file_state = kzalloc(sizeof(*file_state), GFP_KERNEL);
326 	if (!file_state) {
327 		err = -ENOMEM;
328 		goto err_file_state_alloc_failed;
329 	}
330 
331 	file_state->device_state =
332 		container_of(filp->private_data,
333 			     struct as_device_state,
334 			     miscdevice);
335 	device_state = file_state->device_state;
336 
337 	file_state->allocated_blocks.blocks =
338 		kcalloc(AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY,
339 				sizeof(file_state->allocated_blocks.blocks[0]),
340 				GFP_KERNEL);
341 
342 	if (!file_state->allocated_blocks.blocks) {
343 		err = -ENOMEM;
344 		goto err_file_state_blocks_alloc_failed;
345 	}
346 
347 	file_state->allocated_blocks.blocks_size = 0;
348 	file_state->allocated_blocks.blocks_capacity =
349 		AS_ALLOCATED_BLOCKS_INITIAL_CAPACITY;
350 	mutex_init(&file_state->allocated_blocks.blocks_lock);
351 
352 	mutex_init(&file_state->ping_info_lock);
353 	file_state->ping_info = ping_info;
354 
355 	AS_DPRINT("Acq regs lock");
356 	mutex_lock(&device_state->registers_lock);
357 	AS_DPRINT("Got regs lock, gen handle");
358 	as_run_command(device_state, AS_COMMAND_GEN_HANDLE);
359 	file_state->handle = as_read_register(
360 		device_state->io_registers,
361 		AS_REGISTER_HANDLE);
362 	AS_DPRINT("Got regs lock, read handle: %u", file_state->handle);
363 	mutex_unlock(&device_state->registers_lock);
364 
365 	if (file_state->handle == AS_INVALID_HANDLE) {
366 		err = -EINVAL;
367 		goto err_gen_handle_failed;
368 	}
369 
370 	AS_DPRINT("Acq regs lock 2");
371 	mutex_lock(&device_state->registers_lock);
372 	AS_DPRINT("Acqd regs lock 2, write handle and ping info addr");
373 	as_write_register(
374 		device_state->io_registers,
375 		AS_REGISTER_HANDLE,
376 		file_state->handle);
377 	as_write_register(
378 		device_state->io_registers,
379 		AS_REGISTER_PING_INFO_ADDR_LOW,
380 		lower_32_bits(ping_info_phys));
381 	as_write_register(
382 		device_state->io_registers,
383 		AS_REGISTER_PING_INFO_ADDR_HIGH,
384 		upper_32_bits(ping_info_phys));
385 	AS_DPRINT("Do tell ping info addr");
386 	as_run_command(device_state, AS_COMMAND_TELL_PING_INFO_ADDR);
387 	ping_info_phys_returned =
388 		((u64)as_read_register(device_state->io_registers,
389 				       AS_REGISTER_PING_INFO_ADDR_LOW)) |
390 		((u64)as_read_register(device_state->io_registers,
391 				       AS_REGISTER_PING_INFO_ADDR_HIGH) << 32);
392 	AS_DPRINT("Read back");
393 
394 	if (ping_info_phys != ping_info_phys_returned) {
395 		printk(KERN_ERR "%s: Invalid result for ping info phys addr: expected 0x%llx, got 0x%llx\n",
396 		       __func__,
397 		       ping_info_phys, ping_info_phys_returned);
398 		err = -EINVAL;
399 		goto err_ping_info_failed;
400 	}
401 
402 	mutex_unlock(&device_state->registers_lock);
403 
404 	filp->private_data = file_state;
405 	return 0;
406 
407 err_ping_info_failed:
408 err_gen_handle_failed:
409 	kfree(file_state->allocated_blocks.blocks);
410 err_file_state_blocks_alloc_failed:
411 	kfree(file_state);
412 err_file_state_alloc_failed:
413 	free_page((unsigned long)ping_info);
414 err_ping_info_alloc_failed:
415 	return err;
416 }
417 
/* Release handler: destroy the host-side handle, return every block
 * still tracked for this file to the host, then free guest resources.
 */
static int as_release(struct inode *inode, struct file *filp)
{
	struct as_file_state *file_state = filp->private_data;
	struct as_allocated_blocks *allocated_blocks =
		&file_state->allocated_blocks;
	struct goldfish_address_space_ping *ping_info = file_state->ping_info;
	struct as_device_state *state = file_state->device_state;
	int blocks_size;
	int i;

	WARN_ON(!state);
	WARN_ON(!allocated_blocks);
	WARN_ON(!allocated_blocks->blocks);
	WARN_ON(allocated_blocks->blocks_size < 0);
	WARN_ON(!ping_info);

	/* NOTE(review): blocks_size is read without blocks_lock; at
	 * release time there should be no concurrent users of this
	 * file_state — confirm no other path can race here.
	 */
	blocks_size = allocated_blocks->blocks_size;

	mutex_lock(&state->registers_lock);

	/* Select this file's handle, then destroy it on the host. */
	as_write_register(state->io_registers, AS_REGISTER_HANDLE,
			  file_state->handle);
	as_run_command(state, AS_COMMAND_DESTROY_HANDLE);

	/* Deallocate every block this file still owns. */
	for (i = 0; i < blocks_size; ++i) {
		WARN_ON(as_ioctl_unallocate_block_locked_impl(
				state, allocated_blocks->blocks[i].offset));
	}

	mutex_unlock(&state->registers_lock);

	/* Free guest-side bookkeeping and the shared ping page. */
	kfree(allocated_blocks->blocks);
	free_page((unsigned long)ping_info);
	kfree(file_state);
	return 0;
}
454 
/* Map @size bytes of the device's address area, starting at the page
 * offset the caller requested, into the given VMA.
 */
static int as_mmap_impl(struct as_device_state *state,
			size_t size,
			struct vm_area_struct *vma)
{
	const unsigned long base_pfn =
		state->address_area_phys_address >> PAGE_SHIFT;

	return remap_pfn_range(vma, vma->vm_start,
			       base_pfn + vma->vm_pgoff,
			       size, vma->vm_page_prot);
}
468 
as_mmap(struct file * filp,struct vm_area_struct * vma)469 static int as_mmap(struct file *filp, struct vm_area_struct *vma)
470 {
471 	struct as_file_state *file_state = filp->private_data;
472 	struct as_allocated_blocks *allocated_blocks =
473 		&file_state->allocated_blocks;
474 	size_t size = PAGE_ALIGN(vma->vm_end - vma->vm_start);
475 	int res;
476 
477 	WARN_ON(!allocated_blocks);
478 
479 	res = as_blocks_check_if_mine(allocated_blocks,
480 				      vma->vm_pgoff << PAGE_SHIFT,
481 				      size);
482 
483 	if (res)
484 		return res;
485 	else
486 		return as_mmap_impl(file_state->device_state, size, vma);
487 }
488 
as_ioctl_allocate_block_impl(struct as_device_state * state,struct goldfish_address_space_allocate_block * request)489 static long as_ioctl_allocate_block_impl(
490 	struct as_device_state *state,
491 	struct goldfish_address_space_allocate_block *request)
492 {
493 	long res;
494 
495 	if (mutex_lock_interruptible(&state->registers_lock))
496 		return -ERESTARTSYS;
497 
498 	res = as_ioctl_allocate_block_locked_impl(state,
499 						  &request->size,
500 						  &request->offset);
501 	if (!res) {
502 		request->phys_addr =
503 			state->address_area_phys_address + request->offset;
504 	}
505 
506 	mutex_unlock(&state->registers_lock);
507 	return res;
508 }
509 
/* Deallocate a host block by offset, taking registers_lock; failure is
 * unexpected here and only warned about.
 */
static void
as_ioctl_unallocate_block_impl(struct as_device_state *state, u64 offset)
{
	mutex_lock(&state->registers_lock);
	WARN_ON(as_ioctl_unallocate_block_locked_impl(state, offset));
	mutex_unlock(&state->registers_lock);
}
517 
518 static long
as_ioctl_allocate_block(struct as_allocated_blocks * allocated_blocks,struct as_device_state * state,void __user * ptr)519 as_ioctl_allocate_block(struct as_allocated_blocks *allocated_blocks,
520 			struct as_device_state *state,
521 			void __user *ptr)
522 {
523 	long res;
524 	struct goldfish_address_space_allocate_block request;
525 
526 	if (copy_from_user(&request, ptr, sizeof(request)))
527 		return -EFAULT;
528 
529 	res = as_ioctl_allocate_block_impl(state, &request);
530 	if (!res) {
531 		res = as_blocks_insert(allocated_blocks,
532 				       request.offset,
533 				       request.size);
534 
535 		if (res) {
536 			as_ioctl_unallocate_block_impl(state, request.offset);
537 		} else if (copy_to_user(ptr, &request, sizeof(request))) {
538 			as_ioctl_unallocate_block_impl(state, request.offset);
539 			res = -EFAULT;
540 		}
541 	}
542 
543 	return res;
544 }
545 
546 static long
as_ioctl_unallocate_block(struct as_allocated_blocks * allocated_blocks,struct as_device_state * state,void __user * ptr)547 as_ioctl_unallocate_block(struct as_allocated_blocks *allocated_blocks,
548 			  struct as_device_state *state,
549 			  void __user *ptr)
550 {
551 	long res;
552 	u64 offset;
553 
554 	if (copy_from_user(&offset, ptr, sizeof(offset)))
555 		return -EFAULT;
556 
557 	res = as_blocks_remove(allocated_blocks, offset);
558 	if (!res)
559 		as_ioctl_unallocate_block_impl(state, offset);
560 
561 	return res;
562 }
563 
/* PING ioctl body: copy the request into the page shared with the host,
 * rebase its offsets to physical addresses, ring the host, and report
 * the metadata field back to userspace. Caller holds ping_info_lock.
 */
static long
as_ioctl_ping_impl(struct goldfish_address_space_ping *ping_info,
		   struct as_device_state *state,
		   u32 handle,
		   void __user *ptr)
{
	struct goldfish_address_space_ping user_copy;

	if (copy_from_user(&user_copy, ptr, sizeof(user_copy)))
		return -EFAULT;

	*ping_info = user_copy;

	/* Rebase the userspace-relative offsets to physical addresses
	 * before the host reads the shared page.
	 */
	ping_info->offset += state->address_area_phys_address;
	ping_info->wait_offset += state->address_area_phys_address;

	mutex_lock(&state->registers_lock);
	as_ping_impl(state, handle);
	mutex_unlock(&state->registers_lock);

	/* Only metadata is returned; the other fields keep the caller's
	 * original (un-rebased) values from user_copy.
	 */
	user_copy.metadata = ping_info->metadata;
	if (copy_to_user(ptr, &user_copy, sizeof(user_copy)))
		return -EFAULT;

	return 0;
}
591 
/* Serialize pings per file: ping_info is a single page shared with the
 * host, so only one ping can be in flight per open file.
 */
static long as_ioctl_ping(struct as_file_state *file_state, void __user *ptr)
{
	long ret;

	mutex_lock(&file_state->ping_info_lock);
	ret = as_ioctl_ping_impl(file_state->ping_info,
				 file_state->device_state,
				 file_state->handle,
				 ptr);
	mutex_unlock(&file_state->ping_info_lock);

	return ret;
}
605 
as_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)606 static long as_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
607 {
608 	struct as_file_state *file_state = filp->private_data;
609 
610 	switch (cmd) {
611 	case GOLDFISH_ADDRESS_SPACE_IOCTL_ALLOCATE_BLOCK:
612 		return as_ioctl_allocate_block(&file_state->allocated_blocks,
613 					       file_state->device_state,
614 					       (void __user *)arg);
615 
616 	case GOLDFISH_ADDRESS_SPACE_IOCTL_DEALLOCATE_BLOCK:
617 		return as_ioctl_unallocate_block(&file_state->allocated_blocks,
618 						 file_state->device_state,
619 						 (void __user *)arg);
620 
621 	case GOLDFISH_ADDRESS_SPACE_IOCTL_PING:
622 		return as_ioctl_ping(file_state, (void __user *)arg);
623 
624 	default:
625 		return -ENOTTY;
626 	}
627 }
628 
/* fops for the misc char device exposed to userspace; the ioctl ABI is
 * identical for native and compat callers.
 */
static const struct file_operations userspace_file_operations = {
	.owner = THIS_MODULE,
	.open = as_open,
	.release = as_release,
	.mmap = as_mmap,
	.unlocked_ioctl = as_ioctl,
	.compat_ioctl = as_ioctl,
};
637 
ioremap_pci_bar(struct pci_dev * dev,int bar_id)638 static void __iomem __must_check *ioremap_pci_bar(struct pci_dev *dev,
639 						  int bar_id)
640 {
641 	void __iomem *io;
642 	unsigned long size = pci_resource_len(dev, bar_id);
643 
644 	if (!size)
645 		return IOMEM_ERR_PTR(-ENXIO);
646 
647 	io = ioremap(pci_resource_start(dev, bar_id), size);
648 	if (!io)
649 		return IOMEM_ERR_PTR(-ENOMEM);
650 
651 	return io;
652 }
653 
memremap_pci_bar(struct pci_dev * dev,int bar_id,unsigned long flags)654 static void __must_check *memremap_pci_bar(struct pci_dev *dev,
655 					   int bar_id,
656 					   unsigned long flags)
657 {
658 	void *mem;
659 	unsigned long size = pci_resource_len(dev, bar_id);
660 
661 	if (!size)
662 		return ERR_PTR(-ENXIO);
663 
664 	mem = memremap(pci_resource_start(dev, bar_id), size, flags);
665 	if (!mem)
666 		return ERR_PTR(-ENOMEM);
667 
668 	return mem;
669 }
670 
671 
/* Initialize the embedded miscdevice: zero everything, then set a
 * dynamic minor, the well-known device name, and our fops.
 */
static void fill_miscdevice(struct miscdevice *miscdev)
{
	memset(miscdev, 0, sizeof(*miscdev));

	miscdev->minor = MISC_DYNAMIC_MINOR;
	miscdev->name = GOLDFISH_ADDRESS_SPACE_DEVICE_NAME;
	miscdev->fops = &userspace_file_operations;
}
680 
681 static int __must_check
create_as_device(struct pci_dev * dev,const struct pci_device_id * id)682 create_as_device(struct pci_dev *dev, const struct pci_device_id *id)
683 {
684 	int res;
685 	struct as_device_state *state;
686 
687 	state = kzalloc(sizeof(*state), GFP_KERNEL);
688 	if (!state)
689 		return -ENOMEM;
690 
691 	res = pci_request_region(dev,
692 				 AS_PCI_CONTROL_BAR_ID,
693 				 "Address space control");
694 	if (res) {
695 		pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
696 		       dev->bus->number,
697 		       dev->devfn,
698 		       AS_PCI_CONTROL_BAR_ID);
699 		goto out_free_device_state;
700 	}
701 
702 	res = pci_request_region(dev,
703 				 AS_PCI_AREA_BAR_ID,
704 				 "Address space area");
705 	if (res) {
706 		pr_err("(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR%d",
707 		       dev->bus->number,
708 		       dev->devfn,
709 		       AS_PCI_AREA_BAR_ID);
710 		goto out_release_control_bar;
711 	}
712 
713 	fill_miscdevice(&state->miscdevice);
714 	res = misc_register(&state->miscdevice);
715 	if (res)
716 		goto out_release_area_bar;
717 
718 	state->io_registers = ioremap_pci_bar(dev,
719 					      AS_PCI_CONTROL_BAR_ID);
720 	if (IS_ERR(state->io_registers)) {
721 		res = PTR_ERR(state->io_registers);
722 		goto out_misc_deregister;
723 	}
724 
725 	state->address_area = memremap_pci_bar(dev,
726 					       AS_PCI_AREA_BAR_ID,
727 					       MEMREMAP_WB);
728 	if (IS_ERR(state->address_area)) {
729 		res = PTR_ERR(state->address_area);
730 		goto out_iounmap;
731 	}
732 
733 	state->address_area_phys_address =
734 		pci_resource_start(dev, AS_PCI_AREA_BAR_ID);
735 
736 	as_write_register(state->io_registers,
737 			  AS_REGISTER_GUEST_PAGE_SIZE,
738 			  PAGE_SIZE);
739 
740 	state->dev = dev;
741 	mutex_init(&state->registers_lock);
742 
743 	pci_set_drvdata(dev, state);
744 	return 0;
745 
746 out_iounmap:
747 	iounmap(state->io_registers);
748 out_misc_deregister:
749 	misc_deregister(&state->miscdevice);
750 out_release_area_bar:
751 	pci_release_region(dev, AS_PCI_AREA_BAR_ID);
752 out_release_control_bar:
753 	pci_release_region(dev, AS_PCI_CONTROL_BAR_ID);
754 out_free_device_state:
755 	kzfree(state);
756 
757 	return res;
758 }
759 
/* Tear down a device in the reverse order of create_as_device(). */
static void as_pci_destroy_device(struct as_device_state *state)
{
	memunmap(state->address_area);
	iounmap(state->io_registers);
	misc_deregister(&state->miscdevice);
	pci_release_region(state->dev, AS_PCI_AREA_BAR_ID);
	pci_release_region(state->dev, AS_PCI_CONTROL_BAR_ID);
	kfree(state);
}
769 
770 static int __must_check
as_pci_probe(struct pci_dev * dev,const struct pci_device_id * id)771 as_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
772 {
773 	int res;
774 	u8 hardware_revision;
775 
776 	res = pci_enable_device(dev);
777 	if (res)
778 		return res;
779 
780 	res = pci_read_config_byte(dev, PCI_REVISION_ID, &hardware_revision);
781 	if (res)
782 		goto out_disable_pci;
783 
784 	switch (hardware_revision) {
785 	case 1:
786 		res = create_as_device(dev, id);
787 		break;
788 
789 	default:
790 		res = -ENODEV;
791 		goto out_disable_pci;
792 	}
793 
794 	return 0;
795 
796 out_disable_pci:
797 	pci_disable_device(dev);
798 
799 	return res;
800 }
801 
/* PCI remove callback: free the device state, then disable the device. */
static void as_pci_remove(struct pci_dev *dev)
{
	struct as_device_state *state = pci_get_drvdata(dev);

	as_pci_destroy_device(state);
	pci_disable_device(dev);
}
809 
/* Match table: a single goldfish vendor/device pair. */
static const struct pci_device_id as_pci_tbl[] = {
	{ PCI_DEVICE(AS_PCI_VENDOR_ID, AS_PCI_DEVICE_ID), },
	{ }
};
MODULE_DEVICE_TABLE(pci, as_pci_tbl);

static struct pci_driver goldfish_address_space_driver = {
	.name		= GOLDFISH_ADDRESS_SPACE_DEVICE_NAME,
	.id_table	= as_pci_tbl,
	.probe		= as_pci_probe,
	.remove		= as_pci_remove,
};
822 
823 module_pci_driver(goldfish_address_space_driver);
824