/drivers/staging/android/ion/

ion_page_pool.c
    27  struct page *page;  member
    33  struct page *page = alloc_pages(pool->gfp_mask, pool->order);  in ion_page_pool_alloc_pages()  local
    35  if (!page)  in ion_page_pool_alloc_pages()
    37  ion_page_pool_alloc_set_cache_policy(pool, page);  in ion_page_pool_alloc_pages()
    39  ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,  in ion_page_pool_alloc_pages()
    41  return page;  in ion_page_pool_alloc_pages()
    45  struct page *page)  in ion_page_pool_free_pages()  argument
    47  ion_page_pool_free_set_cache_policy(pool, page);  in ion_page_pool_free_pages()
    48  __free_pages(page, pool->order);  in ion_page_pool_free_pages()
    51  static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)  in ion_page_pool_add()  argument
    [all …]
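
The snippets above show ION's page-pool idea: allocation prefers a page already cached in the pool and only falls back to alloc_pages(), while freeing returns pages to the pool instead of the page allocator. Below is a minimal userspace sketch of that recycle-or-allocate pattern, with malloc() standing in for alloc_pages() and a singly linked free list standing in for the pool's page lists; every name in it is illustrative, not ION's API.

    #include <stdlib.h>

    struct block { struct block *next; };       /* header reused to chain free blocks */

    struct pool {
        struct block *free_list;                /* recycled blocks, like the pool's page lists */
        size_t block_size;                      /* analogue of PAGE_SIZE << pool->order;
                                                   must be >= sizeof(struct block) */
    };

    /* Fast path: take a cached block; slow path: fall back to the allocator. */
    static void *pool_alloc(struct pool *p)
    {
        if (p->free_list) {
            struct block *b = p->free_list;
            p->free_list = b->next;
            return b;
        }
        return malloc(p->block_size);           /* like alloc_pages(pool->gfp_mask, order) */
    }

    /* Freeing returns the block to the pool rather than to the allocator. */
    static void pool_free(struct pool *p, void *mem)
    {
        struct block *b = mem;
        b->next = p->free_list;
        p->free_list = b;
    }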

ion_system_heap.c
    56  struct page *page;  member
    61  static struct page *alloc_buffer_page(struct ion_system_heap *heap,  in alloc_buffer_page()
    67  struct page *page;  in alloc_buffer_page()  local
    70  page = ion_page_pool_alloc(pool);  in alloc_buffer_page()
    76  page = alloc_pages(gfp_flags, order);  in alloc_buffer_page()
    77  if (!page)  in alloc_buffer_page()
    79  ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,  in alloc_buffer_page()
    82  if (!page)  in alloc_buffer_page()
    85  return page;  in alloc_buffer_page()
    89  struct ion_buffer *buffer, struct page *page,  in free_buffer_page()  argument
    [all …]

ion_priv.h
    84  struct page **pages;
    238  int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
    385  void ion_page_pool_free(struct ion_page_pool *, struct page *);
    386  void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
    391  struct page *page){  in ion_page_pool_alloc_set_cache_policy()  argument
    392  void *va = page_address(page);  in ion_page_pool_alloc_set_cache_policy()
    400  struct page *page){  in ion_page_pool_free_set_cache_policy()  argument
    401  void *va = page_address(page);  in ion_page_pool_free_set_cache_policy()
    410  struct page *page){ }  in ion_page_pool_alloc_set_cache_policy()  argument
    414  struct page *page){ }  in ion_page_pool_free_set_cache_policy()  argument
    [all …]

/drivers/xen/

balloon.c
    112  static void scrub_page(struct page *page)  in scrub_page()  argument
    115  clear_highpage(page);  in scrub_page()
    120  static void __balloon_append(struct page *page)  in __balloon_append()  argument
    123  if (PageHighMem(page)) {  in __balloon_append()
    124  list_add_tail(&page->lru, &ballooned_pages);  in __balloon_append()
    127  list_add(&page->lru, &ballooned_pages);  in __balloon_append()
    132  static void balloon_append(struct page *page)  in balloon_append()  argument
    134  __balloon_append(page);  in balloon_append()
    135  if (PageHighMem(page))  in balloon_append()
    141  static struct page *balloon_retrieve(bool prefer_highmem)  in balloon_retrieve()
    [all …]
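
balloon.c keeps ballooned-out pages on a single list but appends highmem pages at the tail and lowmem pages at the head, so retrieval can prefer one kind over the other. The sketch below shows just that list discipline, with a plain circular doubly linked list in place of the kernel's list_head and a boolean in place of PageHighMem(); it is not the Xen driver itself.

    #include <stdbool.h>

    struct bpage {
        struct bpage *prev, *next;
        bool highmem;                    /* stand-in for PageHighMem(page) */
    };

    /* Circular list head; an empty list points at itself. */
    static struct bpage ballooned = { &ballooned, &ballooned, false };

    static void list_insert(struct bpage *n, struct bpage *prev, struct bpage *next)
    {
        n->prev = prev; n->next = next;
        prev->next = n; next->prev = n;
    }

    /*
     * Highmem pages go to the tail, lowmem pages to the head, so taking
     * entries from one end or the other expresses the caller's preference.
     */
    static void balloon_append_sketch(struct bpage *page)
    {
        if (page->highmem)
            list_insert(page, ballooned.prev, &ballooned);  /* tail */
        else
            list_insert(page, &ballooned, ballooned.next);  /* head */
    }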

/drivers/hwmon/pmbus/

ltc2978.c
    102  static int ltc2978_read_word_data_common(struct i2c_client *client, int page,  in ltc2978_read_word_data_common()  argument
    111  ret = pmbus_read_word_data(client, page, LTC2978_MFR_VIN_PEAK);  in ltc2978_read_word_data_common()
    119  ret = pmbus_read_word_data(client, page, LTC2978_MFR_VOUT_PEAK);  in ltc2978_read_word_data_common()
    125  if (ret > data->vout_max[page])  in ltc2978_read_word_data_common()
    126  data->vout_max[page] = ret;  in ltc2978_read_word_data_common()
    127  ret = data->vout_max[page];  in ltc2978_read_word_data_common()
    131  ret = pmbus_read_word_data(client, page,  in ltc2978_read_word_data_common()
    135  > lin11_to_val(data->temp_max[page]))  in ltc2978_read_word_data_common()
    136  data->temp_max[page] = ret;  in ltc2978_read_word_data_common()
    137  ret = data->temp_max[page];  in ltc2978_read_word_data_common()
    [all …]
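
The LTC2978 code reads the chip's peak registers and folds each reading into a per-page maximum cached in driver data, so the value it reports never moves backwards. A hedged userspace sketch of that caching follows; read_word() is a hypothetical stand-in for pmbus_read_word_data(), and the channel count is invented.

    #include <stdint.h>

    #define NUM_CHANNELS 8                        /* "pages" on a multi-channel regulator */

    struct peak_cache {
        uint16_t vout_max[NUM_CHANNELS];          /* running maximum per channel */
    };

    /* Hypothetical bus accessor; a real driver would do an SMBus word read. */
    extern int read_word(int channel, int reg);

    /*
     * Read the device's peak register, fold it into the cached maximum for
     * that channel, and report the cached maximum, so the peak survives even
     * if the chip-side register is later reset.
     */
    static int read_vout_peak(struct peak_cache *c, int channel, int reg)
    {
        int ret = read_word(channel, reg);

        if (ret < 0)
            return ret;                           /* propagate bus errors */
        if ((uint16_t)ret > c->vout_max[channel])
            c->vout_max[channel] = (uint16_t)ret;
        return c->vout_max[channel];
    }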

pmbus.c
    36  int page;  in pmbus_find_sensor_groups()  local
    75  for (page = 0; page < info->pages; page++) {  in pmbus_find_sensor_groups()
    76  if (pmbus_check_word_register(client, page, PMBUS_READ_VOUT)) {  in pmbus_find_sensor_groups()
    77  info->func[page] |= PMBUS_HAVE_VOUT;  in pmbus_find_sensor_groups()
    78  if (pmbus_check_byte_register(client, page,  in pmbus_find_sensor_groups()
    80  info->func[page] |= PMBUS_HAVE_STATUS_VOUT;  in pmbus_find_sensor_groups()
    82  if (pmbus_check_word_register(client, page, PMBUS_READ_IOUT)) {  in pmbus_find_sensor_groups()
    83  info->func[page] |= PMBUS_HAVE_IOUT;  in pmbus_find_sensor_groups()
    86  info->func[page] |= PMBUS_HAVE_STATUS_IOUT;  in pmbus_find_sensor_groups()
    88  if (pmbus_check_word_register(client, page, PMBUS_READ_POUT))  in pmbus_find_sensor_groups()
    [all …]

pmbus_core.c
    60  u8 page;  /* page number */  member
    125  int pmbus_set_page(struct i2c_client *client, u8 page)  in pmbus_set_page()  argument
    131  if (page != data->currpage) {  in pmbus_set_page()
    132  rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page);  in pmbus_set_page()
    134  if (newpage != page)  in pmbus_set_page()
    137  data->currpage = page;  in pmbus_set_page()
    143  int pmbus_write_byte(struct i2c_client *client, int page, u8 value)  in pmbus_write_byte()  argument
    147  if (page >= 0) {  in pmbus_write_byte()
    148  rv = pmbus_set_page(client, page);  in pmbus_write_byte()
    161  static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value)  in _pmbus_write_byte()  argument
    [all …]
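
pmbus_set_page() only touches the PMBus PAGE register when the requested page differs from the cached one, and it reads the register back to confirm the device accepted the change. A rough userspace equivalent of that caching-and-verify step is sketched below; bus_write_byte()/bus_read_byte() are hypothetical stand-ins for the SMBus byte-data calls.

    #include <stdint.h>

    #define REG_PAGE 0x00       /* PMBus PAGE command code */

    /* Hypothetical bus accessors. */
    extern int bus_write_byte(uint8_t reg, uint8_t val);
    extern int bus_read_byte(uint8_t reg);

    struct dev_state {
        int currpage;           /* last page selected, -1 if unknown */
    };

    /*
     * Select a page only when it actually changes, then verify by reading it
     * back. A negative page means "no page selection needed", matching the
     * convention visible in pmbus_write_byte() above.
     */
    static int set_page(struct dev_state *d, int page)
    {
        if (page < 0 || page == d->currpage)
            return 0;                                /* nothing to do */
        if (bus_write_byte(REG_PAGE, (uint8_t)page) < 0)
            return -1;
        if (bus_read_byte(REG_PAGE) != page)         /* read-back check */
            return -1;
        d->currpage = page;
        return 0;
    }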

/drivers/block/

brd.c
    55  static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector)  in brd_lookup_page()
    58  struct page *page;  in brd_lookup_page()  local
    73  page = radix_tree_lookup(&brd->brd_pages, idx);  in brd_lookup_page()
    76  BUG_ON(page && page->index != idx);  in brd_lookup_page()
    78  return page;  in brd_lookup_page()
    86  static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)  in brd_insert_page()
    89  struct page *page;  in brd_insert_page()  local
    92  page = brd_lookup_page(brd, sector);  in brd_insert_page()
    93  if (page)  in brd_insert_page()
    94  return page;  in brd_insert_page()
    [all …]
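
brd keys its backing pages by a page index derived from the sector number and allocates them lazily on first use, storing them in a radix tree. The toy sketch below shows the same lookup-or-insert logic with a fixed array instead of a radix tree; the capacity and names are made up for illustration.

    #include <stdlib.h>
    #include <stdint.h>

    #define PAGE_SHIFT   12
    #define SECTOR_SHIFT 9
    #define NPAGES       1024              /* toy capacity: 4 MiB of 4 KiB pages */

    static void *pages[NPAGES];            /* stands in for brd's radix tree */

    /* A sector is 512 bytes, so one 4 KiB page covers 8 consecutive sectors. */
    static size_t page_index(uint64_t sector)
    {
        return (size_t)(sector >> (PAGE_SHIFT - SECTOR_SHIFT));
    }

    static void *lookup_page(uint64_t sector)
    {
        size_t idx = page_index(sector);

        return idx < NPAGES ? pages[idx] : NULL;
    }

    /* Allocate backing storage on first touch, as brd_insert_page() does. */
    static void *insert_page(uint64_t sector)
    {
        size_t idx = page_index(sector);

        if (idx >= NPAGES)
            return NULL;                   /* beyond device capacity */
        if (pages[idx])
            return pages[idx];             /* already present */
        pages[idx] = calloc(1, 1 << PAGE_SHIFT);
        return pages[idx];                 /* NULL on allocation failure */
    }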

/drivers/video/

fb_defio.c
    26  static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)  in fb_deferred_io_page()
    29  struct page *page;  in fb_deferred_io_page()  local
    32  page = vmalloc_to_page(screen_base + offs);  in fb_deferred_io_page()
    34  page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);  in fb_deferred_io_page()
    36  return page;  in fb_deferred_io_page()
    44  struct page *page;  in fb_deferred_io_fault()  local
    51  page = fb_deferred_io_page(info, offset);  in fb_deferred_io_fault()
    52  if (!page)  in fb_deferred_io_fault()
    55  get_page(page);  in fb_deferred_io_fault()
    58  page->mapping = vma->vm_file->f_mapping;  in fb_deferred_io_fault()
    [all …]

/drivers/target/iscsi/

iscsi_target_stat.c
    70  struct iscsi_wwn_stat_grps *igrps, char *page)  in iscsi_stat_instance_show_attr_inst()  argument
    75  return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_index);  in iscsi_stat_instance_show_attr_inst()
    80  struct iscsi_wwn_stat_grps *igrps, char *page)  in iscsi_stat_instance_show_attr_min_ver()  argument
    82  return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);  in iscsi_stat_instance_show_attr_min_ver()
    87  struct iscsi_wwn_stat_grps *igrps, char *page)  in iscsi_stat_instance_show_attr_max_ver()  argument
    89  return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_DRAFT20_VERSION);  in iscsi_stat_instance_show_attr_max_ver()
    94  struct iscsi_wwn_stat_grps *igrps, char *page)  in iscsi_stat_instance_show_attr_portals()  argument
    99  return snprintf(page, PAGE_SIZE, "%u\n", tiqn->tiqn_num_tpg_nps);  in iscsi_stat_instance_show_attr_portals()
    104  struct iscsi_wwn_stat_grps *igrps, char *page)  in iscsi_stat_instance_show_attr_nodes()  argument
    106  return snprintf(page, PAGE_SIZE, "%u\n", ISCSI_INST_NUM_NODES);  in iscsi_stat_instance_show_attr_nodes()
    [all …]
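
Here "page" is not a struct page but the page-sized text buffer configfs hands to a show attribute: each handler formats a single value into it and returns the byte count, bounded by PAGE_SIZE. A minimal userspace sketch of that handler shape, assuming the same 4 KiB single-buffer contract:

    #include <stdio.h>
    #include <sys/types.h>

    #define PAGE_SIZE 4096     /* configfs/sysfs give attributes one page-sized buffer */

    struct instance {
        unsigned int index;    /* illustrative field, like tiqn->tiqn_index */
    };

    /*
     * A show-style handler: format exactly one newline-terminated value into
     * the page buffer and return the number of bytes written. Staying under
     * one page is what makes the single-buffer contract work.
     */
    static ssize_t show_index(struct instance *inst, char *page)
    {
        return snprintf(page, PAGE_SIZE, "%u\n", inst->index);
    }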

iscsi_target_configfs.c
    55  char *page)  in lio_target_np_show_sctp()  argument
    64  rb = sprintf(page, "1\n");  in lio_target_np_show_sctp()
    66  rb = sprintf(page, "0\n");  in lio_target_np_show_sctp()
    73  const char *page,  in lio_target_np_store_sctp()  argument
    85  op = simple_strtoul(page, &endptr, 0);  in lio_target_np_store_sctp()
    130  char *page)  in lio_target_np_show_iser()  argument
    139  rb = sprintf(page, "1\n");  in lio_target_np_show_iser()
    141  rb = sprintf(page, "0\n");  in lio_target_np_show_iser()
    148  const char *page,  in lio_target_np_store_iser()  argument
    160  op = simple_strtoul(page, &endptr, 0);  in lio_target_np_store_iser()
    [all …]
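
The matching store attributes parse an on/off flag out of the user-supplied buffer (simple_strtoul() in the kernel snippet) and return how many bytes they consumed. The userspace sketch below shows the same parse-and-validate step with strtoul() and errno values; the handler name and flag pointer are illustrative.

    #include <stdlib.h>
    #include <errno.h>
    #include <sys/types.h>

    /*
     * A store-style handler: parse an on/off flag from the text the user
     * wrote and return the number of bytes consumed, or a negative errno.
     */
    static ssize_t store_flag(int *flag, const char *page, size_t count)
    {
        char *end;
        unsigned long op = strtoul(page, &end, 0);

        if (end == page)                /* no digits at all */
            return -EINVAL;
        if (op != 0 && op != 1)         /* only "0" and "1" are meaningful here */
            return -EINVAL;
        *flag = (int)op;
        return (ssize_t)count;          /* report the whole write as handled */
    }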

/drivers/infiniband/hw/mthca/

mthca_memfree.c
    61  } page[0];  member
    109  struct page *page;  in mthca_alloc_icm_pages()  local
    115  page = alloc_pages(gfp_mask | __GFP_ZERO, order);  in mthca_alloc_icm_pages()
    116  if (!page)  in mthca_alloc_icm_pages()
    119  sg_set_page(mem, page, PAGE_SIZE << order, 0);  in mthca_alloc_icm_pages()
    283  struct page *page = NULL;  in mthca_table_find()  local
    309  page = sg_page(&chunk->mem[i]);  in mthca_table_find()
    318  return page ? lowmem_page_address(page) + offset : NULL;  in mthca_table_find()
    439  static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)  in mthca_uarc_virt()  argument
    443  page * MTHCA_ICM_PAGE_SIZE;  in mthca_uarc_virt()
    [all …]

/drivers/infiniband/hw/ehca/

ipz_pt_fn.c
    84  u64 page = __pa(queue->queue_pages[i]);  in ipz_queue_abs_to_offset()  local
    85  if (addr >= page && addr < page + queue->pagesize) {  in ipz_queue_abs_to_offset()
    86  *q_offset = addr - page + i * queue->pagesize;  in ipz_queue_abs_to_offset()
    130  struct ipz_small_queue_page *page;  in alloc_small_queue_page()  local
    136  page = list_entry(pd->free[order].next,  in alloc_small_queue_page()
    139  page = kmem_cache_zalloc(small_qp_cache, GFP_KERNEL);  in alloc_small_queue_page()
    140  if (!page)  in alloc_small_queue_page()
    143  page->page = get_zeroed_page(GFP_KERNEL);  in alloc_small_queue_page()
    144  if (!page->page) {  in alloc_small_queue_page()
    145  kmem_cache_free(small_qp_cache, page);  in alloc_small_queue_page()
    [all …]
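
ipz_queue_abs_to_offset() finds which backing page contains an absolute address and converts it into a linear queue offset. A small sketch of the same search follows, on virtual addresses rather than the kernel's __pa() physical ones, with made-up structure names.

    #include <stdint.h>
    #include <stddef.h>

    struct queue {
        void   **pages;       /* the queue's backing buffers, in queue order */
        size_t   num_pages;
        size_t   pagesize;
    };

    /*
     * If the address falls inside page i, the queue offset is its offset
     * within that page plus i whole pages. Returns 0 on success, -1 if the
     * address is not backed by this queue.
     */
    static int abs_to_offset(const struct queue *q, const void *addr, size_t *q_offset)
    {
        uintptr_t a = (uintptr_t)addr;

        for (size_t i = 0; i < q->num_pages; i++) {
            uintptr_t page = (uintptr_t)q->pages[i];

            if (a >= page && a < page + q->pagesize) {
                *q_offset = (size_t)(a - page) + i * q->pagesize;
                return 0;
            }
        }
        return -1;
    }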

/drivers/target/

target_core_stat.c
    78  struct se_dev_stat_grps *sgrps, char *page)  in target_stat_scsi_dev_show_attr_inst()  argument
    84  return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);  in target_stat_scsi_dev_show_attr_inst()
    89  struct se_dev_stat_grps *sgrps, char *page)  in target_stat_scsi_dev_show_attr_indx()  argument
    94  return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_index);  in target_stat_scsi_dev_show_attr_indx()
    99  struct se_dev_stat_grps *sgrps, char *page)  in target_stat_scsi_dev_show_attr_role()  argument
    101  return snprintf(page, PAGE_SIZE, "Target\n");  in target_stat_scsi_dev_show_attr_role()
    106  struct se_dev_stat_grps *sgrps, char *page)  in target_stat_scsi_dev_show_attr_ports()  argument
    111  return snprintf(page, PAGE_SIZE, "%u\n", dev->dev_port_count);  in target_stat_scsi_dev_show_attr_ports()
    155  struct se_dev_stat_grps *sgrps, char *page)  in target_stat_scsi_tgt_dev_show_attr_inst()  argument
    161  return snprintf(page, PAGE_SIZE, "%u\n", hba->hba_index);  in target_stat_scsi_tgt_dev_show_attr_inst()
    [all …]

target_core_configfs.c
    78  char *page)  in target_core_attr_show()  argument
    80  return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"  in target_core_attr_show()
    565  char *page) \
    567  return snprintf(page, PAGE_SIZE, "%u\n", \
    574  const char *page, \
    580  ret = strict_strtoul(page, 0, &val); \
    749  char *page)  in target_core_dev_wwn_show_attr_vpd_unit_serial()  argument
    751  return sprintf(page, "T10 VPD Unit Serial Number: %s\n",  in target_core_dev_wwn_show_attr_vpd_unit_serial()
    757  const char *page,  in target_core_dev_wwn_store_attr_vpd_unit_serial()  argument
    779  if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {  in target_core_dev_wwn_store_attr_vpd_unit_serial()
    [all …]

/drivers/staging/zcache/

zcache-main.c
    47  static inline bool PageWasActive(struct page *page)  in PageWasActive()  argument
    52  static inline void SetPageWasActive(struct page *page)  in SetPageWasActive()  argument
    296  static bool page_is_zero_filled(struct page *p)  in page_is_zero_filled()
    299  char *page;  in page_is_zero_filled()  local
    301  page = kmap_atomic(p);  in page_is_zero_filled()
    302  for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {  in page_is_zero_filled()
    303  if (page[pos]) {  in page_is_zero_filled()
    304  kunmap_atomic(page);  in page_is_zero_filled()
    308  kunmap_atomic(page);  in page_is_zero_filled()
    316  struct page *page = (struct page *)p;  in handle_zero_filled_page()  local
    [all …]
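
page_is_zero_filled() maps the page and scans it, bailing out at the first nonzero byte so zero pages can be stored as a flag instead of compressed data. The sketch below does the same scan on a plain buffer, in unsigned long strides for fewer iterations (the snippet above walks it byte by byte); purely illustrative.

    #include <stdbool.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    /* Return true if the page-sized buffer contains only zero bytes. */
    static bool page_is_zero_filled(const void *buf)
    {
        const unsigned long *p = buf;

        for (size_t i = 0; i < PAGE_SIZE / sizeof(*p); i++)
            if (p[i])
                return false;      /* first nonzero word ends the scan */
        return true;
    }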

zbud.c
    102  struct page page;  member
    140  struct page *page = (struct page *)zbudpage;  in zbudpage_spin_lock()  local
    142  while (unlikely(test_and_set_bit_lock(PG_locked, &page->flags))) {  in zbudpage_spin_lock()
    145  } while (test_bit(PG_locked, &page->flags));  in zbudpage_spin_lock()
    151  struct page *page = (struct page *)zbudpage;  in zbudpage_spin_unlock()  local
    153  clear_bit(PG_locked, &page->flags);  in zbudpage_spin_unlock()
    158  return trylock_page((struct page *)zbudpage);  in zbudpage_spin_trylock()
    163  return PageLocked((struct page *)zbudpage);  in zbudpage_is_locked()
    168  return kmap_atomic((struct page *)zbudpage);  in kmap_zbudpage_atomic()
    178  struct page *page = (struct page *)zbudpage;  in zbudpage_is_dying()  local
    [all …]
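
zbud reuses the PG_locked bit in page->flags as a tiny spinlock via test_and_set_bit_lock()/clear_bit(). A userspace approximation of that bit lock using C11 atomics is sketched below; it shows the idea (test-and-test-and-set on one bit of a flags word), not the kernel's primitives.

    #include <stdatomic.h>

    #define LOCK_BIT 0u            /* stand-in for PG_locked in page->flags */

    struct zpage {
        atomic_ulong flags;        /* one bit of this word doubles as a spinlock */
    };

    /* Spin until this thread is the one that flips the lock bit from 0 to 1. */
    static void zpage_lock(struct zpage *p)
    {
        while (atomic_fetch_or_explicit(&p->flags, 1ul << LOCK_BIT,
                                        memory_order_acquire) & (1ul << LOCK_BIT)) {
            /* busy-wait until the bit clears, then retry the atomic set */
            while (atomic_load_explicit(&p->flags, memory_order_relaxed) &
                   (1ul << LOCK_BIT))
                ;
        }
    }

    /* Clearing the bit releases the lock. */
    static void zpage_unlock(struct zpage *p)
    {
        atomic_fetch_and_explicit(&p->flags, ~(1ul << LOCK_BIT),
                                  memory_order_release);
    }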

/drivers/infiniband/hw/mlx4/

doorbell.c
    47  struct mlx4_ib_user_db_page *page;  in mlx4_ib_db_map_user()  local
    53  list_for_each_entry(page, &context->db_page_list, list)  in mlx4_ib_db_map_user()
    54  if (page->user_virt == (virt & PAGE_MASK))  in mlx4_ib_db_map_user()
    57  page = kmalloc(sizeof *page, GFP_KERNEL);  in mlx4_ib_db_map_user()
    58  if (!page) {  in mlx4_ib_db_map_user()
    63  page->user_virt = (virt & PAGE_MASK);  in mlx4_ib_db_map_user()
    64  page->refcnt = 0;  in mlx4_ib_db_map_user()
    65  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,  in mlx4_ib_db_map_user()
    67  if (IS_ERR(page->umem)) {  in mlx4_ib_db_map_user()
    68  err = PTR_ERR(page->umem);  in mlx4_ib_db_map_user()
    [all …]

/drivers/gpu/drm/vmwgfx/

vmwgfx_gmr.c
    35  struct page *pages[],  in vmw_gmr2_bind()
    109  struct page *pages[],  in vmw_gmr_build_descriptors()
    112  struct page *page, *next;  in vmw_gmr_build_descriptors()  local
    124  page = alloc_page(__GFP_HIGHMEM);  in vmw_gmr_build_descriptors()
    125  if (unlikely(page == NULL)) {  in vmw_gmr_build_descriptors()
    130  list_add_tail(&page->lru, desc_pages);  in vmw_gmr_build_descriptors()
    138  desc_virtual->ppn = page_to_pfn(page);  in vmw_gmr_build_descriptors()
    142  page_virtual = kmap_atomic(page);  in vmw_gmr_build_descriptors()
    176  list_for_each_entry_safe(page, next, desc_pages, lru) {  in vmw_gmr_build_descriptors()
    177  list_del_init(&page->lru);  in vmw_gmr_build_descriptors()
    [all …]

/drivers/net/ethernet/sfc/

rx.c
    60  return page_address(buf->page) + buf->page_offset;  in efx_rx_buf_va()
    108  static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)  in efx_reuse_page()
    111  struct page *page;  in efx_reuse_page()  local
    116  page = rx_queue->page_ring[index];  in efx_reuse_page()
    117  if (page == NULL)  in efx_reuse_page()
    126  if (page_count(page) == 1) {  in efx_reuse_page()
    128  return page;  in efx_reuse_page()
    130  state = page_address(page);  in efx_reuse_page()
    134  put_page(page);  in efx_reuse_page()
    155  struct page *page;  in efx_init_rx_buffers()  local
    [all …]
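
efx_reuse_page() pulls a page out of the RX recycle ring and reuses it only when the driver holds the sole reference (page_count() == 1); otherwise it drops its reference and gives up on that slot. A simplified sketch of that check follows, with an explicit refcount field and no locking; it is not the sfc driver's data structure.

    #include <stddef.h>

    struct rx_buf {
        int refcnt;            /* stand-in for the struct page refcount */
    };

    struct rx_queue {
        struct rx_buf **ring;  /* recycle ring of previously posted buffers */
        size_t ring_size;      /* assumed to be a power of two */
        size_t head;
    };

    /*
     * A buffer is only safe to reuse if we are its sole owner, i.e. the
     * network stack has finished with it; otherwise drop our reference.
     */
    static struct rx_buf *reuse_buffer(struct rx_queue *q)
    {
        size_t index = q->head & (q->ring_size - 1);
        struct rx_buf *buf = q->ring[index];

        if (!buf)
            return NULL;           /* slot never filled */
        q->ring[index] = NULL;
        q->head++;
        if (buf->refcnt == 1)
            return buf;            /* sole owner: recycle it */
        buf->refcnt--;             /* someone else still holds it */
        return NULL;
    }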

/drivers/mtd/devices/

block2mtd.c
    38  static struct page *page_read(struct address_space *mapping, int index)  in page_read()
    47  struct page *page;  in _block2mtd_erase()  local
    54  page = page_read(mapping, index);  in _block2mtd_erase()
    55  if (IS_ERR(page))  in _block2mtd_erase()
    56  return PTR_ERR(page);  in _block2mtd_erase()
    58  max = page_address(page) + PAGE_SIZE;  in _block2mtd_erase()
    59  for (p=page_address(page); p<max; p++)  in _block2mtd_erase()
    61  lock_page(page);  in _block2mtd_erase()
    62  memset(page_address(page), 0xff, PAGE_SIZE);  in _block2mtd_erase()
    63  set_page_dirty(page);  in _block2mtd_erase()
    [all …]
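
_block2mtd_erase() scans each backing page and, only if it is not already all 0xff, rewrites it to 0xff and marks it dirty, since erased flash reads back as 0xff. A userspace sketch of that scan-then-fill step on a plain buffer is shown below; page locking and dirty tracking are left out.

    #include <string.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096

    /*
     * "Erase" a page of emulated flash: make it read as all 0xff. Scanning
     * first, in unsigned long strides, leaves already-erased pages untouched
     * so they are not needlessly rewritten.
     */
    static bool erase_page(unsigned char *page)
    {
        const unsigned long *p = (const unsigned long *)page;
        bool already_erased = true;

        for (size_t i = 0; i < PAGE_SIZE / sizeof(*p); i++) {
            if (p[i] != ~0ul) {
                already_erased = false;
                break;
            }
        }
        if (!already_erased)
            memset(page, 0xff, PAGE_SIZE);   /* a real driver would mark it dirty here */
        return !already_erased;              /* true if we actually had to write */
    }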

/drivers/char/agp/

efficeon-agp.c
    69  static inline unsigned long efficeon_mask_memory(struct page *page)  in efficeon_mask_memory()  argument
    71  unsigned long addr = page_to_phys(page);  in efficeon_mask_memory()
    164  unsigned long page = efficeon_private.l1_table[index];  in efficeon_free_gatt_table()  local
    165  if (page) {  in efficeon_free_gatt_table()
    167  ClearPageReserved(virt_to_page((char *)page));  in efficeon_free_gatt_table()
    168  free_page(page);  in efficeon_free_gatt_table()
    212  unsigned long page;  in efficeon_create_gatt_table()  local
    215  page = efficeon_private.l1_table[index];  in efficeon_create_gatt_table()
    216  BUG_ON(page);  in efficeon_create_gatt_table()
    218  page = get_zeroed_page(GFP_KERNEL);  in efficeon_create_gatt_table()
    [all …]

/drivers/md/

bitmap.c
    49  unsigned long page, int create)  in bitmap_checkpage()  argument
    55  if (page >= bitmap->pages) {  in bitmap_checkpage()
    63  if (bitmap->bp[page].hijacked)  /* it's hijacked, don't try to alloc */  in bitmap_checkpage()
    66  if (bitmap->bp[page].map)  /* page is already allocated, just return */  in bitmap_checkpage()
    82  if (!bitmap->bp[page].map)  in bitmap_checkpage()
    83  bitmap->bp[page].hijacked = 1;  in bitmap_checkpage()
    84  } else if (bitmap->bp[page].map ||  in bitmap_checkpage()
    85  bitmap->bp[page].hijacked) {  in bitmap_checkpage()
    93  bitmap->bp[page].map = mappage;  in bitmap_checkpage()
    102  static void bitmap_checkfree(struct bitmap_counts *bitmap, unsigned long page)  in bitmap_checkfree()  argument
    [all …]

/drivers/mtd/

mtdoops.c
    72  static void mark_page_used(struct mtdoops_context *cxt, int page)  in mark_page_used()  argument
    74  set_bit(page, cxt->oops_page_used);  in mark_page_used()
    77  static void mark_page_unused(struct mtdoops_context *cxt, int page)  in mark_page_unused()  argument
    79  clear_bit(page, cxt->oops_page_used);  in mark_page_unused()
    82  static int page_is_used(struct mtdoops_context *cxt, int page)  in page_is_used()  argument
    84  return test_bit(page, cxt->oops_page_used);  in page_is_used()
    103  int page;  in mtdoops_erase_block()  local
    129  for (page = start_page; page < start_page + erase_pages; page++)  in mtdoops_erase_block()
    130  mark_page_unused(cxt, page);  in mtdoops_erase_block()
    253  int ret, page, maxpos = 0;  in find_next_position()  local
    [all …]
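
mtdoops tracks which oops record slots are occupied with one bit per page, using set_bit()/clear_bit()/test_bit() on a bitmap, and clears the bits for a whole block after erasing it. The sketch below is the same bookkeeping on a plain unsigned long array, with an invented slot count.

    #include <limits.h>

    #define NUM_OOPS_PAGES 128
    #define BITS_PER_LONG  (CHAR_BIT * sizeof(unsigned long))

    /* One bit per record slot, like cxt->oops_page_used. */
    static unsigned long page_used[(NUM_OOPS_PAGES + BITS_PER_LONG - 1) / BITS_PER_LONG];

    static void mark_page_used(int page)
    {
        page_used[page / BITS_PER_LONG] |= 1ul << (page % BITS_PER_LONG);
    }

    static void mark_page_unused(int page)
    {
        page_used[page / BITS_PER_LONG] &= ~(1ul << (page % BITS_PER_LONG));
    }

    static int page_is_used(int page)
    {
        return (page_used[page / BITS_PER_LONG] >> (page % BITS_PER_LONG)) & 1;
    }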

/drivers/staging/zcache/ramster/

nodemanager.c
    174  static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page)  in r2nm_node_num_read()  argument
    176  return sprintf(page, "%d\n", node->nd_num);  in r2nm_node_num_read()
    193  static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page,  in r2nm_node_num_write()  argument
    198  char *p = (char *)page;  in r2nm_node_num_write()
    230  static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page)  in r2nm_node_ipv4_port_read()  argument
    232  return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));  in r2nm_node_ipv4_port_read()
    236  const char *page, size_t count)  in r2nm_node_ipv4_port_write()  argument
    239  char *p = (char *)page;  in r2nm_node_ipv4_port_write()
    256  static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page)  in r2nm_node_ipv4_address_read()  argument
    258  return sprintf(page, "%pI4\n", &node->nd_ipv4_address);  in r2nm_node_ipv4_address_read()
    [all …]