/drivers/staging/android/ion/

D  ion_page_pool.c

     28  static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
     30          struct page *page = alloc_pages(pool->gfp_mask, pool->order);
     37  static void ion_page_pool_free_pages(struct ion_page_pool *pool,
     40          __free_pages(page, pool->order);
     43  static int ion_page_pool_add(struct ion_page_pool *pool, struct page *page)
     45          mutex_lock(&pool->mutex);
     47          list_add_tail(&page->lru, &pool->high_items);
     48          pool->high_count++;
     50          list_add_tail(&page->lru, &pool->low_items);
     51          pool->low_count++;
    [all …]
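
The excerpt shows the core of ion's page pool: hand out a cached page when one is available, fall back to alloc_pages() otherwise, and return freed pages to per-pool lists under a mutex. A minimal userspace sketch of that pattern (illustrative names, not the ion API; one free list stands in for the separate high/low item lists):

    #include <pthread.h>
    #include <stdlib.h>

    struct pool_page { struct pool_page *next; void *mem; };

    struct page_pool {
        pthread_mutex_t mutex;
        struct pool_page *free_list;   /* models ion's high/low item lists */
        size_t page_size;
        int count;
    };

    /* Take a cached page if one exists, otherwise fall back to the
     * allocator, mirroring the pool-then-alloc_pages() order above. */
    static void *pool_alloc(struct page_pool *p)
    {
        pthread_mutex_lock(&p->mutex);
        struct pool_page *pg = p->free_list;
        if (pg) {
            p->free_list = pg->next;
            p->count--;
        }
        pthread_mutex_unlock(&p->mutex);
        if (pg) {
            void *mem = pg->mem;
            free(pg);
            return mem;
        }
        return malloc(p->page_size);   /* stands in for alloc_pages() */
    }

    /* Return a page to the pool instead of freeing it. */
    static void pool_free(struct page_pool *p, void *mem)
    {
        struct pool_page *pg = malloc(sizeof(*pg));
        if (!pg) { free(mem); return; }  /* cannot cache it; really free */
        pg->mem = mem;
        pthread_mutex_lock(&p->mutex);
        pg->next = p->free_list;
        p->free_list = pg;
        p->count++;
        pthread_mutex_unlock(&p->mutex);
    }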

D  ion_system_heap.c

    in alloc_buffer_page():
     67          struct ion_page_pool *pool;
     71          pool = heap->uncached_pools[order_to_index(order)];
     73          pool = heap->cached_pools[order_to_index(order)];
     75          page = ion_page_pool_alloc(pool);
    in free_buffer_page():
     83          struct ion_page_pool *pool;
     94          pool = heap->uncached_pools[order_to_index(order)];
     96          pool = heap->cached_pools[order_to_index(order)];
     98          ion_page_pool_free(pool, page);
    in ion_system_heap_debug_show():
    262          struct ion_page_pool *pool;
    265          pool = sys_heap->uncached_pools[i];
    [all …]

/drivers/md/

D  dm-thin.c

    229  struct pool {
    285  static void metadata_operation_failed(struct pool *pool, const char *op, int r);
    287  static enum pool_mode get_pool_mode(struct pool *pool)
    289          return pool->pf.mode;
    292  static void notify_of_pool_mode_change(struct pool *pool)
    302          enum pool_mode mode = get_pool_mode(pool);
    305          if (!pool->pf.error_if_no_space)
    311          dm_table_event(pool->ti->table);
    313          dm_device_name(pool->pool_md),
    322          struct pool *pool;    (struct member)
    [all …]

/drivers/infiniband/core/

D  fmr_pool.c

     95          void (*flush_function)(struct ib_fmr_pool *pool,    (struct member)
    115  static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
    123          if (!pool->cache_bucket)
    126          bucket = pool->cache_bucket + ib_fmr_hash(*page_list);
    138  static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
    145          spin_lock_irq(&pool->pool_lock);
    147          list_for_each_entry(fmr, &pool->dirty_list, list) {
    160          list_splice_init(&pool->dirty_list, &unmap_list);
    161          pool->dirty_len = 0;
    163          spin_unlock_irq(&pool->pool_lock);
    [all …]
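
ib_fmr_batch_release() shows a common locking idiom: splice the whole dirty list onto a private list while holding pool_lock (the list_splice_init() on line 160), then do the slow release work with the lock dropped. A rough userspace rendering of the idiom, assuming a toy singly linked list in place of the kernel's list_head:

    #include <pthread.h>
    #include <stdio.h>

    struct node { struct node *next; int id; };

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *dirty_list;   /* models pool->dirty_list */
    static int dirty_len;

    static void batch_release(void)
    {
        /* Splice the dirty list to a local head under the lock... */
        pthread_mutex_lock(&pool_lock);
        struct node *unmap_list = dirty_list;
        dirty_list = NULL;
        dirty_len = 0;
        pthread_mutex_unlock(&pool_lock);

        /* ...then walk it with the lock dropped, so other threads can
         * keep queuing dirty entries while the slow work runs. */
        while (unmap_list) {
            struct node *n = unmap_list;
            unmap_list = n->next;
            printf("releasing entry %d\n", n->id);  /* stands in for unmap */
        }
    }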

/drivers/infiniband/sw/rxe/

D  rxe_pool.c

    105  static inline const char *pool_name(struct rxe_pool *pool)
    107          return rxe_type_info[pool->type].name;
    110  static inline struct kmem_cache *pool_cache(struct rxe_pool *pool)
    112          return rxe_type_info[pool->type].cache;
    161  static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
    166          if ((max - min + 1) < pool->max_elem) {
    172          pool->max_index = max;
    173          pool->min_index = min;
    176          pool->table = kmalloc(size, GFP_KERNEL);
    177          if (!pool->table) {
    [all …]
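
rxe_pool_init_index() first checks that the index window [min, max] is large enough for pool->max_elem objects, and only then allocates the lookup table. The same guard in plain C (hypothetical names; error handling reduced to return codes):

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct idx_pool {
        uint32_t min_index, max_index;
        size_t max_elem;
        void **table;
    };

    static int idx_pool_init(struct idx_pool *pool, uint32_t max, uint32_t min)
    {
        /* The window must hold at least max_elem distinct indices. */
        if ((size_t)(max - min + 1) < pool->max_elem)
            return -EINVAL;

        pool->max_index = max;
        pool->min_index = min;

        pool->table = calloc(max - min + 1, sizeof(*pool->table));
        if (!pool->table)
            return -ENOMEM;
        return 0;
    }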

/drivers/gpu/drm/ttm/

D  ttm_page_alloc_dma.c

    160          struct dma_pool *pool;    (struct member)
    309  static int ttm_set_pages_caching(struct dma_pool *pool,
    314          if (pool->type & IS_UC) {
    318                          pool->dev_name, cpages);
    320          if (pool->type & IS_WC) {
    324                          pool->dev_name, cpages);
    329  static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
    332          dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
    337  static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
    345          d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
    [all …]

D  ttm_page_alloc.c

    288  static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
    291          pool->npages -= freed_pages;
    292          pool->nfrees += freed_pages;
    305  static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
    329          spin_lock_irqsave(&pool->lock, irq_flags);
    331          list_for_each_entry_reverse(p, &pool->list, lru) {
    339                  __list_del(p->lru.prev, &pool->list);
    341                  ttm_pool_update_free_locked(pool, freed_pages);
    346          spin_unlock_irqrestore(&pool->lock, irq_flags);
    374                  __list_del(&p->lru, &pool->list);
    [all …]

/drivers/staging/media/atomisp/pci/atomisp2/css2400/runtime/rmgr/src/

D  rmgr_vbuf.c

    139  enum ia_css_err ia_css_rmgr_init_vbuf(struct ia_css_rmgr_vbuf_pool *pool)
    144          assert(pool != NULL);
    145          if (pool == NULL)
    148          if (pool->recycle && pool->size) {
    152                          pool->size;
    153                  pool->handles = sh_css_malloc(bytes_needed);
    154                  if (pool->handles != NULL)
    155                          memset(pool->handles, 0, bytes_needed);
    160                  pool->size = 0;
    161                  pool->handles = NULL;
    [all …]

/drivers/staging/lustre/lustre/lov/

D  lov_pool.c

     51  static void lov_pool_getref(struct pool_desc *pool)
     53          CDEBUG(D_INFO, "pool %p\n", pool);
     54          atomic_inc(&pool->pool_refcount);
     57  void lov_pool_putref(struct pool_desc *pool)
     59          CDEBUG(D_INFO, "pool %p\n", pool);
     60          if (atomic_dec_and_test(&pool->pool_refcount)) {
     61                  LASSERT(hlist_unhashed(&pool->pool_hash));
     62                  LASSERT(list_empty(&pool->pool_list));
     63                  LASSERT(!pool->pool_debugfs_entry);
     64                  lov_ost_pool_free(&pool->pool_obds);
    [all …]
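
lov_pool_getref()/lov_pool_putref() are a standard get/put pair: whoever drops the reference count to zero frees the object. The equivalent shape in C11 atomics (simplified; the real putref also sanity-checks hash and list membership under LASSERT before freeing):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct pool_desc {
        atomic_int pool_refcount;
        /* ... pool state ... */
    };

    static void pool_getref(struct pool_desc *pool)
    {
        atomic_fetch_add(&pool->pool_refcount, 1);
    }

    static void pool_putref(struct pool_desc *pool)
    {
        /* atomic_fetch_sub returns the old value, so old == 1 means this
         * caller dropped the last reference and owns the teardown. */
        if (atomic_fetch_sub(&pool->pool_refcount, 1) == 1)
            free(pool);
    }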

/drivers/staging/octeon/

D  ethernet-mem.c

     30  static int cvm_oct_fill_hw_skbuff(int pool, int size, int elements)
     41          cvmx_fpa_free(skb->data, pool, size / 128);
     53  static void cvm_oct_free_hw_skbuff(int pool, int size, int elements)
     58          memory = cvmx_fpa_alloc(pool);
     69                  pool, elements);
     72                  pool, elements);
     83  static int cvm_oct_fill_hw_memory(int pool, int size, int elements)
    103                  elements * size, pool);
    108          cvmx_fpa_free(fpa, pool, 0);
    120  static void cvm_oct_free_hw_memory(int pool, int size, int elements)
    [all …]

/drivers/dma/

D  coh901318_lli.c

     19  #define DEBUGFS_POOL_COUNTER_RESET(pool) (pool->debugfs_pool_counter = 0)
     20  #define DEBUGFS_POOL_COUNTER_ADD(pool, add) (pool->debugfs_pool_counter += add)
     22  #define DEBUGFS_POOL_COUNTER_RESET(pool)
     23  #define DEBUGFS_POOL_COUNTER_ADD(pool, add)
     35  int coh901318_pool_create(struct coh901318_pool *pool,
     39          spin_lock_init(&pool->lock);
     40          pool->dev = dev;
     41          pool->dmapool = dma_pool_create("lli_pool", dev, size, align, 0);
     43          DEBUGFS_POOL_COUNTER_RESET(pool);
     47  int coh901318_pool_destroy(struct coh901318_pool *pool)
    [all …]
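
The two definitions of each DEBUGFS_POOL_COUNTER_* macro (lines 19-20 versus 22-23) are the usual trick for statistics that should disappear from non-debug builds: one variant updates the counter, the other expands to nothing. A compilable toy with a stand-in config symbol:

    #include <stdio.h>

    struct lli_pool { long debugfs_pool_counter; };

    #ifdef POOL_DEBUGFS   /* stand-in for the driver's CONFIG_DEBUG_FS test */
    #define POOL_COUNTER_RESET(pool)    ((pool)->debugfs_pool_counter = 0)
    #define POOL_COUNTER_ADD(pool, add) ((pool)->debugfs_pool_counter += (add))
    #else
    /* Expand to nothing: production builds pay no cost at the call sites. */
    #define POOL_COUNTER_RESET(pool)
    #define POOL_COUNTER_ADD(pool, add)
    #endif

    int main(void)
    {
        struct lli_pool pool = { 0 };
        POOL_COUNTER_RESET(&pool);
        POOL_COUNTER_ADD(&pool, 4);   /* counts only when POOL_DEBUGFS is set */
        printf("%ld\n", pool.debugfs_pool_counter);
        return 0;
    }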

/drivers/net/ethernet/mellanox/mlxsw/

D  spectrum_cnt.c

    in mlxsw_sp_counter_pool_init():
    101          struct mlxsw_sp_counter_pool *pool;
    118          pool = kzalloc(sizeof(*pool), GFP_KERNEL);
    119          if (!pool)
    122          pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
    123          map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
    125          pool->usage = kzalloc(map_size, GFP_KERNEL);
    126          if (!pool->usage) {
    131          pool->sub_pools = mlxsw_sp_counter_sub_pools;
    137          sub_pool = &pool->sub_pools[i];
    143          if (sub_pool->base_index + sub_pool->size > pool->pool_size)
    [all …]
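
Line 123 sizes the usage bitmap as BITS_TO_LONGS(pool_size) * sizeof(unsigned long): round the bit count up to whole longs, then convert to bytes. The same arithmetic outside the kernel, with BITS_TO_LONGS written out since it is a kernel macro:

    #include <limits.h>
    #include <stdlib.h>

    #define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
    /* Round nbits up to the number of unsigned longs needed to hold them. */
    #define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    /* kzalloc()-style allocation of a zeroed usage bitmap with one bit per
     * counter: e.g. pool_size = 100 needs 2 longs (16 bytes) on 64-bit. */
    static unsigned long *alloc_usage_bitmap(size_t pool_size)
    {
        size_t map_size = BITS_TO_LONGS(pool_size) * sizeof(unsigned long);
        return calloc(1, map_size);
    }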

/drivers/net/ethernet/marvell/

D  mvneta_bm.h

     33  #define MVNETA_BM_XBAR_POOL_REG(pool) \
     34          (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG)
     35  #define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0)
     36  #define MVNETA_BM_TARGET_ID_MASK(pool) \
     37          (0xf << MVNETA_BM_TARGET_ID_OFFS(pool))
     38  #define MVNETA_BM_TARGET_ID_VAL(pool, id) \
     39          ((id) << MVNETA_BM_TARGET_ID_OFFS(pool))
     40  #define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ? 20 : 4)
     41  #define MVNETA_BM_XBAR_ATTR_MASK(pool) \
     42          (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool))
    [all …]
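
These macros pack two pools' fields into one register: even pools use the low half-word, odd pools the high one, so the OFFS/MASK/VAL helpers all key on pool & 1. A compilable illustration of the resulting read-modify-write style (toy register and field names, same macro shape):

    #include <stdint.h>
    #include <stdio.h>

    /* Even pools live at bit 0, odd pools at bit 16, as in mvneta_bm.h. */
    #define TARGET_ID_OFFS(pool)    (((pool) & 1) ? 16 : 0)
    #define TARGET_ID_MASK(pool)    (0xf << TARGET_ID_OFFS(pool))
    #define TARGET_ID_VAL(pool, id) ((id) << TARGET_ID_OFFS(pool))

    static void set_target_id(uint32_t *reg, int pool, int id)
    {
        /* Clear this pool's field, then OR in the new value. */
        *reg &= ~TARGET_ID_MASK(pool);
        *reg |= TARGET_ID_VAL(pool, id);
    }

    int main(void)
    {
        uint32_t reg = 0;
        set_target_id(&reg, 0, 0x3);   /* even pool -> bits 3:0 */
        set_target_id(&reg, 1, 0x5);   /* odd pool  -> bits 19:16 */
        printf("0x%08x\n", reg);       /* prints 0x00050003 */
        return 0;
    }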

/drivers/tee/

D  tee_shm_pool.c

     52  static void pool_res_mem_destroy(struct tee_shm_pool *pool)
     54          gen_pool_destroy(pool->private_mgr.private_data);
     55          gen_pool_destroy(pool->dma_buf_mgr.private_data);
    in tee_shm_pool_alloc_res_mem():
    107          struct tee_shm_pool *pool = NULL;
    110          pool = kzalloc(sizeof(*pool), GFP_KERNEL);
    111          if (!pool) {
    119          ret = pool_res_mem_mgr_init(&pool->private_mgr, priv_info,
    127          ret = pool_res_mem_mgr_init(&pool->dma_buf_mgr, dmabuf_info,
    132          pool->destroy = pool_res_mem_destroy;
    133          return pool;
    [all …]

/drivers/gpu/drm/i915/

D  i915_gem_batch_pool.c

    in i915_gem_batch_pool_init():
     48                               struct i915_gem_batch_pool *pool)
     52          pool->engine = engine;
     54          for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
     55                  INIT_LIST_HEAD(&pool->cache_list[n]);
     64  void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
     68          lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
     70          for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
     74                          &pool->cache_list[n],
     78                  INIT_LIST_HEAD(&pool->cache_list[n]);
     96  i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
    [all …]

/drivers/net/ethernet/ibm/

D  ibmveth.c

    158  static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
    162          pool->size = pool_size;
    163          pool->index = pool_index;
    164          pool->buff_size = buff_size;
    165          pool->threshold = pool_size * 7 / 8;
    166          pool->active = pool_active;
    170  static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
    174          pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
    176          if (!pool->free_map)
    179          pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
    [all …]
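
ibmveth_init_buffer_pool() only records the pool geometry (note the 7/8 refill threshold on line 165); the arrays come later in ibmveth_alloc_buffer_pool(), which has to unwind the first allocation if the second fails. A userspace model with illustrative types:

    #include <stdint.h>
    #include <stdlib.h>

    struct buff_pool {
        uint32_t size, buff_size, threshold, index;
        int active;
        uint16_t *free_map;
        uintptr_t *dma_addr;   /* stands in for dma_addr_t entries */
    };

    static void buff_pool_init(struct buff_pool *pool, uint32_t index,
                               uint32_t pool_size, uint32_t buff_size,
                               int active)
    {
        pool->size = pool_size;
        pool->index = index;
        pool->buff_size = buff_size;
        pool->threshold = pool_size * 7 / 8;  /* refill below 7/8 full */
        pool->active = active;
    }

    static int buff_pool_alloc(struct buff_pool *pool)
    {
        pool->free_map = malloc(sizeof(*pool->free_map) * pool->size);
        if (!pool->free_map)
            return -1;

        pool->dma_addr = calloc(pool->size, sizeof(*pool->dma_addr));
        if (!pool->dma_addr) {
            free(pool->free_map);  /* unwind the earlier allocation */
            pool->free_map = NULL;
            return -1;
        }
        return 0;
    }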

/drivers/mtd/ubi/

D  fastmap-wl.c

    in return_unused_pool_pebs():
     57                                     struct ubi_fm_pool *pool)
     62          for (i = pool->used; i < pool->size; i++) {
     63                  e = ubi->lookuptbl[pool->pebs[i]];
    in ubi_refill_pools():
    122          struct ubi_fm_pool *pool = &ubi->fm_pool;
    129          return_unused_pool_pebs(ubi, pool);
    132          pool->size = 0;
    136          if (pool->size < pool->max_size) {
    144                  pool->pebs[pool->size] = e->pnum;
    145                  pool->size++;
    169          pool->used = 0;
    [all …]

/drivers/net/ethernet/ti/

D  davinci_cpdma.c

    107          struct cpdma_desc_pool *pool;    (struct member)
    in cpdma_desc_pool_destroy():
    188          struct cpdma_desc_pool *pool = ctlr->pool;
    190          if (!pool)
    193          WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
    195               gen_pool_size(pool->gen_pool),
    196               gen_pool_avail(pool->gen_pool));
    197          if (pool->cpumap)
    198                  dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
    199                                    pool->phys);
    in cpdma_desc_pool_create():
    211          struct cpdma_desc_pool *pool;
    [all …]
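
cpdma_desc_pool_destroy() WARNs when gen_pool_size() != gen_pool_avail(), i.e. when descriptors are still checked out at teardown. The same leak guard with plain counters standing in for the genpool accounting:

    #include <stdio.h>
    #include <stdlib.h>

    struct desc_pool {
        size_t total;        /* descriptors the pool owns */
        size_t available;    /* descriptors currently free */
        void *cpumap;        /* backing memory */
    };

    static void desc_pool_destroy(struct desc_pool *pool)
    {
        if (!pool)
            return;

        /* Anything not returned to the pool is a leak; say so loudly,
         * as the driver's WARN() does, before freeing the backing store. */
        if (pool->total != pool->available)
            fprintf(stderr, "cpdma_desc_pool size %zu != avail %zu\n",
                    pool->total, pool->available);

        free(pool->cpumap);
        free(pool);
    }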

/drivers/xen/

D  tmem.c

    167  static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
    173          if (pool < 0)
    178          (void)xen_tmem_put_page((u32)pool, oid, ind, page);
    181  static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
    189          if (pool < 0)
    193          ret = xen_tmem_get_page((u32)pool, oid, ind, page);
    200  static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
    206          if (pool < 0)
    210          (void)xen_tmem_flush_page((u32)pool, oid, ind);
    213  static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
    [all …]

/drivers/atm/

D  ambassador.c

    687  static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
    688          amb_rxq * rxq = &dev->rxq[pool];
    691          PRINTD (DBG_FLOW|DBG_RX, "rx_give %p[%hu]", dev, pool);
    702          wr_mem (dev, offsetof(amb_mem, mb.adapter.rx_address[pool]), virt_to_bus (rxq->in.ptr));
    712  static int rx_take (amb_dev * dev, unsigned char pool) {
    713          amb_rxq * rxq = &dev->rxq[pool];
    716          PRINTD (DBG_FLOW|DBG_RX, "rx_take %p[%hu]", dev, pool);
    745  static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
    746          amb_rxq * rxq = &dev->rxq[pool];
    748          PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
    [all …]

/drivers/s390/scsi/

D  zfcp_aux.c

    in zfcp_allocate_low_mem_buffers():
    203          adapter->pool.erp_req =
    205          if (!adapter->pool.erp_req)
    208          adapter->pool.gid_pn_req =
    210          if (!adapter->pool.gid_pn_req)
    213          adapter->pool.scsi_req =
    215          if (!adapter->pool.scsi_req)
    218          adapter->pool.scsi_abort =
    220          if (!adapter->pool.scsi_abort)
    223          adapter->pool.status_read_req =
    226          if (!adapter->pool.status_read_req)
    [all …]

/drivers/net/ethernet/chelsio/libcxgb/

D  libcxgb_ppm.c

    in ppm_get_cpu_entries():
    122          struct cxgbi_ppm_pool *pool;
    127          pool = per_cpu_ptr(ppm->pool, cpu);
    128          spin_lock_bh(&pool->lock);
    131          i = ppm_find_unused_entries(pool->bmap, ppm->pool_index_max,
    132                                      pool->next, count, 0);
    134                  pool->next = 0;
    135                  spin_unlock_bh(&pool->lock);
    139          pool->next = i + count;
    140          if (pool->next >= ppm->pool_index_max)
    141                  pool->next = 0;
    [all …]
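
ppm_get_cpu_entries() scans a per-CPU bitmap for `count` consecutive free slots starting at a rotating cursor (pool->next), resetting the cursor to 0 when it runs off the end. A self-contained model of the cursor logic; note the kernel version gives up on the CPU's pool after one failed scan, whereas this sketch retries once from the start:

    #include <stdbool.h>
    #include <stddef.h>

    struct cpu_pool {
        bool used[256];       /* models the per-CPU bitmap */
        size_t next;          /* rotating start cursor */
        size_t max;           /* == 256 here; ppm->pool_index_max */
    };

    /* Find `count` consecutive free slots at or after `start`; -1 if none. */
    static long find_unused(struct cpu_pool *p, size_t start, size_t count)
    {
        for (size_t i = start; i + count <= p->max; i++) {
            size_t run = 0;
            while (run < count && !p->used[i + run])
                run++;
            if (run == count)
                return (long)i;
            i += run;   /* skip past the used slot that broke the run */
        }
        return -1;
    }

    static long pool_get_entries(struct cpu_pool *p, size_t count)
    {
        long i = find_unused(p, p->next, count);
        if (i < 0) {
            /* Wrap: retry the search from the start of the bitmap. */
            p->next = 0;
            i = find_unused(p, 0, count);
            if (i < 0)
                return -1;
        }
        for (size_t k = 0; k < count; k++)
            p->used[i + k] = true;
        p->next = (size_t)i + count;
        if (p->next >= p->max)
            p->next = 0;
        return i;
    }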

/drivers/scsi/lpfc/

D  lpfc_mem.c

    in lpfc_mem_alloc():
     89          struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
    123          pool->elements = kmalloc(sizeof(struct lpfc_dmabuf) *
    125          if (!pool->elements)
    128          pool->max_count = 0;
    129          pool->current_count = 0;
    131          pool->elements[i].virt = dma_pool_alloc(phba->lpfc_mbuf_pool,
    132                                          GFP_KERNEL, &pool->elements[i].phys);
    133          if (!pool->elements[i].virt)
    135          pool->max_count++;
    136          pool->current_count++;
    [all …]

/drivers/soc/ti/

D  knav_qmss_queue.c

    665  static void kdesc_fill_pool(struct knav_pool *pool)
    670          region = pool->region;
    671          pool->desc_size = region->desc_size;
    672          for (i = 0; i < pool->num_desc; i++) {
    673                  int index = pool->region_offset + i;
    677                  dma_size = ALIGN(pool->desc_size, SMP_CACHE_BYTES);
    678                  dma_sync_single_for_device(pool->dev, dma_addr, dma_size,
    680                  knav_queue_push(pool->queue, dma_addr, dma_size, 0);
    685  static void kdesc_empty_pool(struct knav_pool *pool)
    692          if (!pool->queue)
    [all …]

/drivers/scsi/megaraid/

D  megaraid_mm.c

    in mraid_mm_attach_buf():
    518          mm_dmapool_t *pool;
    535          pool = &adp->dma_pool_list[i];
    537          if (xferlen > pool->buf_size)
    543          spin_lock_irqsave(&pool->lock, flags);
    545          if (!pool->in_use) {
    547                  pool->in_use = 1;
    549                  kioc->buf_vaddr = pool->vaddr;
    550                  kioc->buf_paddr = pool->paddr;
    552          spin_unlock_irqrestore(&pool->lock, flags);
    556          spin_unlock_irqrestore(&pool->lock, flags);
    [all …]
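
mraid_mm_attach_buf() walks a short list of fixed-size DMA pools sorted by size, skips pools smaller than the transfer length, and claims the first idle one under its lock. The selection loop modeled in userspace (pthread mutexes in place of the kernel spinlock):

    #include <pthread.h>
    #include <stddef.h>

    #define MAX_POOLS 4

    struct dma_buf_pool {
        pthread_mutex_t lock;
        size_t buf_size;
        int in_use;
        void *vaddr;
    };

    /* Return the first idle pool big enough for xferlen, marked busy;
     * the pools are assumed sorted by ascending buf_size, as in megaraid. */
    static struct dma_buf_pool *attach_buf(struct dma_buf_pool *pools,
                                           size_t xferlen)
    {
        for (int i = 0; i < MAX_POOLS; i++) {
            struct dma_buf_pool *pool = &pools[i];

            if (xferlen > pool->buf_size)
                continue;   /* too small, try the next size class */

            pthread_mutex_lock(&pool->lock);
            if (!pool->in_use) {
                pool->in_use = 1;
                pthread_mutex_unlock(&pool->lock);
                return pool;
            }
            pthread_mutex_unlock(&pool->lock);
        }
        return NULL;   /* no suitable pool free */
    }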