Searched refs:queue_size (Results 1 – 13 of 13) sorted by relevance
167 u16 queue_size; member
521 if (ENDP_QUEUE_SIZE > --endp->queue_size) { in u132_hcd_giveback_urb()
560 if (ENDP_QUEUE_SIZE > --endp->queue_size) { in u132_hcd_abandon_urb()
1926 endp->queue_size = 1; in create_endpoint_and_queue_int()
1943 if (endp->queue_size++ < ENDP_QUEUE_SIZE) { in queue_int_on_old_endpoint()
1949 endp->queue_size -= 1; in queue_int_on_old_endpoint()
2025 endp->queue_size = 1; in create_endpoint_and_queue_bulk()
2040 if (endp->queue_size++ < ENDP_QUEUE_SIZE) { in queue_bulk_on_old_endpoint()
2046 endp->queue_size -= 1; in queue_bulk_on_old_endpoint()
2114 endp->queue_size = 1; in create_endpoint_and_queue_control()
[all …]
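The u132-hcd hits above amount to a bounded per-endpoint counter: queue_size starts at 1 when an endpoint is created, is post-incremented against the ENDP_QUEUE_SIZE limit when another URB is queued, and is decremented when an URB is given back or abandoned. A minimal stand-alone sketch of that admit/undo pattern (the struct, helpers and the limit value are illustrative, not the driver's):

#define ENDP_QUEUE_SIZE 4              /* illustrative limit, not the driver's value */

struct endp_stub {
	unsigned int queue_size;       /* URBs currently queued on this endpoint */
};

/* Admit a new URB only while the queue is below its limit. */
static int endp_enqueue(struct endp_stub *endp)
{
	if (endp->queue_size++ < ENDP_QUEUE_SIZE)
		return 0;              /* accepted */
	endp->queue_size -= 1;         /* over the limit: undo the increment */
	return -1;                     /* caller must defer or fail the URB */
}

/* One URB completes; room may now exist for a deferred transfer. */
static void endp_giveback(struct endp_stub *endp)
{
	if (ENDP_QUEUE_SIZE > --endp->queue_size) {
		/* the next queued transfer could be restarted here */
	}
}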
94 unsigned queue_size; member
391 m->queue_size--; in map_io()
397 m->queue_size++; in map_io()
439 if (!m->queue_if_no_path && m->queue_size) in queue_if_no_path()
1042 if (!m->nr_valid_paths++ && m->queue_size) { in reinstate_path()
1397 DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count); in multipath_status()
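In the multipath hits, queue_size counts I/O that is being held back: map_io() decrements it when a held request is re-issued and increments it when a request has to be parked, queue_if_no_path() and reinstate_path() test it together with the queue_if_no_path flag and nr_valid_paths to decide whether parked work should be flushed, and multipath_status() reports it. A rough sketch of that bookkeeping, with hypothetical names and none of the real driver's locking:

struct mpath_stub {
	unsigned queue_if_no_path;     /* policy: hold I/O while no path is valid */
	unsigned nr_valid_paths;       /* currently usable paths */
	unsigned queue_size;           /* requests currently held back */
};

/* Map one request: reissue it on a path, or park it if none is usable. */
static int mpath_map(struct mpath_stub *m, int was_queued)
{
	if (was_queued)
		m->queue_size--;       /* it is leaving the hold queue */

	if (!m->nr_valid_paths && m->queue_if_no_path) {
		m->queue_size++;       /* no path: park the request again */
		return 0;              /* queued */
	}
	return 1;                      /* mapped to a path */
}

/* A path came back: held requests, if any, can now be re-driven. */
static int mpath_reinstate(struct mpath_stub *m)
{
	return !m->nr_valid_paths++ && m->queue_size;
}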
320 int queue_size; member
343 ta->queue_size = MAX_TAGS; in init_tags()
366 TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) { in is_lun_busy()
2149 if (ta->queue_size > ta->nr_allocated) in NCR5380_information_transfer()
2150 ta->nr_allocated = ta->queue_size; in NCR5380_information_transfer()
326 int queue_size; member
349 ta->queue_size = MAX_TAGS; in init_tags()
372 TagAlloc[cmd->device->id][cmd->device->lun].queue_size) { in is_lun_busy()
2186 if (ta->queue_size > ta->nr_allocated) in NCR5380_information_transfer()
2187 ta->nr_allocated = ta->queue_size; in NCR5380_information_transfer()
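The two NCR5380 variants above carry the same tag-allocation code: queue_size is the per-target/LUN ceiling on outstanding tagged commands, init_tags() seeds it with MAX_TAGS, and is_lun_busy() refuses another command once the allocated-tag count reaches it. A stripped-down sketch of that check; the table dimensions and MAX_TAGS value are placeholders, not the drivers' real limits:

#define MAX_TAGS 32                    /* placeholder; driver-defined in reality */
#define MAX_ID    8
#define MAX_LUN   8

struct tag_alloc_stub {
	int queue_size;                /* max outstanding tagged commands per LUN */
	int nr_allocated;              /* tags currently in use */
};

static struct tag_alloc_stub TagAlloc[MAX_ID][MAX_LUN];

static void init_tags_stub(void)
{
	int id, lun;

	for (id = 0; id < MAX_ID; id++)
		for (lun = 0; lun < MAX_LUN; lun++)
			TagAlloc[id][lun].queue_size = MAX_TAGS;
}

/* Refuse another tagged command once the per-LUN limit is reached. */
static int is_lun_busy_stub(int id, int lun)
{
	struct tag_alloc_stub *ta = &TagAlloc[id][lun];

	return ta->nr_allocated >= ta->queue_size;
}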
651 ushort queue_size; /* Max number of cp that can be queued */ member
1125 info.queue_size = DEV2H16(info.queue_size); in port_detect()
1284 shost->can_queue = (ushort) info.queue_size; in port_detect()
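Here queue_size comes from the adapter itself: port_detect() converts the firmware-reported value with DEV2H16 and then uses it as the host's can_queue. Assuming DEV2H16 is a device-to-host 16-bit byte-order conversion (the adapter presumably reports its configuration big-endian), the conversion is just:

#include <stdint.h>

/* Hypothetical stand-in for DEV2H16: 16-bit big-endian field to host order. */
static uint16_t dev_to_host16(const uint8_t raw[2])
{
	return (uint16_t)((raw[0] << 8) | raw[1]);
}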
298 const uint queue_size = in qp_alloc_queue() local
303 q_header = vmalloc(queue_size); in qp_alloc_queue()
620 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); in qp_host_alloc_queue() local
624 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); in qp_host_alloc_queue()
635 (struct page **)((u8 *)queue + queue_size); in qp_host_alloc_queue()
648 static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) in qp_host_free_queue() argument
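qp_host_alloc_queue() above sizes a single allocation that holds the queue bookkeeping structures and, directly behind them, the array of page pointers: queue_size covers the structs, queue_page_size the pointer array, and the array is then addressed as (u8 *)queue + queue_size. A user-space sketch of that single-allocation layout (the stub types and field names are invented for illustration; the kernel code uses kzalloc rather than calloc):

#include <stdlib.h>
#include <stdint.h>

struct page;                           /* opaque here; the kernel uses struct page * */

struct queue_stub {
	uint64_t size;                 /* stand-in for the real bookkeeping fields */
	struct page **pages;           /* points into the same allocation */
};

static struct queue_stub *alloc_queue_stub(size_t num_pages)
{
	const size_t queue_size = sizeof(struct queue_stub);
	const size_t page_array_size = num_pages * sizeof(struct page *);
	struct queue_stub *queue;

	/* One allocation: the struct first, the page-pointer array right behind it. */
	queue = calloc(1, queue_size + page_array_size);
	if (!queue)
		return NULL;

	queue->pages = (struct page **)((uint8_t *)queue + queue_size);
	return queue;
}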
450 u32 queue_size; /* bytes for small queues, pages otherwise */ member
349 parms->squeue.queue_size = in hipz_h_alloc_resource_qp()
351 parms->rqueue.queue_size = in hipz_h_alloc_resource_qp()
294 if (!parms->queue_size) in init_qp_queue()
303 nr_q_pages = parms->queue_size; in init_qp_queue()
194 word queue_size; member
823 this->queue_size = 1000; in diva_register_appl()
496 u8 queue_size; member
1165 req->queue_size = be_encoded_q_len(txq->len); in be_cmd_txq_create()
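In this last hit the TX ring length is not handed to the firmware as a raw entry count; be_cmd_txq_create() stores the value produced by be_encoded_q_len(txq->len) instead. A hedged sketch of an fls()-style power-of-two encoding of that kind (the exact mapping belongs to the hardware interface; the special case below is an assumption for illustration):

#include <stdint.h>

/* Portable stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_stub(uint32_t x)
{
	int i = 0;

	while (x) {
		x >>= 1;
		i++;
	}
	return i;
}

/* Ring lengths are powers of two, so pass an exponent-style value instead. */
static uint32_t encode_q_len_stub(int q_len)
{
	uint32_t enc = (uint32_t)fls_stub((uint32_t)q_len); /* log2(len) + 1 for powers of two */

	if (enc == 16)                 /* assumed: the largest supported ring encodes as 0 */
		enc = 0;
	return enc;
}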