Lines Matching refs:entry

99 	struct list_head entry;  member
115 unsigned int entry; member
195 struct list_head entry; member
201 struct list_head entry; member
301 list_add_tail(&nt->entry, &ntb_transport_list); in ntb_bus_init()
309 list_for_each_entry_safe(client_dev, cd, &nt->client_devs, entry) { in ntb_bus_remove()
312 list_del(&client_dev->entry); in ntb_bus_remove()
316 list_del(&nt->entry); in ntb_bus_remove()
338 list_for_each_entry(nt, &ntb_transport_list, entry) in ntb_transport_unregister_client_dev()
339 list_for_each_entry_safe(client, cd, &nt->client_devs, entry) in ntb_transport_unregister_client_dev()
342 list_del(&client->entry); in ntb_transport_unregister_client_dev()
364 list_for_each_entry(nt, &ntb_transport_list, entry) { in ntb_transport_register_client_dev()
390 list_add_tail(&client_dev->entry, &nt->client_devs); in ntb_transport_register_client_dev()
498 qp->remote_rx_info->entry); in debugfs_read()
533 static void ntb_list_add(spinlock_t *lock, struct list_head *entry, in ntb_list_add() argument
539 list_add_tail(entry, list); in ntb_list_add()
546 struct ntb_queue_entry *entry; in ntb_list_rm() local
551 entry = NULL; in ntb_list_rm()
554 entry = list_first_entry(list, struct ntb_queue_entry, entry); in ntb_list_rm()
555 list_del(&entry->entry); in ntb_list_rm()
560 return entry; in ntb_list_rm()
567 struct ntb_queue_entry *entry; in ntb_list_mv() local
573 entry = NULL; in ntb_list_mv()
575 entry = list_first_entry(list, struct ntb_queue_entry, entry); in ntb_list_mv()
576 list_move_tail(&entry->entry, to_list); in ntb_list_mv()
581 return entry; in ntb_list_mv()
618 qp->remote_rx_info->entry = qp->rx_max_entry - 1; in ntb_transport_setup_qp_mw()
1170 struct ntb_queue_entry *entry; in ntb_complete_rxc() local
1178 entry = list_first_entry(&qp->rx_post_q, in ntb_complete_rxc()
1179 struct ntb_queue_entry, entry); in ntb_complete_rxc()
1180 if (!(entry->flags & DESC_DONE_FLAG)) in ntb_complete_rxc()
1183 entry->rx_hdr->flags = 0; in ntb_complete_rxc()
1184 iowrite32(entry->index, &qp->rx_info->entry); in ntb_complete_rxc()
1186 cb_data = entry->cb_data; in ntb_complete_rxc()
1187 len = entry->len; in ntb_complete_rxc()
1189 list_move_tail(&entry->entry, &qp->rx_free_q); in ntb_complete_rxc()
1204 struct ntb_queue_entry *entry = data; in ntb_rx_copy_callback() local
1206 entry->flags |= DESC_DONE_FLAG; in ntb_rx_copy_callback()
1208 ntb_complete_rxc(entry->qp); in ntb_rx_copy_callback()
1211 static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) in ntb_memcpy_rx() argument
1213 void *buf = entry->buf; in ntb_memcpy_rx()
1214 size_t len = entry->len; in ntb_memcpy_rx()
1221 ntb_rx_copy_callback(entry); in ntb_memcpy_rx()
1224 static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset) in ntb_async_rx() argument
1227 struct ntb_transport_qp *qp = entry->qp; in ntb_async_rx()
1233 void *buf = entry->buf; in ntb_async_rx()
1235 len = entry->len; in ntb_async_rx()
1276 txd->callback_param = entry; in ntb_async_rx()
1296 ntb_memcpy_rx(entry, offset); in ntb_async_rx()
1303 struct ntb_queue_entry *entry; in ntb_process_rxc() local
1333 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); in ntb_process_rxc()
1334 if (!entry) { in ntb_process_rxc()
1340 entry->rx_hdr = hdr; in ntb_process_rxc()
1341 entry->index = qp->rx_index; in ntb_process_rxc()
1343 if (hdr->len > entry->len) { in ntb_process_rxc()
1346 hdr->len, entry->len); in ntb_process_rxc()
1349 entry->len = -EIO; in ntb_process_rxc()
1350 entry->flags |= DESC_DONE_FLAG; in ntb_process_rxc()
1356 qp->rx_index, hdr->ver, hdr->len, entry->len); in ntb_process_rxc()
1361 entry->len = hdr->len; in ntb_process_rxc()
1363 ntb_async_rx(entry, offset); in ntb_process_rxc()
1411 struct ntb_queue_entry *entry = data; in ntb_tx_copy_callback() local
1412 struct ntb_transport_qp *qp = entry->qp; in ntb_tx_copy_callback()
1413 struct ntb_payload_header __iomem *hdr = entry->tx_hdr; in ntb_tx_copy_callback()
1415 iowrite32(entry->flags | DESC_DONE_FLAG, &hdr->flags); in ntb_tx_copy_callback()
1423 if (entry->len > 0) { in ntb_tx_copy_callback()
1424 qp->tx_bytes += entry->len; in ntb_tx_copy_callback()
1427 qp->tx_handler(qp, qp->cb_data, entry->cb_data, in ntb_tx_copy_callback()
1428 entry->len); in ntb_tx_copy_callback()
1431 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, &qp->tx_free_q); in ntb_tx_copy_callback()
1434 static void ntb_memcpy_tx(struct ntb_queue_entry *entry, void __iomem *offset) in ntb_memcpy_tx() argument
1441 __copy_from_user_inatomic_nocache(offset, entry->buf, entry->len); in ntb_memcpy_tx()
1443 memcpy_toio(offset, entry->buf, entry->len); in ntb_memcpy_tx()
1449 ntb_tx_copy_callback(entry); in ntb_memcpy_tx()
1453 struct ntb_queue_entry *entry) in ntb_async_tx() argument
1464 size_t len = entry->len; in ntb_async_tx()
1465 void *buf = entry->buf; in ntb_async_tx()
1469 entry->tx_hdr = hdr; in ntb_async_tx()
1471 iowrite32(entry->len, &hdr->len); in ntb_async_tx()
1506 txd->callback_param = entry; in ntb_async_tx()
1524 ntb_memcpy_tx(entry, offset); in ntb_async_tx()
1529 struct ntb_queue_entry *entry) in ntb_process_tx() argument
1531 if (qp->tx_index == qp->remote_rx_info->entry) { in ntb_process_tx()
1536 if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { in ntb_process_tx()
1540 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_process_tx()
1545 ntb_async_tx(qp, entry); in ntb_process_tx()
1558 struct ntb_queue_entry *entry; in ntb_send_link_down() local
1567 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_send_link_down()
1568 if (entry) in ntb_send_link_down()
1573 if (!entry) in ntb_send_link_down()
1576 entry->cb_data = NULL; in ntb_send_link_down()
1577 entry->buf = NULL; in ntb_send_link_down()
1578 entry->len = 0; in ntb_send_link_down()
1579 entry->flags = LINK_DOWN_FLAG; in ntb_send_link_down()
1581 rc = ntb_process_tx(qp, entry); in ntb_send_link_down()
1615 struct ntb_queue_entry *entry; in ntb_transport_create_queue() local
1673 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); in ntb_transport_create_queue()
1674 if (!entry) in ntb_transport_create_queue()
1677 entry->qp = qp; in ntb_transport_create_queue()
1678 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, in ntb_transport_create_queue()
1683 entry = kzalloc_node(sizeof(*entry), GFP_ATOMIC, node); in ntb_transport_create_queue()
1684 if (!entry) in ntb_transport_create_queue()
1687 entry->qp = qp; in ntb_transport_create_queue()
1688 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_create_queue()
1700 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_create_queue()
1701 kfree(entry); in ntb_transport_create_queue()
1703 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_create_queue()
1704 kfree(entry); in ntb_transport_create_queue()
1724 struct ntb_queue_entry *entry; in ntb_transport_free_queue() local
1774 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q))) in ntb_transport_free_queue()
1775 kfree(entry); in ntb_transport_free_queue()
1777 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) { in ntb_transport_free_queue()
1779 kfree(entry); in ntb_transport_free_queue()
1782 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) { in ntb_transport_free_queue()
1784 kfree(entry); in ntb_transport_free_queue()
1787 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) in ntb_transport_free_queue()
1788 kfree(entry); in ntb_transport_free_queue()
1808 struct ntb_queue_entry *entry; in ntb_transport_rx_remove() local
1814 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q); in ntb_transport_rx_remove()
1815 if (!entry) in ntb_transport_rx_remove()
1818 buf = entry->cb_data; in ntb_transport_rx_remove()
1819 *len = entry->len; in ntb_transport_rx_remove()
1821 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q); in ntb_transport_rx_remove()
1842 struct ntb_queue_entry *entry; in ntb_transport_rx_enqueue() local
1847 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q); in ntb_transport_rx_enqueue()
1848 if (!entry) in ntb_transport_rx_enqueue()
1851 entry->cb_data = cb; in ntb_transport_rx_enqueue()
1852 entry->buf = data; in ntb_transport_rx_enqueue()
1853 entry->len = len; in ntb_transport_rx_enqueue()
1854 entry->flags = 0; in ntb_transport_rx_enqueue()
1856 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q); in ntb_transport_rx_enqueue()
1880 struct ntb_queue_entry *entry; in ntb_transport_tx_enqueue() local
1886 entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); in ntb_transport_tx_enqueue()
1887 if (!entry) { in ntb_transport_tx_enqueue()
1892 entry->cb_data = cb; in ntb_transport_tx_enqueue()
1893 entry->buf = data; in ntb_transport_tx_enqueue()
1894 entry->len = len; in ntb_transport_tx_enqueue()
1895 entry->flags = 0; in ntb_transport_tx_enqueue()
1897 rc = ntb_process_tx(qp, entry); in ntb_transport_tx_enqueue()
1899 ntb_list_add(&qp->ntb_tx_free_q_lock, &entry->entry, in ntb_transport_tx_enqueue()
2021 unsigned int tail = qp->remote_rx_info->entry; in ntb_transport_tx_free_entry()
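The ntb_list_add/ntb_list_rm/ntb_list_mv matches above (source lines 533-581) outline a trio of spinlock-protected helpers built on the kernel's intrusive list API, which the queue-pair code uses to move ntb_queue_entry items between its rx/tx free, pending and posted lists. Below is a minimal sketch of that pattern reconstructed from the matched lines only; the lock flavour (spin_lock_irqsave) and the list_empty() guards are assumptions filled in around the calls that are actually visible in the listing, not a verbatim copy of the driver.

/*
 * Sketch of the locked-list helpers suggested by the matches at source
 * lines 533-581.  Only list_add_tail(), list_first_entry(), list_del()
 * and list_move_tail() appear in the listing above; the locking calls
 * and the list_empty() checks are assumed.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct ntb_queue_entry {
	struct list_head entry;		/* the list member matched above */
	/* payload fields (cb_data, buf, len, flags, ...) omitted */
};

static void ntb_list_add(spinlock_t *lock, struct list_head *entry,
			 struct list_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_add_tail(entry, list);
	spin_unlock_irqrestore(lock, flags);
}

static struct ntb_queue_entry *ntb_list_rm(spinlock_t *lock,
					   struct list_head *list)
{
	struct ntb_queue_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list)) {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_del(&entry->entry);
	}
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
					   struct list_head *list,
					   struct list_head *to_list)
{
	struct ntb_queue_entry *entry = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list)) {
		entry = list_first_entry(list, struct ntb_queue_entry, entry);
		list_move_tail(&entry->entry, to_list);
	}
	spin_unlock_irqrestore(lock, flags);

	return entry;
}

Helpers of this shape are what let callers such as ntb_transport_rx_enqueue() and ntb_process_rxc() (source lines 1856 and 1333 above) hand an entry from one list to another with a single call while holding the per-list spinlock only for the list manipulation itself. An irqsave lock would be a reasonable choice here because the completion paths can run from DMA callback context, but that detail is an assumption rather than something visible in the listing.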