Searched refs:oct (Results 1 – 25 of 31) sorted by relevance

/drivers/net/ethernet/cavium/liquidio/
cn66xx_device.c
29 int lio_cn6xxx_soft_reset(struct octeon_device *oct) in lio_cn6xxx_soft_reset() argument
31 octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF); in lio_cn6xxx_soft_reset()
33 dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n"); in lio_cn6xxx_soft_reset()
35 lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST); in lio_cn6xxx_soft_reset()
36 octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL); in lio_cn6xxx_soft_reset()
38 lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST); in lio_cn6xxx_soft_reset()
39 lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST); in lio_cn6xxx_soft_reset()
44 if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) { in lio_cn6xxx_soft_reset()
45 dev_err(&oct->pci_dev->dev, "Soft reset failed\n"); in lio_cn6xxx_soft_reset()
49 dev_dbg(&oct->pci_dev->dev, "Reset completed\n"); in lio_cn6xxx_soft_reset()
[all …]
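
Read in sequence, the excerpt suggests the shape of the CN66XX soft reset: park a known value in a scratch register, assert the reset, then check whether the value survived. The sketch below reassembles only what is visible in this listing; the settle delay and the return values are assumptions, not the verbatim kernel source.

/* Sketch of lio_cn6xxx_soft_reset() reassembled from the excerpt above.
 * Register names and accessors are copied from the listing; the delay
 * and return values are assumed. */
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);
	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	/* A successful reset clears SLI_SCRATCH1, so a surviving
	 * value below signals that the reset never took effect. */
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST); /* flush posted writes */
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	mdelay(100); /* assumed settle time before the check */

	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1)) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	return 0;
}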
cn23xx_pf_device.c
39 void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct) in cn23xx_dump_pf_initialized_regs() argument
43 struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip; in cn23xx_dump_pf_initialized_regs()
46 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n", in cn23xx_dump_pf_initialized_regs()
48 CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG))); in cn23xx_dump_pf_initialized_regs()
49 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", in cn23xx_dump_pf_initialized_regs()
51 CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1))); in cn23xx_dump_pf_initialized_regs()
52 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", in cn23xx_dump_pf_initialized_regs()
54 lio_pci_readq(oct, CN23XX_RST_SOFT_RST)); in cn23xx_dump_pf_initialized_regs()
57 dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", in cn23xx_dump_pf_initialized_regs()
59 lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL)); in cn23xx_dump_pf_initialized_regs()
[all …]
cn68xx_device.c
31 static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct) in lio_cn68xx_set_dpi_regs() argument
36 lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL); in lio_cn68xx_set_dpi_regs()
37 dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n", in lio_cn68xx_set_dpi_regs()
38 lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL)); in lio_cn68xx_set_dpi_regs()
45 lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i)); in lio_cn68xx_set_dpi_regs()
46 lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i)); in lio_cn68xx_set_dpi_regs()
47 dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i, in lio_cn68xx_set_dpi_regs()
48 lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i))); in lio_cn68xx_set_dpi_regs()
55 lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL); in lio_cn68xx_set_dpi_regs()
56 dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n", in lio_cn68xx_set_dpi_regs()
[all …]
octeon_device.c
568 static void *__retrieve_octeon_config_info(struct octeon_device *oct, in __retrieve_octeon_config_info() argument
571 u32 oct_id = oct->octeon_id; in __retrieve_octeon_config_info()
576 if (oct->chip_id == OCTEON_CN66XX) { in __retrieve_octeon_config_info()
578 } else if ((oct->chip_id == OCTEON_CN68XX) && in __retrieve_octeon_config_info()
581 } else if ((oct->chip_id == OCTEON_CN68XX) && in __retrieve_octeon_config_info()
584 } else if (oct->chip_id == OCTEON_CN23XX_PF_VID) { in __retrieve_octeon_config_info()
586 } else if (oct->chip_id == OCTEON_CN23XX_VF_VID) { in __retrieve_octeon_config_info()
596 static int __verify_octeon_config_info(struct octeon_device *oct, void *conf) in __verify_octeon_config_info() argument
598 switch (oct->chip_id) { in __verify_octeon_config_info()
601 return lio_validate_cn6xxx_config_info(oct, conf); in __verify_octeon_config_info()
[all …]
cn23xx_vf_device.c
30 u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us) in cn23xx_vf_get_oq_ticks() argument
33 u32 oqticks_per_us = (u32)oct->pfvf_hsword.coproc_tics_per_us; in cn23xx_vf_get_oq_ticks()
50 static int cn23xx_vf_reset_io_queues(struct octeon_device *oct, u32 num_queues) in cn23xx_vf_reset_io_queues() argument
59 d64 = octeon_read_csr64(oct, in cn23xx_vf_reset_io_queues()
62 octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_vf_reset_io_queues()
68 u64 reg_val = octeon_read_csr64(oct, in cn23xx_vf_reset_io_queues()
74 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_vf_reset_io_queues()
78 dev_err(&oct->pci_dev->dev, in cn23xx_vf_reset_io_queues()
85 octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no), in cn23xx_vf_reset_io_queues()
89 oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no))); in cn23xx_vf_reset_io_queues()
[all …]
octeon_console.c
35 static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
38 static int octeon_console_read(struct octeon_device *oct, u32 console_num,
147 static inline u64 __cvmx_bootmem_desc_get(struct octeon_device *oct, in __cvmx_bootmem_desc_get() argument
155 return octeon_read_device_mem32(oct, base); in __cvmx_bootmem_desc_get()
157 return octeon_read_device_mem64(oct, base); in __cvmx_bootmem_desc_get()
173 static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct, in CVMX_BOOTMEM_NAMED_GET_NAME() argument
179 octeon_pci_read_core_mem(oct, addr, (u8 *)str, len); in CVMX_BOOTMEM_NAMED_GET_NAME()
195 static int __cvmx_bootmem_check_version(struct octeon_device *oct, in __cvmx_bootmem_check_version() argument
201 if (!oct->bootmem_desc_addr) in __cvmx_bootmem_check_version()
202 oct->bootmem_desc_addr = in __cvmx_bootmem_check_version()
[all …]
request_manager.c
39 static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
43 static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no) in IQ_INSTR_MODE_64B() argument
46 (struct octeon_instr_queue *)oct->instr_queue[iq_no]; in IQ_INSTR_MODE_64B()
50 #define IQ_INSTR_MODE_32B(oct, iq_no) (!IQ_INSTR_MODE_64B(oct, iq_no)) argument
56 int octeon_init_instr_queue(struct octeon_device *oct, in octeon_init_instr_queue() argument
65 int numa_node = dev_to_node(&oct->pci_dev->dev); in octeon_init_instr_queue()
67 if (OCTEON_CN6XXX(oct)) in octeon_init_instr_queue()
68 conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx))); in octeon_init_instr_queue()
69 else if (OCTEON_CN23XX_PF(oct)) in octeon_init_instr_queue()
70 conf = &(CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf))); in octeon_init_instr_queue()
[all …]
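
The two helpers at the top of this excerpt are complementary: IQ_INSTR_MODE_32B() is defined as the negation of IQ_INSTR_MODE_64B(), so each instruction queue is in exactly one of the two modes. A hypothetical caller (the helper name below is ours, not the driver's):

/* Hypothetical debug helper illustrating the paired mode checks. */
static void log_iq_instr_mode(struct octeon_device *oct, int iq_no)
{
	if (IQ_INSTR_MODE_64B(oct, iq_no))
		dev_dbg(&oct->pci_dev->dev, "IQ %d: 64B instructions\n", iq_no);
	else /* IQ_INSTR_MODE_32B(oct, iq_no) */
		dev_dbg(&oct->pci_dev->dev, "IQ %d: 32B instructions\n", iq_no);
}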
lio_core.c
81 int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) in lio_setup_glists() argument
116 int numa_node = dev_to_node(&oct->pci_dev->dev); in lio_setup_glists()
123 lio_dma_alloc(oct, in lio_setup_glists()
161 struct octeon_device *oct = lio->oct_dev; in liquidio_set_feature() local
176 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n", in liquidio_set_feature()
256 struct octeon_device *oct = lio->oct_dev; in liquidio_link_ctrl_cmd_completion() local
297 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name); in liquidio_link_ctrl_cmd_completion()
301 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n", in liquidio_link_ctrl_cmd_completion()
306 dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n", in liquidio_link_ctrl_cmd_completion()
311 dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n", in liquidio_link_ctrl_cmd_completion()
[all …]
lio_vf_main.c
70 static int octeon_device_init(struct octeon_device *oct);
73 static int lio_wait_for_oq_pkts(struct octeon_device *oct) in lio_wait_for_oq_pkts() argument
76 (struct octeon_device_priv *)oct->priv; in lio_wait_for_oq_pkts()
84 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) { in lio_wait_for_oq_pkts()
85 if (!(oct->io_qmask.oq & BIT_ULL(i))) in lio_wait_for_oq_pkts()
87 pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]); in lio_wait_for_oq_pkts()
105 static void pcierror_quiesce_device(struct octeon_device *oct) in pcierror_quiesce_device() argument
117 if (wait_for_pending_requests(oct)) in pcierror_quiesce_device()
118 dev_err(&oct->pci_dev->dev, "There were pending requests\n"); in pcierror_quiesce_device()
121 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) { in pcierror_quiesce_device()
[all …]
lio_main.c
150 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
170 struct octeon_device *oct = oct_priv->dev; in octeon_droq_bh() local
172 for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) { in octeon_droq_bh()
173 if (!(oct->io_qmask.oq & BIT_ULL(q_no))) in octeon_droq_bh()
175 reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no], in octeon_droq_bh()
177 lio_enable_irq(oct->droq[q_no], NULL); in octeon_droq_bh()
179 if (OCTEON_CN23XX_PF(oct) && oct->msix_on) { in octeon_droq_bh()
183 int adjusted_q_no = q_no + oct->sriov_info.pf_srn; in octeon_droq_bh()
186 oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no), in octeon_droq_bh()
189 oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0); in octeon_droq_bh()
[all …]
lio_vf_rep.c
55 lio_vf_rep_send_soft_command(struct octeon_device *oct, in lio_vf_rep_send_soft_command() argument
66 octeon_alloc_soft_command(oct, req_size, in lio_vf_rep_send_soft_command()
82 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, in lio_vf_rep_send_soft_command()
85 err = octeon_send_soft_command(oct, sc); in lio_vf_rep_send_soft_command()
89 err = wait_for_sc_completion_timeout(oct, sc, 0); in lio_vf_rep_send_soft_command()
95 dev_err(&oct->pci_dev->dev, "VF rep send config failed\n"); in lio_vf_rep_send_soft_command()
103 octeon_free_soft_command(oct, sc); in lio_vf_rep_send_soft_command()
113 struct octeon_device *oct; in lio_vf_rep_open() local
116 oct = vf_rep->oct; in lio_vf_rep_open()
123 ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, in lio_vf_rep_open()
[all …]
octeon_mem_ops.c
31 octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx) in octeon_toggle_bar1_swapmode() argument
35 mask = oct->fn_list.bar1_idx_read(oct, idx); in octeon_toggle_bar1_swapmode()
37 oct->fn_list.bar1_idx_write(oct, idx, mask); in octeon_toggle_bar1_swapmode()
40 #define octeon_toggle_bar1_swapmode(oct, idx) argument
44 octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr, in octeon_pci_fastwrite() argument
52 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); in octeon_pci_fastwrite()
61 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); in octeon_pci_fastwrite()
68 octeon_pci_fastread(struct octeon_device *oct, u8 __iomem *mapped_addr, in octeon_pci_fastread() argument
76 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); in octeon_pci_fastread()
85 octeon_toggle_bar1_swapmode(oct, MEMOPS_IDX); in octeon_pci_fastread()
[all …]
lio_ethtool.c
219 struct octeon_device *oct = lio->oct_dev; in lio_get_link_ksettings() local
247 dev_dbg(&oct->pci_dev->dev, "ecmd->base.transceiver is XCVR_EXTERNAL\n"); in lio_get_link_ksettings()
250 dev_err(&oct->pci_dev->dev, "Unknown link interface mode: %d\n", in lio_get_link_ksettings()
260 if (oct->subsystem_id == OCTEON_CN2350_25GB_SUBSYS_ID || in lio_get_link_ksettings()
261 oct->subsystem_id == OCTEON_CN2360_25GB_SUBSYS_ID) { in lio_get_link_ksettings()
262 if (OCTEON_CN23XX_PF(oct)) { in lio_get_link_ksettings()
270 if (oct->no_speed_setting == 0) { in lio_get_link_ksettings()
282 if (oct->no_speed_setting == 0) { in lio_get_link_ksettings()
286 oct->speed_setting = 25; in lio_get_link_ksettings()
289 if (oct->speed_setting == 10) { in lio_get_link_ksettings()
[all …]
octeon_droq.c
140 octeon_droq_destroy_ring_buffers(struct octeon_device *oct, in octeon_droq_destroy_ring_buffers() argument
152 lio_unmap_ring(oct->pci_dev, in octeon_droq_destroy_ring_buffers()
167 octeon_droq_setup_ring_buffers(struct octeon_device *oct, in octeon_droq_setup_ring_buffers() argument
175 buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info); in octeon_droq_setup_ring_buffers()
178 dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n", in octeon_droq_setup_ring_buffers()
198 int octeon_delete_droq(struct octeon_device *oct, u32 q_no) in octeon_delete_droq() argument
200 struct octeon_droq *droq = oct->droq[q_no]; in octeon_delete_droq()
202 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); in octeon_delete_droq()
204 octeon_droq_destroy_ring_buffers(oct, droq); in octeon_delete_droq()
208 lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE), in octeon_delete_droq()
[all …]
cn66xx_device.h
68 int lio_cn6xxx_soft_reset(struct octeon_device *oct);
69 void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct);
70 void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
72 void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
74 void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct);
75 void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct);
76 void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
77 void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
78 int lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
79 void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
[all …]
octeon_main.h
73 void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
75 void octeon_schedule_rxq_oom_work(struct octeon_device *oct,
93 static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx) in octeon_unmap_pci_barx() argument
95 dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n", in octeon_unmap_pci_barx()
98 if (oct->mmio[baridx].done) in octeon_unmap_pci_barx()
99 iounmap(oct->mmio[baridx].hw_addr); in octeon_unmap_pci_barx()
101 if (oct->mmio[baridx].start) in octeon_unmap_pci_barx()
102 pci_release_region(oct->pci_dev, baridx * 2); in octeon_unmap_pci_barx()
111 static inline int octeon_map_pci_barx(struct octeon_device *oct, in octeon_map_pci_barx() argument
116 if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) { in octeon_map_pci_barx()
[all …]
octeon_nic.c
30 octeon_alloc_soft_command_resp(struct octeon_device *oct, in octeon_alloc_soft_command_resp() argument
41 octeon_alloc_soft_command(oct, 0, rdatasize, 0); in octeon_alloc_soft_command_resp()
52 if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) { in octeon_alloc_soft_command_resp()
68 rdp->pcie_port = oct->pcie_port; in octeon_alloc_soft_command_resp()
73 if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct)) in octeon_alloc_soft_command_resp()
83 int octnet_send_nic_data_pkt(struct octeon_device *oct, in octnet_send_nic_data_pkt() argument
89 return octeon_send_command(oct, ndata->q_no, ring_doorbell, &ndata->cmd, in octnet_send_nic_data_pkt()
95 *octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct, in octnic_alloc_ctrl_pkt_sc() argument
109 octeon_alloc_soft_command(oct, datasize, rdatasize, 0); in octnic_alloc_ctrl_pkt_sc()
127 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD, in octnic_alloc_ctrl_pkt_sc()
[all …]
octeon_device.h
212 typedef int (*octeon_console_print_fn)(struct octeon_device *oct,
423 struct octeon_device *oct; member
621 #define OCTEON_CN6XXX(oct) ({ \ argument
622 typeof(oct) _oct = (oct); \
625 #define OCTEON_CN23XX_PF(oct) ((oct)->chip_id == OCTEON_CN23XX_PF_VID) argument
626 #define OCTEON_CN23XX_VF(oct) ((oct)->chip_id == OCTEON_CN23XX_VF_VID) argument
627 #define CHIP_CONF(oct, TYPE) \ argument
628 (((struct octeon_ ## TYPE *)((oct)->chip))->conf)
638 void octeon_free_device_mem(struct octeon_device *oct);
655 int octeon_register_device(struct octeon_device *oct,
[all …]
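
The chip-identification macros above (OCTEON_CN6XXX, OCTEON_CN23XX_PF, OCTEON_CN23XX_VF, CHIP_CONF) drive the per-chip dispatch seen earlier in octeon_init_instr_queue(). A minimal sketch of that idiom, with a hypothetical helper name and an untyped return for brevity:

/* Hypothetical helper mirroring the dispatch idiom visible in
 * request_manager.c above. */
static void *get_iq_conf(struct octeon_device *oct)
{
	if (OCTEON_CN6XXX(oct))
		return &CFG_GET_IQ_CFG(CHIP_CONF(oct, cn6xxx));
	else if (OCTEON_CN23XX_PF(oct))
		return &CFG_GET_IQ_CFG(CHIP_CONF(oct, cn23xx_pf));
	return NULL; /* unknown chip; caller must handle */
}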
response_manager.c
30 int octeon_setup_response_list(struct octeon_device *oct) in octeon_setup_response_list() argument
36 INIT_LIST_HEAD(&oct->response_list[i].head); in octeon_setup_response_list()
37 spin_lock_init(&oct->response_list[i].lock); in octeon_setup_response_list()
38 atomic_set(&oct->response_list[i].pending_req_count, 0); in octeon_setup_response_list()
40 spin_lock_init(&oct->cmd_resp_wqlock); in octeon_setup_response_list()
42 oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0); in octeon_setup_response_list()
43 if (!oct->dma_comp_wq.wq) { in octeon_setup_response_list()
44 dev_err(&oct->pci_dev->dev, "failed to create wq thread\n"); in octeon_setup_response_list()
48 cwq = &oct->dma_comp_wq; in octeon_setup_response_list()
50 cwq->wk.ctxptr = oct; in octeon_setup_response_list()
[all …]
octeon_mailbox.c
131 int octeon_mbox_write(struct octeon_device *oct, in octeon_mbox_write() argument
134 struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no]; in octeon_mbox_write()
209 static void get_vf_stats(struct octeon_device *oct, in get_vf_stats() argument
214 for (i = 0; i < oct->num_iqs; i++) { in get_vf_stats()
215 if (!oct->instr_queue[i]) in get_vf_stats()
217 stats->tx_packets += oct->instr_queue[i]->stats.tx_done; in get_vf_stats()
218 stats->tx_bytes += oct->instr_queue[i]->stats.tx_tot_bytes; in get_vf_stats()
221 for (i = 0; i < oct->num_oqs; i++) { in get_vf_stats()
222 if (!oct->droq[i]) in get_vf_stats()
224 stats->rx_packets += oct->droq[i]->stats.rx_pkts_received; in get_vf_stats()
[all …]
octeon_iq.h
327 int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
328 int octeon_free_sc_done_list(struct octeon_device *oct);
329 int octeon_free_sc_zombie_list(struct octeon_device *oct);
330 int octeon_free_sc_buffer_pool(struct octeon_device *oct);
332 octeon_alloc_soft_command(struct octeon_device *oct,
335 void octeon_free_soft_command(struct octeon_device *oct,
364 int lio_wait_for_instr_fetch(struct octeon_device *oct);
367 octeon_ring_doorbell_locked(struct octeon_device *oct, u32 iq_no);
370 octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
374 lio_process_iq_request_list(struct octeon_device *oct,
[all …]
octeon_nic.h
112 static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no) in octnet_iq_is_full() argument
114 return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending) in octnet_iq_is_full()
115 >= (oct->instr_queue[q_no]->max_count - 2)); in octnet_iq_is_full()
119 octnet_prepare_pci_cmd_o2(struct octeon_device *oct, in octnet_prepare_pci_cmd_o2() argument
140 port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; in octnet_prepare_pci_cmd_o2()
173 octnet_prepare_pci_cmd_o3(struct octeon_device *oct, in octnet_prepare_pci_cmd_o3() argument
191 ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind; in octnet_prepare_pci_cmd_o3()
206 pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg; in octnet_prepare_pci_cmd_o3()
208 port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port; in octnet_prepare_pci_cmd_o3()
216 pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg; in octnet_prepare_pci_cmd_o3()
[all …]
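
octnet_iq_is_full() at the head of this excerpt reports full two slots early (max_count - 2), leaving slack in the ring; a common reason for such slack is to absorb an in-flight post before the queue is truly exhausted. A hypothetical transmit-path guard built on it (the netdev plumbing is illustrative, not the driver's code):

/* Hypothetical xmit back-pressure check using octnet_iq_is_full(). */
static netdev_tx_t try_queue_pkt(struct net_device *netdev,
				 struct octeon_device *oct, u32 q_no)
{
	if (octnet_iq_is_full(oct, q_no)) {
		netif_stop_subqueue(netdev, q_no);
		return NETDEV_TX_BUSY; /* stack will retry later */
	}
	/* ... build and post the command to instruction queue q_no ... */
	return NETDEV_TX_OK;
}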
cn23xx_pf_device.h
55 int setup_cn23xx_octeon_pf_device(struct octeon_device *oct);
57 int validate_cn23xx_pf_config_info(struct octeon_device *oct,
60 u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
62 void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct);
64 int cn23xx_sriov_config(struct octeon_device *oct);
66 int cn23xx_fw_loaded(struct octeon_device *oct);
68 void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
71 int cn23xx_get_vf_stats(struct octeon_device *oct, int ifidx,
octeon_network.h
225 int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);
229 int lio_wait_for_clean_oq(struct octeon_device *oct);
238 int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);
260 *recv_buffer_alloc(struct octeon_device *oct, in recv_buffer_alloc() argument
286 pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0, in recv_buffer_alloc()
290 if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) { in recv_buffer_alloc()
331 recv_buffer_recycle(struct octeon_device *oct, void *buf) in recv_buffer_recycle() argument
336 dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n", in recv_buffer_recycle()
343 dma_unmap_page(&oct->pci_dev->dev, in recv_buffer_recycle()
363 *recv_buffer_reuse(struct octeon_device *oct, void *buf) in recv_buffer_reuse() argument
[all …]
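
recv_buffer_alloc() in this excerpt shows the standard streaming-DMA pattern: dma_map_page() followed by a mandatory dma_mapping_error() check before the handle is used. A stripped-down sketch of just that pattern (the helper name and DMA direction are assumptions):

/* Minimal sketch of the map-then-check pattern from recv_buffer_alloc();
 * returns false when the mapping must not be used. */
static bool map_rx_page(struct octeon_device *oct, struct page *page,
			size_t len, dma_addr_t *dma)
{
	*dma = dma_map_page(&oct->pci_dev->dev, page, 0, len,
			    DMA_FROM_DEVICE);
	return !dma_mapping_error(&oct->pci_dev->dev, *dma);
}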
cn23xx_vf_device.h
39 void cn23xx_vf_ask_pf_to_do_flr(struct octeon_device *oct);
41 int cn23xx_octeon_pfvf_handshake(struct octeon_device *oct);
43 int cn23xx_setup_octeon_vf_device(struct octeon_device *oct);
45 u32 cn23xx_vf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us);
47 void cn23xx_dump_vf_initialized_regs(struct octeon_device *oct);
