
Searched full:fifo (Results 1 – 25 of 3377) sorted by relevance


/kernel/linux/linux-5.10/drivers/staging/fwserial/
dma_fifo.c
3 * DMA-able FIFO implementation
21 #define FAIL(fifo, condition, format...) ({ \ argument
22 fifo->corrupt = !!(condition); \
23 WARN(fifo->corrupt, format); \
35 * dma_fifo_init: initialize the fifo to a valid but inoperative state
36 * @fifo: address of in-place "struct dma_fifo" object
38 void dma_fifo_init(struct dma_fifo *fifo) in dma_fifo_init() argument
40 memset(fifo, 0, sizeof(*fifo)); in dma_fifo_init()
41 INIT_LIST_HEAD(&fifo->pending); in dma_fifo_init()
46 * @fifo: address of in-place "struct dma_fifo" object
[all …]
dma_fifo.h
3 * DMA-able FIFO interface
12 * The design basis for the DMA FIFO is to provide an output side that
16 * for the lifetime of the FIFO).
20 * the FIFO will only advance the output in the original input sequence.
21 * This means the FIFO will eventually stall if a transaction is never retired.
23 * Chunking the output side into cache line multiples means that some FIFO
28 * This potential waste requires additional hidden capacity within the FIFO
32 * fragmentation when wrapping at the end of the FIFO. Input is allowed into the
33 * guard area, but the in and out FIFO markers are wrapped when DMA is pended.
45 int size; /* 'apparent' size of fifo */
[all …]
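
The dma_fifo.h comment above describes an output side that is handed out in chunks and reclaims space only once pended DMA transactions are retired in their original order. The sketch below is a minimal userspace model of that pend-then-retire idea; every name in it (toy_fifo, pend_out, retire_out) is invented for illustration and it ignores the cache-line alignment and guard-area details, so it is not the fwserial dma_fifo implementation.

/*
 * Minimal userspace model of the pend/retire-in-order behaviour described in
 * dma_fifo.h above. All names are hypothetical.
 */
#include <stdio.h>

#define CAP   64   /* total capacity in bytes (power of two)      */
#define CHUNK 16   /* output is handed out in CHUNK-sized pieces  */

struct toy_fifo {
	unsigned char buf[CAP];
	unsigned int in;    /* free-running input counter              */
	unsigned int out;   /* next byte handed out for "DMA"          */
	unsigned int done;  /* last byte whose transaction was retired */
};

/* writable space: everything before the oldest un-retired byte */
static unsigned int space(const struct toy_fifo *f)
{
	return CAP - (f->in - f->done);
}

static int push(struct toy_fifo *f, const void *src, unsigned int n)
{
	if (n > space(f))
		return -1;  /* stalls until pended output is retired */
	for (unsigned int i = 0; i < n; i++)
		f->buf[(f->in + i) % CAP] = ((const unsigned char *)src)[i];
	f->in += n;
	return 0;
}

/* hand the next chunk to the "DMA engine"; returns its length */
static unsigned int pend_out(struct toy_fifo *f, unsigned int *start)
{
	unsigned int avail = f->in - f->out;
	unsigned int n = avail < CHUNK ? avail : CHUNK;

	*start = f->out % CAP;
	f->out += n;
	return n;
}

/* retire a completed transaction; only then is its space reusable */
static void retire_out(struct toy_fifo *f, unsigned int n)
{
	f->done += n;
}

int main(void)
{
	struct toy_fifo f = { 0 };
	unsigned int start, n;

	push(&f, "hello, fifo", 11);
	n = pend_out(&f, &start);
	printf("pended %u bytes at offset %u, space %u\n", n, start, space(&f));
	retire_out(&f, n);
	printf("after retire, space %u\n", space(&f));
	return 0;
}

The stall mentioned in the header comment falls out naturally from this model: push() keeps failing once CAP bytes are in flight and nothing has been retired.
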
/kernel/linux/linux-4.19/drivers/staging/fwserial/
dma_fifo.c
3 * DMA-able FIFO implementation
21 #define FAIL(fifo, condition, format...) ({ \ argument
22 fifo->corrupt = !!(condition); \
23 WARN(fifo->corrupt, format); \
35 * dma_fifo_init: initialize the fifo to a valid but inoperative state
36 * @fifo: address of in-place "struct dma_fifo" object
38 void dma_fifo_init(struct dma_fifo *fifo) in dma_fifo_init() argument
40 memset(fifo, 0, sizeof(*fifo)); in dma_fifo_init()
41 INIT_LIST_HEAD(&fifo->pending); in dma_fifo_init()
46 * @fifo: address of in-place "struct dma_fifo" object
[all …]
dma_fifo.h
3 * DMA-able FIFO interface
12 * The design basis for the DMA FIFO is to provide an output side that
16 * for the lifetime of the FIFO).
20 * the FIFO will only advance the output in the original input sequence.
21 * This means the FIFO will eventually stall if a transaction is never retired.
23 * Chunking the output side into cache line multiples means that some FIFO
28 * This potential waste requires additional hidden capacity within the FIFO
32 * fragmentation when wrapping at the end of the FIFO. Input is allowed into the
33 * guard area, but the in and out FIFO markers are wrapped when DMA is pended.
45 int size; /* 'apparent' size of fifo */
[all …]
/kernel/linux/linux-5.10/lib/
kfifo.c
3 * A generic kernel FIFO implementation
17 * internal helper to calculate the unused elements in a fifo
19 static inline unsigned int kfifo_unused(struct __kfifo *fifo) in kfifo_unused() argument
21 return (fifo->mask + 1) - (fifo->in - fifo->out); in kfifo_unused()
24 int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, in __kfifo_alloc() argument
33 fifo->in = 0; in __kfifo_alloc()
34 fifo->out = 0; in __kfifo_alloc()
35 fifo->esize = esize; in __kfifo_alloc()
38 fifo->data = NULL; in __kfifo_alloc()
39 fifo->mask = 0; in __kfifo_alloc()
[all …]
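
A detail worth noting in the snippet above: kfifo_unused() works because in and out are free-running unsigned counters and the capacity is a power of two (mask + 1), so in - out is the fill level even after the counters wrap. A standalone check of that arithmetic (not kernel code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int mask = 8 - 1;        /* capacity of 8 elements          */
	unsigned int in  = 0xfffffffeu;   /* free-running counters near wrap */
	unsigned int out = 0xfffffffau;

	unsigned int used   = in - out;                 /* 4, despite the wrap           */
	unsigned int unused = (mask + 1) - (in - out);  /* same form as kfifo_unused()   */

	assert(used == 4 && unused == 4);
	printf("used=%u unused=%u\n", used, unused);
	return 0;
}
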
/kernel/linux/linux-4.19/lib/
kfifo.c
2 * A generic kernel FIFO implementation
31 * internal helper to calculate the unused elements in a fifo
33 static inline unsigned int kfifo_unused(struct __kfifo *fifo) in kfifo_unused() argument
35 return (fifo->mask + 1) - (fifo->in - fifo->out); in kfifo_unused()
38 int __kfifo_alloc(struct __kfifo *fifo, unsigned int size, in __kfifo_alloc() argument
47 fifo->in = 0; in __kfifo_alloc()
48 fifo->out = 0; in __kfifo_alloc()
49 fifo->esize = esize; in __kfifo_alloc()
52 fifo->data = NULL; in __kfifo_alloc()
53 fifo->mask = 0; in __kfifo_alloc()
[all …]
/kernel/linux/linux-5.10/include/linux/
kfifo.h
3 * A generic kernel FIFO implementation
12 * How to porting drivers to the new generic FIFO API:
31 * and one writer is using the fifo and no kfifo_reset() will be called.
98 * helper macro to distinguish between real in place fifo where the fifo
99 * array is a part of the structure and the fifo type where the array is
100 * outside of the fifo structure.
102 #define __is_kfifo_ptr(fifo) \ argument
103 (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type))))
106 * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
107 * @fifo: name of the declared fifo
[all …]
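
The macros quoted above distinguish an in-place fifo (element array embedded in the object) from a pointer fifo (array attached at runtime). Below is a hedged sketch of how a driver might declare and use both forms; it relies only on the kfifo macros named in this header, builds solely in a kernel context, and should be read as an outline rather than canonical usage.

#include <linux/kfifo.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* in-place fifo: the element array is part of the structure itself */
static DEFINE_KFIFO(events, int, 16);

/* pointer fifo: the array lives outside and is attached at runtime */
static DECLARE_KFIFO_PTR(samples, u16);

static int fifo_example(void)
{
	int v;

	/* the pointer fifo needs a backing buffer before first use */
	if (kfifo_alloc(&samples, 64, GFP_KERNEL))
		return -ENOMEM;

	kfifo_put(&events, 42);        /* enqueue a single element */
	if (kfifo_get(&events, &v))    /* dequeue it again         */
		pr_info("got %d\n", v);

	kfifo_free(&samples);
	return 0;
}
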
/kernel/linux/linux-4.19/include/linux/
kfifo.h
2 * A generic kernel FIFO implementation
26 * How to porting drivers to the new generic FIFO API:
45 * and one writer is using the fifo and no kfifo_reset() will be called.
112 * helper macro to distinguish between real in place fifo where the fifo
113 * array is a part of the structure and the fifo type where the array is
114 * outside of the fifo structure.
116 #define __is_kfifo_ptr(fifo) \ argument
117 (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type))))
120 * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object
121 * @fifo: name of the declared fifo
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
base.c
37 nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid) in nvkm_fifo_recover_chan() argument
40 if (WARN_ON(!fifo->func->recover_chan)) in nvkm_fifo_recover_chan()
42 spin_lock_irqsave(&fifo->lock, flags); in nvkm_fifo_recover_chan()
43 fifo->func->recover_chan(fifo, chid); in nvkm_fifo_recover_chan()
44 spin_unlock_irqrestore(&fifo->lock, flags); in nvkm_fifo_recover_chan()
48 nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags) in nvkm_fifo_pause() argument
50 return fifo->func->pause(fifo, flags); in nvkm_fifo_pause()
54 nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags) in nvkm_fifo_start() argument
56 return fifo->func->start(fifo, flags); in nvkm_fifo_start()
60 nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info) in nvkm_fifo_fault() argument
[all …]
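
The base.c lines above show the recurring pattern in this engine: thin wrappers that dispatch through a per-chip function table (fifo->func), with the channel-recovery path serialized by a lock. A self-contained model of that pattern, using a pthread mutex in place of the kernel spinlock and invented names throughout:

#include <pthread.h>
#include <stdio.h>

struct fifo;

struct fifo_func {                       /* per-implementation ops table */
	void (*recover_chan)(struct fifo *fifo, int chid);
};

struct fifo {
	const struct fifo_func *func;
	pthread_mutex_t lock;            /* stands in for the spinlock */
};

static void fifo_recover_chan(struct fifo *fifo, int chid)
{
	if (!fifo->func->recover_chan)   /* the kernel version WARNs here */
		return;
	pthread_mutex_lock(&fifo->lock);
	fifo->func->recover_chan(fifo, chid);
	pthread_mutex_unlock(&fifo->lock);
}

static void my_recover(struct fifo *fifo, int chid)
{
	(void)fifo;
	printf("recovering channel %d\n", chid);
}

static const struct fifo_func my_func = { .recover_chan = my_recover };

int main(void)
{
	static struct fifo f = {
		.func = &my_func,
		.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	fifo_recover_chan(&f, 3);
	return 0;
}
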
Kbuild
2 nvkm-y += nvkm/engine/fifo/base.o
3 nvkm-y += nvkm/engine/fifo/nv04.o
4 nvkm-y += nvkm/engine/fifo/nv10.o
5 nvkm-y += nvkm/engine/fifo/nv17.o
6 nvkm-y += nvkm/engine/fifo/nv40.o
7 nvkm-y += nvkm/engine/fifo/nv50.o
8 nvkm-y += nvkm/engine/fifo/g84.o
9 nvkm-y += nvkm/engine/fifo/gf100.o
10 nvkm-y += nvkm/engine/fifo/gk104.o
11 nvkm-y += nvkm/engine/fifo/gk110.o
[all …]
gk104.c
52 gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn, in gk104_fifo_engine_status() argument
55 struct nvkm_engine *engine = fifo->engine[engn].engine; in gk104_fifo_engine_status()
56 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_engine_status()
102 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_new() local
103 if (oclass->engn == &fifo->func->chan) { in gk104_fifo_class_new()
105 return user->ctor(fifo, oclass, argv, argc, pobject); in gk104_fifo_class_new()
107 if (oclass->engn == &fifo->func->user) { in gk104_fifo_class_new()
119 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_get() local
122 if (fifo->func->user.ctor && c++ == index) { in gk104_fifo_class_get()
123 oclass->base = fifo->func->user.user; in gk104_fifo_class_get()
[all …]
gf100.c
37 gf100_fifo_uevent_init(struct nvkm_fifo *fifo) in gf100_fifo_uevent_init() argument
39 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_init()
44 gf100_fifo_uevent_fini(struct nvkm_fifo *fifo) in gf100_fifo_uevent_fini() argument
46 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_fini()
51 gf100_fifo_runlist_commit(struct gf100_fifo *fifo) in gf100_fifo_runlist_commit() argument
54 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_runlist_commit()
61 cur = fifo->runlist.mem[fifo->runlist.active]; in gf100_fifo_runlist_commit()
62 fifo->runlist.active = !fifo->runlist.active; in gf100_fifo_runlist_commit()
65 list_for_each_entry(chan, &fifo->chan, head) { in gf100_fifo_runlist_commit()
85 if (wait_event_timeout(fifo->runlist.wait, in gf100_fifo_runlist_commit()
[all …]
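
gf100_fifo_runlist_commit() above is a double-buffered update: two runlist buffers exist, the driver rebuilds one while the hardware can still read the other, flips runlist.active, then waits for the hardware to acknowledge. A stripped-down model of that alternation (hypothetical names, no hardware wait):

#include <stdio.h>

#define RUNLIST_MAX 8

struct runlist {
	int mem[2][RUNLIST_MAX];  /* two buffers: one live, one being rebuilt */
	int count[2];
	int active;               /* index that alternates on every commit    */
};

static void runlist_commit(struct runlist *rl, const int *chans, int n)
{
	int next = !rl->active;   /* rebuild the buffer that is not in use */

	for (int i = 0; i < n && i < RUNLIST_MAX; i++)
		rl->mem[next][i] = chans[i];
	rl->count[next] = n;

	rl->active = next;        /* flip; the driver would now wait for the ack */
}

int main(void)
{
	struct runlist rl = { .active = 0 };
	int chans[] = { 2, 5, 7 };

	runlist_commit(&rl, chans, 3);
	printf("buffer %d holds %d channels\n", rl.active, rl.count[rl.active]);
	return 0;
}
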
/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/nvkm/engine/fifo/
base.c
37 nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid) in nvkm_fifo_recover_chan() argument
40 if (WARN_ON(!fifo->func->recover_chan)) in nvkm_fifo_recover_chan()
42 spin_lock_irqsave(&fifo->lock, flags); in nvkm_fifo_recover_chan()
43 fifo->func->recover_chan(fifo, chid); in nvkm_fifo_recover_chan()
44 spin_unlock_irqrestore(&fifo->lock, flags); in nvkm_fifo_recover_chan()
48 nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags) in nvkm_fifo_pause() argument
50 return fifo->func->pause(fifo, flags); in nvkm_fifo_pause()
54 nvkm_fifo_start(struct nvkm_fifo *fifo, unsigned long *flags) in nvkm_fifo_start() argument
56 return fifo->func->start(fifo, flags); in nvkm_fifo_start()
60 nvkm_fifo_fault(struct nvkm_fifo *fifo, struct nvkm_fault_data *info) in nvkm_fifo_fault() argument
[all …]
Kbuild
1 nvkm-y += nvkm/engine/fifo/base.o
2 nvkm-y += nvkm/engine/fifo/nv04.o
3 nvkm-y += nvkm/engine/fifo/nv10.o
4 nvkm-y += nvkm/engine/fifo/nv17.o
5 nvkm-y += nvkm/engine/fifo/nv40.o
6 nvkm-y += nvkm/engine/fifo/nv50.o
7 nvkm-y += nvkm/engine/fifo/g84.o
8 nvkm-y += nvkm/engine/fifo/gf100.o
9 nvkm-y += nvkm/engine/fifo/gk104.o
10 nvkm-y += nvkm/engine/fifo/gk110.o
[all …]
gk104.c
52 gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn, in gk104_fifo_engine_status() argument
55 struct nvkm_engine *engine = fifo->engine[engn].engine; in gk104_fifo_engine_status()
56 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gk104_fifo_engine_status()
102 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_new() local
103 if (oclass->engn == &fifo->func->chan) { in gk104_fifo_class_new()
105 return user->ctor(fifo, oclass, argv, argc, pobject); in gk104_fifo_class_new()
107 if (oclass->engn == &fifo->func->user) { in gk104_fifo_class_new()
119 struct gk104_fifo *fifo = gk104_fifo(base); in gk104_fifo_class_get() local
122 if (fifo->func->user.ctor && c++ == index) { in gk104_fifo_class_get()
123 oclass->base = fifo->func->user.user; in gk104_fifo_class_get()
[all …]
gf100.c
36 gf100_fifo_uevent_init(struct nvkm_fifo *fifo) in gf100_fifo_uevent_init() argument
38 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_init()
43 gf100_fifo_uevent_fini(struct nvkm_fifo *fifo) in gf100_fifo_uevent_fini() argument
45 struct nvkm_device *device = fifo->engine.subdev.device; in gf100_fifo_uevent_fini()
50 gf100_fifo_runlist_commit(struct gf100_fifo *fifo) in gf100_fifo_runlist_commit() argument
53 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; in gf100_fifo_runlist_commit()
60 cur = fifo->runlist.mem[fifo->runlist.active]; in gf100_fifo_runlist_commit()
61 fifo->runlist.active = !fifo->runlist.active; in gf100_fifo_runlist_commit()
64 list_for_each_entry(chan, &fifo->chan, head) { in gf100_fifo_runlist_commit()
84 if (wait_event_timeout(fifo->runlist.wait, in gf100_fifo_runlist_commit()
[all …]
/kernel/linux/linux-4.19/drivers/staging/axis-fifo/
axis-fifo.c
3 * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
94 #define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full */
95 #define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */
96 #define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full */
97 #define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */
133 unsigned int rx_fifo_depth; /* max words in the receive fifo */
134 unsigned int tx_fifo_depth; /* max words in the transmit fifo */
135 int has_rx_fifo; /* whether the IP has the rx fifo enabled */
136 int has_tx_fifo; /* whether the IP has the tx fifo enabled */
159 struct axis_fifo *fifo = dev_get_drvdata(dev); in sysfs_write() local
[all …]
/kernel/linux/linux-5.10/sound/soc/meson/
axg-fifo.c
16 #include "axg-fifo.h"
20 * capture frontend DAI. The logic behind this two types of fifo is very
67 static void __dma_enable(struct axg_fifo *fifo, bool enable) in __dma_enable() argument
69 regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN, in __dma_enable()
76 struct axg_fifo *fifo = axg_fifo_data(ss); in axg_fifo_pcm_trigger() local
82 __dma_enable(fifo, true); in axg_fifo_pcm_trigger()
87 __dma_enable(fifo, false); in axg_fifo_pcm_trigger()
100 struct axg_fifo *fifo = axg_fifo_data(ss); in axg_fifo_pcm_pointer() local
104 regmap_read(fifo->map, FIFO_STATUS2, &addr); in axg_fifo_pcm_pointer()
115 struct axg_fifo *fifo = axg_fifo_data(ss); in axg_fifo_pcm_hw_params() local
[all …]
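
__dma_enable() in the axg-fifo.c snippet is a read-modify-write of a single control bit through regmap. A hedged sketch of that call pattern follows; FIFO_CTRL0 and CTRL0_DMA_EN are the names used in the snippet, but the offset and bit value given here are placeholders, not the real axg-fifo.h definitions.

#include <linux/bits.h>
#include <linux/regmap.h>
#include <linux/types.h>

#define FIFO_CTRL0   0x00        /* placeholder register offset (assumed) */
#define CTRL0_DMA_EN BIT(31)     /* placeholder bit position (assumed)    */

/* toggle only the DMA-enable bit, leaving the rest of CTRL0 untouched */
static void fifo_dma_enable(struct regmap *map, bool enable)
{
	regmap_update_bits(map, FIFO_CTRL0, CTRL0_DMA_EN,
			   enable ? CTRL0_DMA_EN : 0);
}
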
aiu-fifo.c
12 #include "aiu-fifo.h"
36 struct aiu_fifo *fifo = dai->playback_dma_data; in aiu_fifo_pointer() local
40 addr = snd_soc_component_read(component, fifo->mem_offset + AIU_MEM_RD); in aiu_fifo_pointer()
48 struct aiu_fifo *fifo = dai->playback_dma_data; in aiu_fifo_enable() local
53 fifo->mem_offset + AIU_MEM_CONTROL, in aiu_fifo_enable()
82 struct aiu_fifo *fifo = dai->playback_dma_data; in aiu_fifo_prepare() local
85 fifo->mem_offset + AIU_MEM_CONTROL, in aiu_fifo_prepare()
89 fifo->mem_offset + AIU_MEM_CONTROL, in aiu_fifo_prepare()
100 struct aiu_fifo *fifo = dai->playback_dma_data; in aiu_fifo_hw_params() local
108 /* Setup the fifo boundaries */ in aiu_fifo_hw_params()
[all …]
/kernel/linux/linux-5.10/drivers/staging/axis-fifo/
axis-fifo.c
3 * Xilinx AXIS FIFO: interface to the Xilinx AXI-Stream FIFO IP core
92 #define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full */
93 #define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */
94 #define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full */
95 #define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */
130 unsigned int rx_fifo_depth; /* max words in the receive fifo */
131 unsigned int tx_fifo_depth; /* max words in the transmit fifo */
132 int has_rx_fifo; /* whether the IP has the rx fifo enabled */
133 int has_tx_fifo; /* whether the IP has the tx fifo enabled */
156 struct axis_fifo *fifo = dev_get_drvdata(dev); in sysfs_write() local
[all …]
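
The XLLF_INT_* masks quoted above are single-bit flags in the core's interrupt status register. A trivial standalone illustration of how such masks are tested; the register readout is faked, and only the mask values come from the snippet:

#include <stdint.h>
#include <stdio.h>

#define XLLF_INT_TFPF_MASK 0x00400000 /* Tx FIFO Programmable Full  */
#define XLLF_INT_TFPE_MASK 0x00200000 /* Tx FIFO Programmable Empty */
#define XLLF_INT_RFPF_MASK 0x00100000 /* Rx FIFO Programmable Full  */
#define XLLF_INT_RFPE_MASK 0x00080000 /* Rx FIFO Programmable Empty */

int main(void)
{
	uint32_t isr = XLLF_INT_RFPF_MASK | XLLF_INT_TFPE_MASK; /* pretend readout */

	if (isr & XLLF_INT_RFPF_MASK)
		printf("rx fifo nearly full: drain it\n");
	if (isr & XLLF_INT_TFPE_MASK)
		printf("tx fifo nearly empty: refill it\n");
	return 0;
}
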
/kernel/linux/linux-4.19/sound/soc/meson/
axg-fifo.c
16 #include "axg-fifo.h"
20 * capture frontend DAI. The logic behind this two types of fifo is very
67 static void __dma_enable(struct axg_fifo *fifo, bool enable) in __dma_enable() argument
69 regmap_update_bits(fifo->map, FIFO_CTRL0, CTRL0_DMA_EN, in __dma_enable()
75 struct axg_fifo *fifo = axg_fifo_data(ss); in axg_fifo_pcm_trigger() local
81 __dma_enable(fifo, true); in axg_fifo_pcm_trigger()
86 __dma_enable(fifo, false); in axg_fifo_pcm_trigger()
97 struct axg_fifo *fifo = axg_fifo_data(ss); in axg_fifo_pcm_pointer() local
101 regmap_read(fifo->map, FIFO_STATUS2, &addr); in axg_fifo_pcm_pointer()
110 struct axg_fifo *fifo = axg_fifo_data(ss); in axg_fifo_pcm_hw_params() local
[all …]
/kernel/linux/linux-4.19/drivers/usb/renesas_usbhs/
fifo.c
98 struct usbhs_fifo *fifo);
99 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
107 struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); in usbhs_pkt_pop() local
121 if (fifo) in usbhs_pkt_pop()
122 chan = usbhsf_dma_chan_get(fifo, pkt); in usbhs_pkt_pop()
133 if (fifo) in usbhs_pkt_pop()
134 usbhsf_fifo_unselect(pipe, fifo); in usbhs_pkt_pop()
245 * FIFO ctrl
248 struct usbhs_fifo *fifo) in usbhsf_send_terminator() argument
252 usbhs_bset(priv, fifo->ctr, BVAL, BVAL); in usbhsf_send_terminator()
[all …]
/kernel/linux/linux-5.10/drivers/usb/renesas_usbhs/
fifo.c
98 struct usbhs_fifo *fifo);
99 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
109 struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); in usbhs_pkt_pop() local
123 if (fifo) in usbhs_pkt_pop()
124 chan = usbhsf_dma_chan_get(fifo, pkt); in usbhs_pkt_pop()
141 if (fifo) in usbhs_pkt_pop()
142 usbhsf_fifo_unselect(pipe, fifo); in usbhs_pkt_pop()
253 * FIFO ctrl
256 struct usbhs_fifo *fifo) in usbhsf_send_terminator() argument
260 usbhs_bset(priv, fifo->ctr, BVAL, BVAL); in usbhsf_send_terminator()
[all …]
/kernel/linux/linux-5.10/drivers/iio/imu/inv_icm42600/
inv_icm42600_buffer.c
19 /* FIFO header: 1 byte */
51 /* FIFO empty */ in inv_icm42600_fifo_decode_packet()
104 if (st->fifo.en & INV_ICM42600_SENSOR_GYRO) in inv_icm42600_buffer_update_fifo_period()
109 if (st->fifo.en & INV_ICM42600_SENSOR_ACCEL) in inv_icm42600_buffer_update_fifo_period()
119 st->fifo.period = period; in inv_icm42600_buffer_update_fifo_period()
128 /* update only FIFO EN bits */ in inv_icm42600_buffer_set_fifo_en()
146 st->fifo.en = fifo_en; in inv_icm42600_buffer_set_fifo_en()
181 * inv_icm42600_buffer_update_watermark - update watermark FIFO threshold
186 * FIFO watermark threshold is computed based on the required watermark values
199 * to the FIFO frequency. Beware that this is only true because we are not
[all …]
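
inv_icm42600_buffer_update_fifo_period() above picks a single FIFO period from whichever sensors are enabled; both sensors share one FIFO, so packets arrive at the rate of the fastest enabled one. The standalone sketch below illustrates that selection with made-up rates; it is a reconstruction of the idea, not the driver's code.

#include <stdbool.h>
#include <stdio.h>

#define PERIOD_GYRO_NS  10000000U  /* 100 Hz gyro, value assumed for the example  */
#define PERIOD_ACCEL_NS 20000000U  /*  50 Hz accel, value assumed for the example */

/* effective FIFO packet period: the smaller period of the enabled sensors */
static unsigned int fifo_period_ns(bool gyro_en, bool accel_en)
{
	unsigned int period = 0;   /* 0 means nothing is feeding the FIFO */

	if (gyro_en)
		period = PERIOD_GYRO_NS;
	if (accel_en && (!period || PERIOD_ACCEL_NS < period))
		period = PERIOD_ACCEL_NS;

	return period;
}

int main(void)
{
	printf("gyro+accel: %u ns per packet\n", fifo_period_ns(true, true));
	printf("accel only: %u ns per packet\n", fifo_period_ns(false, true));
	return 0;
}
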
/kernel/linux/linux-5.10/drivers/platform/mellanox/
mlxbf-tmfifo.c
38 /* Virtual devices sharing the TM FIFO. */
67 * @fifo: pointer to the tmfifo structure
83 struct mlxbf_tmfifo *fifo; member
113 * @tx_buf: tx buffer used to buffer data before writing into the FIFO
129 * @fifo: pointer to the tmfifo structure
134 struct mlxbf_tmfifo *fifo; member
143 * @rx_base: mapped register base address for the Rx FIFO
144 * @tx_base: mapped register base address for the Tx FIFO
145 * @rx_fifo_size: number of entries of the Rx FIFO
146 * @tx_fifo_size: number of entries of the Tx FIFO
[all …]
