1 /*
2 V4L2 API compliance buffer ioctl tests.
3
4 Copyright (C) 2012 Hans Verkuil <hverkuil@xs4all.nl>
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
19 */
20
21 #include <algorithm>
22 #include <atomic>
23 #include <csignal>
24 #include <map>
25 #include <set>
26 #include <vector>
27
28 #include <arpa/inet.h>
29 #include <poll.h>
30 #include <pthread.h>
31 #include <sys/epoll.h>
32
33 #include "v4l2-compliance.h"
34
35 static cv4l_fmt cur_fmt;
36 static cv4l_fmt cur_m2m_fmt;
37 static int stream_from_fd = -1;
38 static bool stream_use_hdr;
39 static unsigned max_bytesused[VIDEO_MAX_PLANES];
40 static unsigned min_data_offset[VIDEO_MAX_PLANES];
41
operator <(struct timeval const & n1,struct timeval const & n2)42 static bool operator<(struct timeval const& n1, struct timeval const& n2)
43 {
44 return n1.tv_sec < n2.tv_sec ||
45 (n1.tv_sec == n2.tv_sec && n1.tv_usec < n2.tv_usec);
46 }
47
48 struct buf_seq {
buf_seqbuf_seq49 buf_seq() { init(); }
initbuf_seq50 void init()
51 {
52 last_seq = -1;
53 field_nr = 1;
54 }
55
56 int last_seq;
57 unsigned last_field;
58 unsigned field_nr;
59 };
60
61 static struct buf_seq last_seq, last_m2m_seq;
62
63 static int buf_req_fds[VIDEO_MAX_FRAME * 2];
64
named_ioctl_fd(int fd,bool trace,const char * cmd_name,unsigned long cmd,void * arg)65 static inline int named_ioctl_fd(int fd, bool trace, const char *cmd_name, unsigned long cmd, void *arg)
66 {
67 int retval;
68 int e;
69
70 retval = ioctl(fd, cmd, arg);
71 e = retval == 0 ? 0 : errno;
72 if (trace)
73 fprintf(stderr, "\t\t%s returned %d (%s)\n",
74 cmd_name, retval, strerror(e));
75 return retval == -1 ? e : (retval ? -1 : 0);
76 }
77 #define doioctl_fd(fd, r, p) named_ioctl_fd((fd), node->g_trace(), #r, r, p)
78
79 enum QueryBufMode {
80 Unqueued,
81 Prepared,
82 Queued,
83 Dequeued
84 };
85
86 using buf_info_map = std::map<struct timeval, struct v4l2_buffer>;
87 static buf_info_map buffer_info;
88
89 #define FILE_HDR_ID v4l2_fourcc('V', 'h', 'd', 'r')
90
stream_close()91 static void stream_close()
92 {
93 if (stream_from_fd >= 0) {
94 close(stream_from_fd);
95 stream_from_fd = -1;
96 }
97 }
98
stream_for_fmt(__u32 pixelformat)99 static void stream_for_fmt(__u32 pixelformat)
100 {
101 stream_close();
102
103 std::string file = stream_from(fcc2s(pixelformat), stream_use_hdr);
104 if (file.empty())
105 return;
106
107 stream_from_fd = open(file.c_str(), O_RDONLY);
108
109 if (stream_from_fd < 0)
110 fprintf(stderr, "cannot open file %s\n", file.c_str());
111 }
112
stream_reset()113 static void stream_reset()
114 {
115 if (stream_from_fd >= 0)
116 lseek(stream_from_fd, 0, SEEK_SET);
117 }
118
fill_output_buffer(const cv4l_queue & q,cv4l_buffer & buf,bool first_run=true)119 static bool fill_output_buffer(const cv4l_queue &q, cv4l_buffer &buf, bool first_run = true)
120 {
121 bool seek = false;
122
123 if (stream_from_fd < 0)
124 return true;
125
126 if (stream_use_hdr) {
127 __u32 v;
128
129 if (read(stream_from_fd, &v, sizeof(v)) != sizeof(v))
130 seek = true;
131 else if (ntohl(v) != FILE_HDR_ID) {
132 fprintf(stderr, "Unknown header ID\n");
133 return false;
134 }
135 }
136
137 for (unsigned p = 0; !seek && p < buf.g_num_planes(); p++) {
138 __u32 len = buf.g_length(p);
139
140 buf.s_bytesused(len, p);
141 buf.s_data_offset(0, p);
142 if (stream_from_fd < 0)
143 continue;
144
145 if (!stream_use_hdr) {
146 ssize_t sz = read(stream_from_fd, q.g_dataptr(buf.g_index(), p), len);
147
148 if (sz < static_cast<ssize_t>(len)) {
149 seek = true;
150 break;
151 }
152 continue;
153 }
154 __u32 bytesused;
155
156 if (read(stream_from_fd, &bytesused, sizeof(bytesused)) != sizeof(bytesused)) {
157 seek = true;
158 break;
159 }
160 bytesused = ntohl(bytesused);
161 if (bytesused > len) {
162 fprintf(stderr, "plane size is too large (%u > %u)\n",
163 bytesused, len);
164 return false;
165 }
166 buf.s_bytesused(bytesused, p);
167
168 ssize_t sz = read(stream_from_fd, q.g_dataptr(buf.g_index(), p), bytesused);
169
170 if (sz < static_cast<ssize_t>(bytesused)) {
171 seek = true;
172 break;
173 }
174 }
175 if (!seek)
176 return true;
177 if (!first_run)
178 return false;
179
180 stream_reset();
181 if (fill_output_buffer(q, buf, false))
182 return true;
183
184 close(stream_from_fd);
185 stream_from_fd = -1;
186
187 unsigned size = 0;
188
189 for (unsigned p = 0; p < buf.g_num_planes(); p++)
190 size += buf.g_length(p);
191
192 if (stream_use_hdr)
193 fprintf(stderr, "the input file is too small\n");
194 else
195 fprintf(stderr, "the input file is smaller than %d bytes\n", size);
196 return false;
197 }
198
199 class buffer : public cv4l_buffer {
200 public:
buffer(unsigned type=0,unsigned memory=0,unsigned index=0)201 explicit buffer(unsigned type = 0, unsigned memory = 0, unsigned index = 0) :
202 cv4l_buffer(type, memory, index) {}
buffer(const cv4l_queue & q,unsigned index=0)203 explicit buffer(const cv4l_queue &q, unsigned index = 0) :
204 cv4l_buffer(q, index) {}
buffer(const cv4l_buffer & b)205 explicit buffer(const cv4l_buffer &b) : cv4l_buffer(b) {}
206
querybuf(node * node,unsigned index)207 int querybuf(node *node, unsigned index)
208 {
209 return node->querybuf(*this, index);
210 }
prepare_buf(node * node,bool fill_bytesused=true)211 int prepare_buf(node *node, bool fill_bytesused = true)
212 {
213 if (v4l_type_is_output(g_type()))
214 fill_output_buf(fill_bytesused);
215 int err = node->prepare_buf(*this);
216 if (err == 0 &&
217 v4l_type_is_video(g_type()) && v4l_type_is_output(g_type()))
218 fail_on_test(g_field() == V4L2_FIELD_ANY);
219 return err;
220 }
prepare_buf(node * node,const cv4l_queue & q)221 int prepare_buf(node *node, const cv4l_queue &q)
222 {
223 if (v4l_type_is_output(g_type()))
224 fill_output_buffer(q, *this);
225 return prepare_buf(node, false);
226 }
dqbuf(node * node)227 int dqbuf(node *node)
228 {
229 return node->dqbuf(*this);
230 }
qbuf(node * node,bool fill_bytesused=true)231 int qbuf(node *node, bool fill_bytesused = true)
232 {
233 int err;
234
235 if (v4l_type_is_output(g_type()))
236 fill_output_buf(fill_bytesused);
237 err = node->qbuf(*this);
238 if (err == 0 &&
239 v4l_type_is_video(g_type()) && v4l_type_is_output(g_type())) {
240 fail_on_test(g_field() == V4L2_FIELD_ANY);
241 buffer_info[g_timestamp()] = buf;
242 }
243 return err;
244 }
qbuf(node * node,const cv4l_queue & q)245 int qbuf(node *node, const cv4l_queue &q)
246 {
247 if (v4l_type_is_output(g_type()))
248 fill_output_buffer(q, *this);
249 return qbuf(node, false);
250 }
check(const cv4l_queue & q,enum QueryBufMode mode,bool is_m2m=false)251 int check(const cv4l_queue &q, enum QueryBufMode mode, bool is_m2m = false)
252 {
253 int ret = check(q.g_type(), q.g_memory(), g_index(), mode, last_seq, is_m2m);
254
255 if (!ret)
256 ret = check_planes(q, mode);
257 return ret;
258 }
check(const cv4l_queue & q,enum QueryBufMode mode,__u32 index,bool is_m2m=false)259 int check(const cv4l_queue &q, enum QueryBufMode mode, __u32 index, bool is_m2m = false)
260 {
261 int ret = check(q.g_type(), q.g_memory(), index, mode, last_seq, is_m2m);
262
263 if (!ret)
264 ret = check_planes(q, mode);
265 return ret;
266 }
check(const cv4l_queue & q,buf_seq & seq,bool is_m2m=false)267 int check(const cv4l_queue &q, buf_seq &seq, bool is_m2m = false)
268 {
269 int ret = check(q.g_type(), q.g_memory(), g_index(), Dequeued, seq, is_m2m);
270
271 if (!ret)
272 ret = check_planes(q, Dequeued);
273 return ret;
274 }
check(enum QueryBufMode mode,__u32 index,bool is_m2m=false)275 int check(enum QueryBufMode mode, __u32 index, bool is_m2m = false)
276 {
277 return check(g_type(), g_memory(), index, mode, last_seq, is_m2m);
278 }
check(buf_seq & seq,bool is_m2m=false)279 int check(buf_seq &seq, bool is_m2m = false)
280 {
281 return check(g_type(), g_memory(), g_index(), Dequeued, seq, is_m2m);
282 }
283
284 private:
285 int check(unsigned type, unsigned memory, unsigned index,
286 enum QueryBufMode mode, struct buf_seq &seq, bool is_m2m);
287 int check_planes(const cv4l_queue &q, enum QueryBufMode mode);
288 void fill_output_buf(bool fill_bytesused);
289 };
290
fill_output_buf(bool fill_bytesused=true)291 void buffer::fill_output_buf(bool fill_bytesused = true)
292 {
293 timespec ts;
294 v4l2_timecode tc;
295
296 clock_gettime(CLOCK_MONOTONIC, &ts);
297 if (ts_is_copy()) {
298 s_timestamp_ts(ts);
299 s_timestamp_src(V4L2_BUF_FLAG_TSTAMP_SRC_SOE);
300 }
301 s_flags(g_flags() | V4L2_BUF_FLAG_TIMECODE);
302 tc.type = V4L2_TC_TYPE_30FPS;
303 tc.flags = V4L2_TC_USERBITS_8BITCHARS;
304 tc.frames = ts.tv_nsec * 30 / 1000000000;
305 tc.seconds = ts.tv_sec % 60;
306 tc.minutes = (ts.tv_sec / 60) % 60;
307 tc.hours = (ts.tv_sec / 3600) % 30;
308 tc.userbits[0] = 't';
309 tc.userbits[1] = 'e';
310 tc.userbits[2] = 's';
311 tc.userbits[3] = 't';
312 s_timecode(tc);
313 s_field(V4L2_FIELD_ANY);
314 if (!fill_bytesused)
315 return;
316 for (unsigned p = 0; p < g_num_planes(); p++) {
317 s_bytesused(g_length(p), p);
318 s_data_offset(0, p);
319 }
320 }
321
check_planes(const cv4l_queue & q,enum QueryBufMode mode)322 int buffer::check_planes(const cv4l_queue &q, enum QueryBufMode mode)
323 {
324 if (mode == Dequeued || mode == Prepared) {
325 for (unsigned p = 0; p < g_num_planes(); p++) {
326 if (g_memory() == V4L2_MEMORY_USERPTR)
327 fail_on_test(g_userptr(p) != q.g_userptr(g_index(), p));
328 else if (g_memory() == V4L2_MEMORY_DMABUF)
329 fail_on_test(g_fd(p) != q.g_fd(g_index(), p));
330 }
331 }
332 return 0;
333 }
334
check(unsigned type,unsigned memory,unsigned index,enum QueryBufMode mode,struct buf_seq & seq,bool is_m2m)335 int buffer::check(unsigned type, unsigned memory, unsigned index,
336 enum QueryBufMode mode, struct buf_seq &seq, bool is_m2m)
337 {
338 unsigned timestamp = g_timestamp_type();
339 bool ts_copy = ts_is_copy();
340 unsigned timestamp_src = g_timestamp_src();
341 unsigned frame_types = 0;
342 unsigned buf_states = 0;
343 const cv4l_fmt &fmt = is_m2m ? cur_m2m_fmt : cur_fmt;
344
345 fail_on_test(g_type() != type);
346 fail_on_test(g_memory() == 0);
347 fail_on_test(g_memory() != memory);
348 fail_on_test(g_index() >= VIDEO_MAX_FRAME);
349 fail_on_test(g_index() != index);
350 fail_on_test(buf.reserved2);
351 if (g_flags() & V4L2_BUF_FLAG_REQUEST_FD)
352 fail_on_test(g_request_fd() < 0);
353 else
354 fail_on_test(g_request_fd());
355 fail_on_test(timestamp != V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC &&
356 timestamp != V4L2_BUF_FLAG_TIMESTAMP_COPY);
357 fail_on_test(timestamp_src != V4L2_BUF_FLAG_TSTAMP_SRC_SOE &&
358 timestamp_src != V4L2_BUF_FLAG_TSTAMP_SRC_EOF);
359 fail_on_test(!ts_copy && v4l_type_is_output(g_type()) &&
360 timestamp_src == V4L2_BUF_FLAG_TSTAMP_SRC_SOE);
361 if (g_flags() & V4L2_BUF_FLAG_KEYFRAME)
362 frame_types++;
363 if (g_flags() & V4L2_BUF_FLAG_PFRAME)
364 frame_types++;
365 if (g_flags() & V4L2_BUF_FLAG_BFRAME)
366 frame_types++;
367 fail_on_test(frame_types > 1);
368 if (g_flags() & V4L2_BUF_FLAG_QUEUED)
369 buf_states++;
370 if (g_flags() & V4L2_BUF_FLAG_DONE)
371 buf_states++;
372 if (g_flags() & V4L2_BUF_FLAG_PREPARED)
373 buf_states++;
374 if (g_flags() & V4L2_BUF_FLAG_IN_REQUEST) {
375 fail_on_test(!(g_flags() & V4L2_BUF_FLAG_REQUEST_FD));
376 if (!(g_flags() & V4L2_BUF_FLAG_PREPARED))
377 buf_states++;
378 }
379 fail_on_test(buf_states > 1);
380 fail_on_test(buf.length == 0);
381 if (v4l_type_is_planar(g_type())) {
382 fail_on_test(buf.length > VIDEO_MAX_PLANES);
383 for (unsigned p = 0; p < buf.length; p++) {
384 struct v4l2_plane *vp = buf.m.planes + p;
385
386 fail_on_test(check_0(vp->reserved, sizeof(vp->reserved)));
387 fail_on_test(vp->length == 0);
388 }
389 }
390
391 if (v4l_type_is_capture(g_type()) && !ts_copy && !is_vivid &&
392 (g_flags() & V4L2_BUF_FLAG_TIMECODE))
393 warn_once("V4L2_BUF_FLAG_TIMECODE was used!\n");
394
395 if (mode == Dequeued) {
396 for (unsigned p = 0; p < g_num_planes(); p++) {
397 if (!(g_flags() & V4L2_BUF_FLAG_LAST))
398 fail_on_test(!g_bytesused(p));
399 if (!g_bytesused(p))
400 fail_on_test(g_data_offset(p));
401 else
402 fail_on_test(g_data_offset(p) >= g_bytesused(p));
403 fail_on_test(g_bytesused(p) > g_length(p));
404 }
405 fail_on_test(!g_timestamp().tv_sec && !g_timestamp().tv_usec);
406 fail_on_test(g_flags() & V4L2_BUF_FLAG_DONE);
407
408 // The vivid driver has unreliable timings causing wrong
409 // sequence numbers on occasion. Skip this test until this
410 // bug is solved.
411 if (!is_vivid)
412 fail_on_test((int)g_sequence() < seq.last_seq + 1);
413 else if ((int)g_sequence() < seq.last_seq + 1)
414 info("(int)g_sequence() < seq.last_seq + 1): %d < %d\n",
415 (int)g_sequence(), seq.last_seq + 1);
416
417 if (v4l_type_is_video(g_type())) {
418 fail_on_test(g_field() == V4L2_FIELD_ALTERNATE);
419 fail_on_test(g_field() == V4L2_FIELD_ANY);
420 if (fmt.g_field() == V4L2_FIELD_ALTERNATE) {
421 fail_on_test(g_field() != V4L2_FIELD_BOTTOM &&
422 g_field() != V4L2_FIELD_TOP);
423 fail_on_test(g_field() == seq.last_field);
424 seq.field_nr ^= 1;
425 if (seq.field_nr) {
426 if (static_cast<int>(g_sequence()) != seq.last_seq)
427 warn("got sequence number %u, expected %u\n",
428 g_sequence(), seq.last_seq);
429 } else {
430 fail_on_test((int)g_sequence() == seq.last_seq + 1);
431 if (static_cast<int>(g_sequence()) != seq.last_seq + 1)
432 warn("got sequence number %u, expected %u\n",
433 g_sequence(), seq.last_seq + 1);
434 }
435 } else {
436 fail_on_test(g_field() != fmt.g_field());
437 if (static_cast<int>(g_sequence()) != seq.last_seq + 1)
438 warn_or_info(is_vivid,
439 "got sequence number %u, expected %u\n",
440 g_sequence(), seq.last_seq + 1);
441 }
442 } else if (!v4l_type_is_meta(g_type()) && static_cast<int>(g_sequence()) != seq.last_seq + 1) {
443 // Don't do this for meta streams: the sequence counter is typically
444 // linked to the video capture to sync the metadata with the video
445 // data. So the sequence counter would start at a non-zero value.
446 warn_or_info(is_vivid, "got sequence number %u, expected %u\n",
447 g_sequence(), seq.last_seq + 1);
448 }
449 seq.last_seq = static_cast<int>(g_sequence());
450 seq.last_field = g_field();
451 } else {
452 fail_on_test(g_sequence());
453 if (mode == Queued && ts_copy && v4l_type_is_output(g_type())) {
454 fail_on_test(!g_timestamp().tv_sec && !g_timestamp().tv_usec);
455 } else {
456 fail_on_test(g_timestamp().tv_sec || g_timestamp().tv_usec);
457 }
458 if (!v4l_type_is_output(g_type()) || mode == Unqueued)
459 fail_on_test(frame_types);
460 if (mode == Unqueued)
461 fail_on_test(g_flags() & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_PREPARED |
462 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR));
463 else if (mode == Prepared)
464 fail_on_test((g_flags() & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_PREPARED |
465 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR)) !=
466 V4L2_BUF_FLAG_PREPARED);
467 else
468 fail_on_test(!(g_flags() & (V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_PREPARED)));
469 }
470 return 0;
471 }
472
testQueryBuf(struct node * node,unsigned type,unsigned count)473 static int testQueryBuf(struct node *node, unsigned type, unsigned count)
474 {
475 buffer buf(type);
476 unsigned i;
477 int ret;
478
479 for (i = 0; i < count; i++) {
480 fail_on_test(buf.querybuf(node, i));
481 if (v4l_type_is_planar(buf.g_type()))
482 fail_on_test(buf.buf.m.planes != buf.planes);
483 fail_on_test(buf.check(Unqueued, i));
484 }
485 ret = buf.querybuf(node, count);
486 fail_on_test_val(ret != EINVAL, ret);
487 return 0;
488 }
489
testSetupVbi(struct node * node,int type)490 static int testSetupVbi(struct node *node, int type)
491 {
492 if (!v4l_type_is_vbi(type))
493 return 0;
494
495 if (!(node->cur_io_caps & V4L2_IN_CAP_STD))
496 return -1;
497
498 node->s_type(type);
499 cv4l_fmt vbi_fmt;
500
501 if (!node->g_fmt(vbi_fmt))
502 node->s_fmt(vbi_fmt);
503 return 0;
504 }
505
testCanSetSameTimings(struct node * node)506 static int testCanSetSameTimings(struct node *node)
507 {
508 if (node->cur_io_caps & V4L2_IN_CAP_STD) {
509 v4l2_std_id std;
510
511 fail_on_test(node->g_std(std));
512 fail_on_test(node->s_std(std));
513 }
514 if (node->cur_io_caps & V4L2_IN_CAP_DV_TIMINGS) {
515 v4l2_dv_timings timings;
516
517 fail_on_test(node->g_dv_timings(timings));
518 fail_on_test(node->s_dv_timings(timings));
519 }
520 if (node->cur_io_caps & V4L2_IN_CAP_NATIVE_SIZE) {
521 v4l2_selection sel = {
522 node->g_selection_type(),
523 V4L2_SEL_TGT_NATIVE_SIZE,
524 };
525
526 fail_on_test(node->g_selection(sel));
527 fail_on_test(node->s_selection(sel));
528 }
529 return 0;
530 }
531
testRemoveBufs(struct node * node)532 int testRemoveBufs(struct node *node)
533 {
534 int ret;
535 unsigned i;
536
537 node->reopen();
538
539 for (i = 1; i <= V4L2_BUF_TYPE_LAST; i++) {
540 struct v4l2_remove_buffers removebufs = { };
541 unsigned buffers;
542
543 if (!(node->valid_buftypes & (1 << i)))
544 continue;
545
546 cv4l_queue q(i, V4L2_MEMORY_MMAP);
547
548 if (testSetupVbi(node, i))
549 continue;
550 ret = q.remove_bufs(node, 0, 0);
551 if (ret == ENOTTY)
552 continue;
553
554 q.init(i, V4L2_MEMORY_MMAP);
555 ret = q.create_bufs(node, 0);
556
557 memset(&removebufs, 0xff, sizeof(removebufs));
558 removebufs.index = 0;
559 removebufs.count = 0;
560 removebufs.type = q.g_type();
561 fail_on_test(doioctl(node, VIDIOC_REMOVE_BUFS, &removebufs));
562 fail_on_test(check_0(removebufs.reserved, sizeof(removebufs.reserved)));
563
564 buffer buf(i);
565
566 /* Create only 1 buffer */
567 fail_on_test(q.create_bufs(node, 1));
568 buffers = q.g_buffers();
569 fail_on_test(buffers != 1);
570 /* Removing buffer index 1 must fail */
571 fail_on_test(q.remove_bufs(node, 1, buffers) != EINVAL);
572 /* Removing buffer index 0 is valid */
573 fail_on_test(q.remove_bufs(node, 0, buffers));
574 /* Removing buffer index 0 again must fail */
575 fail_on_test(q.remove_bufs(node, 0, 1) != EINVAL);
576 /* Create 3 buffers indexes 0 to 2 */
577 fail_on_test(q.create_bufs(node, 3));
578 /* Remove them one by one */
579 fail_on_test(q.remove_bufs(node, 2, 1));
580 fail_on_test(q.remove_bufs(node, 0, 1));
581 fail_on_test(q.remove_bufs(node, 1, 1));
582 /* Removing buffer index 0 again must fail */
583 fail_on_test(q.remove_bufs(node, 0, 1) != EINVAL);
584
585 /* Create 4 buffers indexes 0 to 3 */
586 fail_on_test(q.create_bufs(node, 4));
587 /* Remove buffers index 1 and 2 */
588 fail_on_test(q.remove_bufs(node, 1, 2));
589 /* Add 3 more buffers should be indexes 4 to 6 */
590 fail_on_test(q.create_bufs(node, 3));
591 /* Query buffers:
592 * 1 and 2 have been removed they must fail
593 * 0 and 3 to 6 must exist*/
594 fail_on_test(buf.querybuf(node, 0));
595 fail_on_test(buf.querybuf(node, 1) != EINVAL);
596 fail_on_test(buf.querybuf(node, 2) != EINVAL);
597 fail_on_test(buf.querybuf(node, 3));
598 fail_on_test(buf.querybuf(node, 4));
599 fail_on_test(buf.querybuf(node, 5));
600 fail_on_test(buf.querybuf(node, 6));
601
602 /* Remove existing buffer index 6 with bad type must fail */
603 memset(&removebufs, 0xff, sizeof(removebufs));
604 removebufs.index = 6;
605 removebufs.count = 1;
606 removebufs.type = 0;
607 fail_on_test(doioctl(node, VIDIOC_REMOVE_BUFS, &removebufs) != EINVAL);
608
609 /* Remove existing buffer index 6 with bad type and count == 0
610 * must fail */
611 memset(&removebufs, 0xff, sizeof(removebufs));
612 removebufs.index = 6;
613 removebufs.count = 0;
614 removebufs.type = 0;
615 fail_on_test(doioctl(node, VIDIOC_REMOVE_BUFS, &removebufs) != EINVAL);
616
617 /* Remove with count == 0 must always return 0 */
618 fail_on_test(q.remove_bufs(node, 0, 0));
619 fail_on_test(q.remove_bufs(node, 1, 0));
620 fail_on_test(q.remove_bufs(node, 6, 0));
621 fail_on_test(q.remove_bufs(node, 7, 0));
622 fail_on_test(q.remove_bufs(node, 0xffffffff, 0));
623
624 /* Remove crossing max allowed buffers boundary must fail */
625 fail_on_test(q.remove_bufs(node, q.g_max_num_buffers() - 2, 7) != EINVAL);
626
627 /* Remove overflow must fail */
628 fail_on_test(q.remove_bufs(node, 3, 0xfffffff) != EINVAL);
629
630 /* Remove 2 buffers on index 2 when index 2 is free must fail */
631 fail_on_test(q.remove_bufs(node, 2, 2) != EINVAL);
632
633 /* Remove 2 buffers on index 0 when index 1 is free must fail */
634 fail_on_test(q.remove_bufs(node, 0, 2) != EINVAL);
635
636 /* Remove 2 buffers on index 1 when index 1 and 2 are free must fail */
637 fail_on_test(q.remove_bufs(node, 1, 2) != EINVAL);
638
639 /* Create 2 buffers, that must fill the hole */
640 fail_on_test(q.create_bufs(node, 2));
641 /* Remove all buffers */
642 fail_on_test(q.remove_bufs(node, 0, 7));
643
644 fail_on_test(q.reqbufs(node, 0));
645 }
646
647 return 0;
648 }
649
testReqBufs(struct node * node)650 int testReqBufs(struct node *node)
651 {
652 struct v4l2_create_buffers crbufs = { };
653 struct v4l2_requestbuffers reqbufs = { };
654 bool can_stream = node->g_caps() & V4L2_CAP_STREAMING;
655 bool can_rw = node->g_caps() & V4L2_CAP_READWRITE;
656 bool mmap_valid;
657 bool userptr_valid;
658 bool dmabuf_valid;
659 int ret;
660 unsigned i, m;
661
662 node->reopen();
663
664 cv4l_queue q(0, 0);
665
666 ret = q.reqbufs(node, 0);
667 if (ret == ENOTTY) {
668 fail_on_test(can_stream);
669 return ret;
670 }
671 fail_on_test_val(ret != EINVAL, ret);
672 fail_on_test(node->node2 == nullptr);
673 for (i = 1; i <= V4L2_BUF_TYPE_LAST; i++) {
674 bool is_vbi_raw = (i == V4L2_BUF_TYPE_VBI_CAPTURE ||
675 i == V4L2_BUF_TYPE_VBI_OUTPUT);
676 bool is_overlay = v4l_type_is_overlay(i);
677 __u32 caps = 0;
678
679 if (!(node->valid_buftypes & (1 << i)))
680 continue;
681
682 if (testSetupVbi(node, i))
683 continue;
684
685 info("test buftype %s\n", buftype2s(i).c_str());
686 if (node->valid_buftype == 0)
687 node->valid_buftype = i;
688
689 q.init(0, 0);
690 fail_on_test(q.reqbufs(node, i) != EINVAL);
691 q.init(i, V4L2_MEMORY_MMAP);
692 ret = q.reqbufs(node, 0);
693 fail_on_test_val(ret && ret != EINVAL, ret);
694 mmap_valid = !ret;
695 if (mmap_valid)
696 node->buf_caps = caps = q.g_capabilities();
697 if (caps) {
698 fail_on_test(mmap_valid ^ !!(caps & V4L2_BUF_CAP_SUPPORTS_MMAP));
699 if (caps & V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS)
700 node->supports_orphaned_bufs = true;
701 }
702
703 q.init(i, V4L2_MEMORY_USERPTR);
704 ret = q.reqbufs(node, 0);
705 fail_on_test_val(ret && ret != EINVAL, ret);
706 userptr_valid = !ret;
707 fail_on_test(!mmap_valid && userptr_valid);
708 if (caps)
709 fail_on_test(userptr_valid ^ !!(caps & V4L2_BUF_CAP_SUPPORTS_USERPTR));
710
711 q.init(i, V4L2_MEMORY_DMABUF);
712 ret = q.reqbufs(node, 0);
713 fail_on_test_val(ret && ret != EINVAL, ret);
714 dmabuf_valid = !ret;
715 fail_on_test(!mmap_valid && dmabuf_valid);
716 fail_on_test(dmabuf_valid && (caps != q.g_capabilities()));
717 if (caps)
718 fail_on_test(dmabuf_valid ^ !!(caps & V4L2_BUF_CAP_SUPPORTS_DMABUF));
719
720 fail_on_test((can_stream && !is_overlay) && !mmap_valid && !userptr_valid && !dmabuf_valid);
721 fail_on_test((!can_stream || is_overlay) && (mmap_valid || userptr_valid || dmabuf_valid));
722 if (!can_stream || is_overlay)
723 continue;
724
725 if (mmap_valid) {
726 q.init(i, V4L2_MEMORY_MMAP);
727 fail_on_test(q.reqbufs(node, 1));
728 fail_on_test(q.g_buffers() == 0);
729 fail_on_test(q.g_memory() != V4L2_MEMORY_MMAP);
730 fail_on_test(q.g_type() != i);
731 fail_on_test(q.reqbufs(node, 1));
732 fail_on_test(testQueryBuf(node, i, q.g_buffers()));
733 node->valid_memorytype |= 1 << V4L2_MEMORY_MMAP;
734 }
735
736 if (userptr_valid) {
737 q.init(i, V4L2_MEMORY_USERPTR);
738 fail_on_test(q.reqbufs(node, 1));
739 fail_on_test(q.g_buffers() == 0);
740 fail_on_test(q.g_memory() != V4L2_MEMORY_USERPTR);
741 fail_on_test(q.g_type() != i);
742 fail_on_test(q.reqbufs(node, 1));
743 fail_on_test(testQueryBuf(node, i, q.g_buffers()));
744 node->valid_memorytype |= 1 << V4L2_MEMORY_USERPTR;
745 }
746
747 if (dmabuf_valid) {
748 q.init(i, V4L2_MEMORY_DMABUF);
749 fail_on_test(q.reqbufs(node, 1));
750 fail_on_test(q.g_buffers() == 0);
751 fail_on_test(q.g_memory() != V4L2_MEMORY_DMABUF);
752 fail_on_test(q.g_type() != i);
753 fail_on_test(q.reqbufs(node, 1));
754 fail_on_test(testQueryBuf(node, i, q.g_buffers()));
755 node->valid_memorytype |= 1 << V4L2_MEMORY_DMABUF;
756 }
757
758 /*
759 * It should be possible to set the same std, timings or
760 * native size even after reqbufs was called.
761 */
762 fail_on_test(testCanSetSameTimings(node));
763
764 if (can_rw) {
765 char buf = 0;
766
767 if (node->can_capture)
768 ret = node->read(&buf, 1);
769 else
770 ret = node->write(&buf, 1);
771 if (ret != -1)
772 return fail("Expected -1, got %d\n", ret);
773 if (errno != EBUSY)
774 return fail("Expected EBUSY, got %d\n", errno);
775 }
776 fail_on_test(q.reqbufs(node, 0));
777
778 for (m = V4L2_MEMORY_MMAP; m <= V4L2_MEMORY_DMABUF; m++) {
779 bool cache_hints_cap = false;
780 bool coherent;
781
782 cache_hints_cap = q.g_capabilities() & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
783 if (!(node->valid_memorytype & (1 << m)))
784 continue;
785 cv4l_queue q2(i, m);
786 fail_on_test(q.reqbufs(node, 1));
787 if (!node->is_m2m) {
788 fail_on_test(q2.reqbufs(node->node2, 1) != EBUSY);
789 fail_on_test(q2.reqbufs(node->node2) != EBUSY);
790 fail_on_test(q.reqbufs(node));
791 fail_on_test(q2.reqbufs(node->node2, 1));
792 fail_on_test(q2.reqbufs(node->node2));
793 }
794 memset(&reqbufs, 0xff, sizeof(reqbufs));
795 reqbufs.count = 1;
796 reqbufs.type = i;
797 reqbufs.memory = m;
798 reqbufs.flags = V4L2_MEMORY_FLAG_NON_COHERENT;
799 fail_on_test(doioctl(node, VIDIOC_REQBUFS, &reqbufs));
800 coherent = reqbufs.flags & V4L2_MEMORY_FLAG_NON_COHERENT;
801 if (!cache_hints_cap) {
802 fail_on_test(coherent);
803 } else {
804 if (m == V4L2_MEMORY_MMAP)
805 fail_on_test(!coherent);
806 else
807 fail_on_test(coherent);
808 }
809 q.reqbufs(node);
810
811 ret = q.create_bufs(node, 0);
812 if (ret == ENOTTY) {
813 warn("VIDIOC_CREATE_BUFS not supported\n");
814 break;
815 }
816
817 memset(&crbufs, 0xff, sizeof(crbufs));
818 node->g_fmt(crbufs.format, i);
819 crbufs.count = 1;
820 crbufs.memory = m;
821 crbufs.flags = V4L2_MEMORY_FLAG_NON_COHERENT;
822 fail_on_test(doioctl(node, VIDIOC_CREATE_BUFS, &crbufs));
823 fail_on_test(check_0(crbufs.reserved, sizeof(crbufs.reserved)));
824 fail_on_test(crbufs.index != q.g_buffers());
825
826 coherent = crbufs.flags & V4L2_MEMORY_FLAG_NON_COHERENT;
827 if (!cache_hints_cap) {
828 fail_on_test(coherent);
829 } else {
830 if (m == V4L2_MEMORY_MMAP)
831 fail_on_test(!coherent);
832 else
833 fail_on_test(coherent);
834 }
835
836 if (cache_hints_cap) {
837 /*
838 * Different memory consistency model. Should fail for MMAP
839 * queues which support cache hints.
840 */
841 crbufs.flags = 0;
842 if (m == V4L2_MEMORY_MMAP)
843 fail_on_test(doioctl(node, VIDIOC_CREATE_BUFS, &crbufs) != EINVAL);
844 else
845 fail_on_test(doioctl(node, VIDIOC_CREATE_BUFS, &crbufs));
846 }
847 q.reqbufs(node);
848
849 fail_on_test(q.create_bufs(node, 1));
850 fail_on_test(q.g_buffers() == 0);
851 fail_on_test(q.g_type() != i);
852 fail_on_test(testQueryBuf(node, i, q.g_buffers()));
853 fail_on_test(q.create_bufs(node, 1));
854 fail_on_test(testQueryBuf(node, i, q.g_buffers()));
855 if (!node->is_m2m)
856 fail_on_test(q2.create_bufs(node->node2, 1) != EBUSY);
857 q.reqbufs(node);
858
859 cv4l_fmt fmt;
860
861 node->g_fmt(fmt, q.g_type());
862 if (V4L2_TYPE_IS_MULTIPLANAR(q.g_type())) {
863 // num_planes == 0 is not allowed
864 fmt.s_num_planes(0);
865 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
866 node->g_fmt(fmt, q.g_type());
867
868 if (fmt.g_num_planes() > 1) {
869 // fewer planes than required by the format
870 // is not allowed
871 fmt.s_num_planes(fmt.g_num_planes() - 1);
872 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
873 node->g_fmt(fmt, q.g_type());
874
875 // A last plane with a 0 sizeimage is not allowed
876 fmt.s_sizeimage(0, fmt.g_num_planes() - 1);
877 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
878 node->g_fmt(fmt, q.g_type());
879 }
880
881 if (fmt.g_num_planes() < VIDEO_MAX_PLANES) {
882 // Add an extra plane, but with size 0. The vb2
883 // framework should test for this.
884 fmt.s_num_planes(fmt.g_num_planes() + 1);
885 fmt.s_sizeimage(0, fmt.g_num_planes() - 1);
886 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
887 node->g_fmt(fmt, q.g_type());
888
889 // This test is debatable: should we allow CREATE_BUFS
890 // to create buffers with more planes than required
891 // by the format?
892 //
893 // For now disallow this. If there is a really good
894 // reason for allowing this, then that should be
895 // documented and carefully tested.
896 //
897 // It is the driver in queue_setup that has to check
898 // this.
899 fmt.s_num_planes(fmt.g_num_planes() + 1);
900 fmt.s_sizeimage(65536, fmt.g_num_planes() - 1);
901 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
902 node->g_fmt(fmt, q.g_type());
903 }
904 }
905 if (is_vbi_raw) {
906 fmt.fmt.vbi.count[0] = 0;
907 fmt.fmt.vbi.count[1] = 0;
908 } else {
909 fmt.s_sizeimage(0, 0);
910 }
911 // zero size for the first plane is not allowed
912 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
913 node->g_fmt(fmt, q.g_type());
914
915 // plane sizes that are too small are not allowed
916 for (unsigned p = 0; p < fmt.g_num_planes(); p++) {
917 if (is_vbi_raw) {
918 fmt.fmt.vbi.count[0] /= 2;
919 fmt.fmt.vbi.count[1] /= 2;
920 } else {
921 fmt.s_sizeimage(fmt.g_sizeimage(p) / 2, p);
922 }
923 }
924 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
925 fail_on_test(testQueryBuf(node, fmt.type, q.g_buffers()));
926 node->g_fmt(fmt, q.g_type());
927
928 // Add 1 MB to each plane or double the vbi counts.
929 // This is allowed.
930 for (unsigned p = 0; p < fmt.g_num_planes(); p++) {
931 if (is_vbi_raw) {
932 fmt.fmt.vbi.count[0] *= 2;
933 fmt.fmt.vbi.count[1] *= 2;
934 } else {
935 fmt.s_sizeimage(fmt.g_sizeimage(p) + (1 << 20), p);
936 }
937 }
938 fail_on_test(q.create_bufs(node, 1, &fmt));
939 buffer buf(q);
940
941 // Check that the new buffer lengths are at least those of
942 // the large sizes as specified by CREATE_BUFS
943 fail_on_test(buf.querybuf(node, 0));
944 fail_on_test(buf.g_num_planes() != fmt.g_num_planes());
945 // Verify that the new buffers actually have the requested
946 // buffer size
947 for (unsigned p = 0; p < buf.g_num_planes(); p++)
948 fail_on_test(buf.g_length(p) < fmt.g_sizeimage(p));
949 node->g_fmt(fmt, q.g_type());
950 }
951 fail_on_test(q.reqbufs(node));
952 }
953 return 0;
954 }
955
testCreateBufsMax(struct node * node)956 int testCreateBufsMax(struct node *node)
957 {
958 unsigned int i;
959 int ret;
960
961 node->reopen();
962
963 cv4l_queue q(0, 0);
964
965 for (i = 1; i <= V4L2_BUF_TYPE_LAST; i++) {
966 if (!(node->valid_buftypes & (1 << i)))
967 continue;
968
969 q.init(i, V4L2_MEMORY_MMAP);
970 ret = q.create_bufs(node, 0);
971 if (!ret && (q.g_capabilities() & V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS)) {
972 fail_on_test(q.create_bufs(node, q.g_max_num_buffers()));
973 /* Some drivers may not have allocated all the requested buffers
974 * because of memory limitation, that is OK but make the next test
975 * failed so skip it
976 */
977 if (q.g_max_num_buffers() != q.g_buffers())
978 continue;
979 ret = q.create_bufs(node, 1);
980 fail_on_test(ret != ENOBUFS);
981 }
982 }
983
984 return 0;
985 }
986
testExpBuf(struct node * node)987 int testExpBuf(struct node *node)
988 {
989 bool have_expbuf = false;
990 int type;
991
992 for (type = 0; type <= V4L2_BUF_TYPE_LAST; type++) {
993 if (!(node->valid_buftypes & (1 << type)))
994 continue;
995 if (v4l_type_is_overlay(type))
996 continue;
997
998 if (testSetupVbi(node, type))
999 continue;
1000
1001 cv4l_queue q(type, V4L2_MEMORY_MMAP);
1002
1003 if (!(node->valid_memorytype & (1 << V4L2_MEMORY_MMAP))) {
1004 if (q.has_expbuf(node)) {
1005 if (node->valid_buftypes)
1006 fail("VIDIOC_EXPBUF is supported, but the V4L2_MEMORY_MMAP support is missing or malfunctioning.\n");
1007 fail("VIDIOC_EXPBUF is supported, but the V4L2_MEMORY_MMAP support is missing, probably due to earlier failing format tests.\n");
1008 }
1009 return ENOTTY;
1010 }
1011
1012 fail_on_test(q.reqbufs(node, 2));
1013 if (q.has_expbuf(node)) {
1014 fail_on_test(q.export_bufs(node, q.g_type()));
1015 have_expbuf = true;
1016 } else {
1017 fail_on_test(!q.export_bufs(node, q.g_type()));
1018 }
1019 q.close_exported_fds();
1020 fail_on_test(q.reqbufs(node));
1021 }
1022 return have_expbuf ? 0 : ENOTTY;
1023 }
1024
testReadWrite(struct node * node)1025 int testReadWrite(struct node *node)
1026 {
1027 bool can_rw = node->g_caps() & V4L2_CAP_READWRITE;
1028 int fd_flags = fcntl(node->g_fd(), F_GETFL);
1029 char buf = 0;
1030 int ret, ret2;
1031 int err, err2;
1032
1033 fcntl(node->g_fd(), F_SETFL, fd_flags | O_NONBLOCK);
1034 errno = 0;
1035 if (node->can_capture)
1036 ret = node->read(&buf, 1);
1037 else
1038 ret = node->write(&buf, 1);
1039 err = errno;
1040 fail_on_test(v4l_has_vbi(node->g_v4l_fd()) &&
1041 !(node->cur_io_caps & V4L2_IN_CAP_STD) && ret >= 0);
1042
1043 // Note: RDS can only return multiples of 3, so we accept
1044 // both 0 and 1 as return code.
1045 // EBUSY can be returned when attempting to read/write to a
1046 // multiplanar format.
1047 // EINVAL can be returned if read()/write() is not supported
1048 // for the current input/output.
1049 if (can_rw)
1050 fail_on_test((ret < 0 && err != EAGAIN && err != EBUSY && err != EINVAL) || ret > 1);
1051 else
1052 fail_on_test(ret >= 0 || err != EINVAL);
1053 if (!can_rw)
1054 return ENOTTY;
1055
1056 node->reopen();
1057 fcntl(node->g_fd(), F_SETFL, fd_flags | O_NONBLOCK);
1058
1059 /* check that the close cleared the busy flag */
1060 errno = 0;
1061 if (node->can_capture)
1062 ret2 = node->read(&buf, 1);
1063 else
1064 ret2 = node->write(&buf, 1);
1065 err2 = errno;
1066 fail_on_test(ret2 != ret || err2 != err);
1067 return 0;
1068 }
1069
setupM2M(struct node * node,cv4l_queue & q,bool init=true)1070 static int setupM2M(struct node *node, cv4l_queue &q, bool init = true)
1071 {
1072 __u32 caps;
1073
1074 last_m2m_seq.init();
1075
1076 fail_on_test(q.reqbufs(node, 2));
1077 fail_on_test(q.mmap_bufs(node));
1078 caps = q.g_capabilities();
1079 fail_on_test(node->supports_orphaned_bufs ^ !!(caps & V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS));
1080 if (v4l_type_is_video(q.g_type())) {
1081 cv4l_fmt fmt(q.g_type());
1082
1083 node->g_fmt(fmt);
1084 cur_m2m_fmt = fmt;
1085 if (init) {
1086 last_m2m_seq.last_field = fmt.g_field();
1087 if (v4l_type_is_output(q.g_type()))
1088 stream_for_fmt(fmt.g_pixelformat());
1089 }
1090 }
1091 for (unsigned i = 0; i < q.g_buffers(); i++) {
1092 buffer buf(q);
1093
1094 fail_on_test(buf.querybuf(node, i));
1095 buf.s_flags(buf.g_flags() & ~V4L2_BUF_FLAG_REQUEST_FD);
1096 fail_on_test(buf.qbuf(node, q));
1097 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1098 }
1099 fail_on_test(node->streamon(q.g_type()));
1100 return 0;
1101 }
1102
captureBufs(struct node * node,struct node * node_m2m_cap,const cv4l_queue & q,cv4l_queue & m2m_q,unsigned frame_count,int pollmode,unsigned & capture_count)1103 static int captureBufs(struct node *node, struct node *node_m2m_cap, const cv4l_queue &q,
1104 cv4l_queue &m2m_q, unsigned frame_count, int pollmode,
1105 unsigned &capture_count)
1106 {
1107 static constexpr const char *pollmode_str[] = {
1108 "",
1109 " (select)",
1110 " (epoll)",
1111 };
1112 unsigned valid_output_flags =
1113 V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
1114 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME;
1115 int fd_flags = fcntl(node->g_fd(), F_GETFL);
1116 cv4l_fmt fmt_q;
1117 buffer buf(q);
1118 unsigned count = frame_count;
1119 unsigned req_idx = q.g_buffers();
1120 bool stopped = false;
1121 bool got_eos = false;
1122 bool got_source_change = false;
1123 struct epoll_event ev;
1124 int epollfd = -1;
1125 int ret;
1126
1127 if (node->is_m2m) {
1128 if (count <= q.g_buffers())
1129 count = 1;
1130 else
1131 count -= q.g_buffers();
1132 }
1133
1134 capture_count = 0;
1135
1136 if (show_info) {
1137 printf("\t %s%s:\n",
1138 buftype2s(q.g_type()).c_str(), pollmode_str[pollmode]);
1139 }
1140
1141 /*
1142 * It should be possible to set the same std, timings or
1143 * native size even while streaming.
1144 */
1145 fail_on_test(testCanSetSameTimings(node));
1146
1147 node->g_fmt(fmt_q, q.g_type());
1148 if (node->buftype_pixfmts[q.g_type()][fmt_q.g_pixelformat()] &
1149 V4L2_FMT_FLAG_COMPRESSED)
1150 valid_output_flags = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1151 if (node->is_m2m) {
1152 node_m2m_cap->g_fmt(fmt_q, m2m_q.g_type());
1153 if (node_m2m_cap->buftype_pixfmts[m2m_q.g_type()][fmt_q.g_pixelformat()] &
1154 V4L2_FMT_FLAG_COMPRESSED)
1155 valid_output_flags = V4L2_BUF_FLAG_TIMECODE | V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1156
1157 struct v4l2_event_subscription sub = { 0 };
1158
1159 sub.type = V4L2_EVENT_EOS;
1160 if (node->codec_mask & (STATEFUL_ENCODER | STATEFUL_DECODER))
1161 doioctl(node, VIDIOC_SUBSCRIBE_EVENT, &sub);
1162 }
1163
1164 if (pollmode == POLL_MODE_EPOLL) {
1165 epollfd = epoll_create1(0);
1166
1167 fail_on_test(epollfd < 0);
1168
1169 /*
1170 * Many older versions of the vb2 and m2m have a bug where
1171 * EPOLLIN and EPOLLOUT events are never signaled unless they
1172 * are part of the initial EPOLL_CTL_ADD. We set an initial
1173 * empty set of events, which we then modify with EPOLL_CTL_MOD,
1174 * in order to detect that condition.
1175 */
1176 ev.events = 0;
1177 fail_on_test(epoll_ctl(epollfd, EPOLL_CTL_ADD, node->g_fd(), &ev));
1178
1179 if (node->is_m2m)
1180 ev.events = EPOLLIN | EPOLLOUT | EPOLLPRI;
1181 else if (v4l_type_is_output(q.g_type()))
1182 ev.events = EPOLLOUT;
1183 else
1184 ev.events = EPOLLIN;
1185 ev.data.fd = node->g_fd();
1186 fail_on_test(epoll_ctl(epollfd, EPOLL_CTL_MOD, node->g_fd(), &ev));
1187 }
1188
1189 if (pollmode)
1190 fcntl(node->g_fd(), F_SETFL, fd_flags | O_NONBLOCK);
1191 for (;;) {
1192 buf.init(q);
1193
1194 bool can_read = true;
1195 bool have_event = false;
1196
1197 if (pollmode == POLL_MODE_SELECT) {
1198 struct timeval tv = { 2, 0 };
1199 fd_set rfds, wfds, efds;
1200
1201 FD_ZERO(&rfds);
1202 FD_SET(node->g_fd(), &rfds);
1203 FD_ZERO(&wfds);
1204 FD_SET(node->g_fd(), &wfds);
1205 FD_ZERO(&efds);
1206 FD_SET(node->g_fd(), &efds);
1207 if (node->is_m2m)
1208 ret = select(node->g_fd() + 1, &rfds, &wfds, &efds, &tv);
1209 else if (v4l_type_is_output(q.g_type()))
1210 ret = select(node->g_fd() + 1, nullptr, &wfds, nullptr, &tv);
1211 else
1212 ret = select(node->g_fd() + 1, &rfds, nullptr, nullptr, &tv);
1213 fail_on_test(ret == 0);
1214 fail_on_test(ret < 0);
1215 fail_on_test(!FD_ISSET(node->g_fd(), &rfds) &&
1216 !FD_ISSET(node->g_fd(), &wfds) &&
1217 !FD_ISSET(node->g_fd(), &efds));
1218 can_read = FD_ISSET(node->g_fd(), &rfds);
1219 have_event = FD_ISSET(node->g_fd(), &efds);
1220 } else if (pollmode == POLL_MODE_EPOLL) {
1221 /*
1222 * This can fail with a timeout on older kernels for
1223 * drivers using vb2_core_poll() v4l2_m2m_poll().
1224 */
1225 ret = epoll_wait(epollfd, &ev, 1, 2000);
1226 fail_on_test(ret == 0);
1227 fail_on_test_val(ret < 0, ret);
1228 can_read = ev.events & EPOLLIN;
1229 have_event = ev.events & EPOLLPRI;
1230 }
1231
1232 if (have_event) {
1233 struct v4l2_event ev;
1234
1235 while (!doioctl(node, VIDIOC_DQEVENT, &ev)) {
1236 if (ev.type == V4L2_EVENT_EOS) {
1237 fail_on_test(got_eos);
1238 got_eos = true;
1239 fail_on_test(!stopped);
1240 }
1241 if (ev.type == V4L2_EVENT_SOURCE_CHANGE) {
1242 fail_on_test(got_source_change);
1243 got_source_change = true;
1244 //fail_on_test(stopped);
1245 stopped = true;
1246 }
1247 }
1248 }
1249
1250 ret = EAGAIN;
1251 if (!node->is_m2m || !stopped)
1252 ret = buf.dqbuf(node);
1253 if (ret != EAGAIN) {
1254 fail_on_test_val(ret, ret);
1255 if (show_info)
1256 printf("\t\t%s Buffer: %d Sequence: %d Field: %s Size: %d Flags: %s Timestamp: %lld.%06llds\n",
1257 v4l_type_is_output(buf.g_type()) ? "Out" : "Cap",
1258 buf.g_index(), buf.g_sequence(),
1259 field2s(buf.g_field()).c_str(), buf.g_bytesused(),
1260 bufferflags2s(buf.g_flags()).c_str(),
1261 static_cast<__u64>(buf.g_timestamp().tv_sec), static_cast<__u64>(buf.g_timestamp().tv_usec));
1262 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1263 if (max_bytesused[p] < buf.g_bytesused(p))
1264 max_bytesused[p] = buf.g_bytesused(p);
1265 if (min_data_offset[p] > buf.g_data_offset(p))
1266 min_data_offset[p] = buf.g_data_offset(p);
1267 }
1268
1269 fail_on_test(buf.check(q, last_seq));
1270 if (!show_info && !no_progress) {
1271 printf("\r\t%s: Frame #%03d%s",
1272 buftype2s(q.g_type()).c_str(),
1273 frame_count - count,
1274 pollmode_str[pollmode]);
1275 if (node->g_trace())
1276 printf("\n");
1277 fflush(stdout);
1278 }
1279 if (v4l_type_is_capture(buf.g_type()) && node->is_m2m && buf.ts_is_copy()) {
1280 fail_on_test(buffer_info.find(buf.g_timestamp()) == buffer_info.end());
1281 struct v4l2_buffer &orig_buf = buffer_info[buf.g_timestamp()];
1282 fail_on_test(buf.g_field() != orig_buf.field);
1283 fail_on_test((buf.g_flags() & valid_output_flags) !=
1284 (orig_buf.flags & valid_output_flags));
1285 if (buf.g_flags() & V4L2_BUF_FLAG_TIMECODE)
1286 fail_on_test(memcmp(&buf.g_timecode(), &orig_buf.timecode,
1287 sizeof(orig_buf.timecode)));
1288 }
1289 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1290
1291 if (buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD) {
1292 buf.querybuf(node, buf.g_index());
1293 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD);
1294 fail_on_test(buf.g_request_fd());
1295 fail_on_test(!buf.qbuf(node));
1296 buf.s_flags(V4L2_BUF_FLAG_REQUEST_FD);
1297 buf.s_request_fd(buf_req_fds[req_idx]);
1298 }
1299 fail_on_test(buf.qbuf(node, q));
1300 // This is not necessarily wrong (v4l-touch drivers can do this),
1301 // but it is certainly unusual enough to warn about this.
1302 if (buf.g_flags() & V4L2_BUF_FLAG_DONE)
1303 warn_once("QBUF returned the buffer as DONE.\n");
1304 if (buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD) {
1305 fail_on_test(doioctl_fd(buf_req_fds[req_idx],
1306 MEDIA_REQUEST_IOC_QUEUE, nullptr));
1307 // testRequests will close some of these request fds,
1308 // so we need to find the next valid fds.
1309 do {
1310 req_idx = (req_idx + 1) % (2 * q.g_buffers());
1311 } while (buf_req_fds[req_idx] < 0);
1312 }
1313 count--;
1314 if (!node->is_m2m && !count)
1315 break;
1316 if (!count && (node->codec_mask & STATEFUL_ENCODER)) {
1317 struct v4l2_encoder_cmd cmd;
1318
1319 memset(&cmd, 0, sizeof(cmd));
1320 cmd.cmd = V4L2_ENC_CMD_STOP;
1321 fail_on_test(doioctl(node, VIDIOC_ENCODER_CMD, &cmd));
1322 stopped = true;
1323 }
1324 if (!count && (node->codec_mask & STATEFUL_DECODER)) {
1325 struct v4l2_decoder_cmd cmd;
1326
1327 memset(&cmd, 0, sizeof(cmd));
1328 cmd.cmd = V4L2_DEC_CMD_STOP;
1329 fail_on_test(doioctl(node, VIDIOC_DECODER_CMD, &cmd));
1330 stopped = true;
1331 }
1332 }
1333 if (!node->is_m2m || !can_read)
1334 continue;
1335
1336 buf.init(m2m_q);
1337 do {
1338 ret = buf.dqbuf(node_m2m_cap);
1339 } while (ret == EAGAIN);
1340 capture_count++;
1341
1342 if (show_info)
1343 printf("\t\t%s Buffer: %d Sequence: %d Field: %s Size: %d Flags: %s Timestamp: %lld.%06llds\n",
1344 v4l_type_is_output(buf.g_type()) ? "Out" : "Cap",
1345 buf.g_index(), buf.g_sequence(),
1346 field2s(buf.g_field()).c_str(), buf.g_bytesused(),
1347 bufferflags2s(buf.g_flags()).c_str(),
1348 static_cast<__u64>(buf.g_timestamp().tv_sec), static_cast<__u64>(buf.g_timestamp().tv_usec));
1349 fail_on_test_val(ret, ret);
1350 if (v4l_type_is_capture(buf.g_type()) && buf.g_bytesused())
1351 fail_on_test(buf.check(m2m_q, last_m2m_seq, true));
1352 if (v4l_type_is_capture(buf.g_type()) && buf.ts_is_copy() && buf.g_bytesused()) {
1353 fail_on_test(buffer_info.find(buf.g_timestamp()) == buffer_info.end());
1354 struct v4l2_buffer &orig_buf = buffer_info[buf.g_timestamp()];
1355 if (cur_fmt.g_field() == cur_m2m_fmt.g_field())
1356 fail_on_test(buf.g_field() != orig_buf.field);
1357 fail_on_test((buf.g_flags() & valid_output_flags) !=
1358 (orig_buf.flags & valid_output_flags));
1359 if (buf.g_flags() & V4L2_BUF_FLAG_TIMECODE)
1360 fail_on_test(memcmp(&buf.g_timecode(), &orig_buf.timecode,
1361 sizeof(orig_buf.timecode)));
1362 }
1363 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1364 if (!count || stopped) {
1365 if (!(node_m2m_cap->codec_mask & (STATEFUL_ENCODER | STATEFUL_DECODER)))
1366 break;
1367 if (buf.g_flags() & V4L2_BUF_FLAG_LAST) {
1368 fail_on_test(buf.dqbuf(node_m2m_cap) != EPIPE);
1369 fail_on_test(!got_eos && !got_source_change);
1370 if (!count)
1371 break;
1372 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
1373 m2m_q.munmap_bufs(node_m2m_cap);
1374 fail_on_test(setupM2M(node_m2m_cap, m2m_q, false));
1375 stopped = false;
1376 got_source_change = false;
1377
1378 struct v4l2_decoder_cmd cmd;
1379
1380 memset(&cmd, 0, sizeof(cmd));
1381 cmd.cmd = V4L2_DEC_CMD_START;
1382 fail_on_test(doioctl(node_m2m_cap, VIDIOC_DECODER_CMD, &cmd));
1383 continue;
1384 }
1385 }
1386 buf.s_flags(buf.g_flags() & ~V4L2_BUF_FLAG_REQUEST_FD);
1387 fail_on_test(buf.qbuf(node_m2m_cap, m2m_q));
1388 // If the queued buffer is immediately returned as a last
1389 // empty buffer, then FLAG_DONE is set here.
1390 // Need to look at this more closely.
1391 //fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1392 }
1393 if (pollmode)
1394 fcntl(node->g_fd(), F_SETFL, fd_flags);
1395 if (epollfd >= 0)
1396 close(epollfd);
1397 if (!show_info && !no_progress) {
1398 printf("\r\t \r");
1399 fflush(stdout);
1400 }
1401 if (node->is_m2m)
1402 printf("\t%s: Captured %d buffers\n", buftype2s(m2m_q.g_type()).c_str(), capture_count);
1403
1404 return 0;
1405 }
1406
bufferOutputErrorTest(struct node * node,const buffer & orig_buf)1407 static int bufferOutputErrorTest(struct node *node, const buffer &orig_buf)
1408 {
1409 buffer buf(orig_buf);
1410 bool have_prepare = false;
1411 int ret;
1412
1413 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1414 buf.s_bytesused(buf.g_length(p) + 1, p);
1415 buf.s_data_offset(0, p);
1416 }
1417 ret = buf.prepare_buf(node, false);
1418 fail_on_test_val(ret != EINVAL && ret != ENOTTY, ret);
1419 have_prepare = ret != ENOTTY;
1420 fail_on_test(buf.qbuf(node, false) != EINVAL);
1421
1422 if (v4l_type_is_planar(buf.g_type())) {
1423 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1424 buf.s_bytesused(buf.g_length(p) / 2, p);
1425 buf.s_data_offset(buf.g_bytesused(p), p);
1426 }
1427 if (have_prepare)
1428 fail_on_test(buf.prepare_buf(node, false) != EINVAL);
1429 fail_on_test(buf.qbuf(node, false) != EINVAL);
1430 }
1431 buf.init(orig_buf);
1432 if (have_prepare) {
1433 fail_on_test(buf.prepare_buf(node, false));
1434 fail_on_test(buf.check(Prepared, 0));
1435 buf.init(orig_buf);
1436 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1437 buf.s_bytesused(0xdeadbeef, p);
1438 buf.s_data_offset(0xdeadbeef, p);
1439 }
1440 }
1441 fail_on_test(buf.qbuf(node, false));
1442 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1443 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1444 fail_on_test(buf.g_bytesused(p) != orig_buf.g_bytesused(p));
1445 fail_on_test(buf.g_data_offset(p));
1446 }
1447 return 0;
1448 }
1449
setupMmap(struct node * node,cv4l_queue & q)1450 static int setupMmap(struct node *node, cv4l_queue &q)
1451 {
1452 bool cache_hints = q.g_capabilities() & V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
1453
1454 fail_on_test(q.mmap_bufs(node));
1455 for (unsigned i = 0; i < q.g_buffers(); i++) {
1456 buffer buf(q);
1457 unsigned int flags;
1458 int ret;
1459
1460 fail_on_test(buf.querybuf(node, i));
1461 fail_on_test(buf.check(q, Unqueued, i));
1462
1463 /*
1464 * Do not set cache hints for all the buffers, but only on
1465 * some of them, so that we can test more cases.
1466 */
1467 if (i == 0) {
1468 flags = buf.g_flags();
1469 flags |= V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
1470 flags |= V4L2_BUF_FLAG_NO_CACHE_CLEAN;
1471 buf.s_flags(flags);
1472 }
1473
1474 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1475 // Try a random offset
1476 fail_on_test(node->mmap(buf.g_length(p),
1477 buf.g_mem_offset(p) + 0xdeadbeef) != MAP_FAILED);
1478 }
1479 fail_on_test(!buf.dqbuf(node));
1480 if (v4l_type_is_output(buf.g_type()) && i == 0) {
1481 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1482 buf.s_bytesused(buf.g_length(p), p);
1483 buf.s_data_offset(0, p);
1484 }
1485 fill_output_buffer(q, buf);
1486 fail_on_test(bufferOutputErrorTest(node, buf));
1487 fail_on_test(buf.querybuf(node, i));
1488 fail_on_test(buf.check(q, Queued, i));
1489 } else {
1490 ret = buf.prepare_buf(node, q);
1491 fail_on_test_val(ret && ret != ENOTTY, ret);
1492 if (ret == 0) {
1493 fail_on_test(buf.querybuf(node, i));
1494 fail_on_test(buf.check(q, Prepared, i));
1495 fail_on_test(!buf.prepare_buf(node));
1496 }
1497
1498 fail_on_test(buf.qbuf(node));
1499 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1500 fail_on_test(!buf.qbuf(node));
1501 fail_on_test(!buf.prepare_buf(node));
1502 // Test with invalid buffer index
1503 buf.s_index(buf.g_index() + VIDEO_MAX_FRAME);
1504 fail_on_test(!buf.prepare_buf(node));
1505 fail_on_test(!buf.qbuf(node));
1506 fail_on_test(!buf.querybuf(node, buf.g_index()));
1507 buf.s_index(buf.g_index() - VIDEO_MAX_FRAME);
1508 fail_on_test(buf.g_index() != i);
1509 }
1510 flags = buf.g_flags();
1511 if (cache_hints) {
1512 if (i == 0) {
1513 /* We do expect cache hints on this buffer */
1514 fail_on_test(!(flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE));
1515 fail_on_test(!(flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN));
1516 } else {
1517 /* We expect no cache hints on this buffer */
1518 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE);
1519 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN);
1520 }
1521 } else if (node->might_support_cache_hints) {
1522 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE);
1523 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN);
1524 }
1525 fail_on_test(buf.querybuf(node, i));
1526 fail_on_test(buf.check(q, Queued, i));
1527 fail_on_test(!buf.dqbuf(node));
1528 }
1529 return 0;
1530 }
1531
testMmap(struct node * node,struct node * node_m2m_cap,unsigned frame_count,enum poll_mode pollmode)1532 int testMmap(struct node *node, struct node *node_m2m_cap, unsigned frame_count,
1533 enum poll_mode pollmode)
1534 {
1535 bool can_stream = node->g_caps() & V4L2_CAP_STREAMING;
1536 bool have_createbufs = true;
1537 int type;
1538 int ret;
1539
1540 if (!node->valid_buftypes)
1541 return ENOTTY;
1542
1543 buffer_info.clear();
1544 for (type = 0; type <= V4L2_BUF_TYPE_LAST; type++) {
1545 cv4l_fmt fmt;
1546
1547 if (!(node->valid_buftypes & (1 << type)))
1548 continue;
1549 if (v4l_type_is_overlay(type))
1550 continue;
1551 if (node->is_m2m && !v4l_type_is_output(type))
1552 continue;
1553
1554 cv4l_queue q(type, V4L2_MEMORY_MMAP);
1555 cv4l_queue m2m_q(v4l_type_invert(type));
1556
1557 if (testSetupVbi(node, type))
1558 continue;
1559
1560 stream_close();
1561
1562 ret = q.reqbufs(node, 0);
1563 if (ret) {
1564 fail_on_test(can_stream);
1565 return ret;
1566 }
1567 fail_on_test(!can_stream);
1568
1569 fail_on_test(node->streamon(q.g_type()) != EINVAL);
1570 fail_on_test(node->streamoff(q.g_type()));
1571
1572 q.init(type, V4L2_MEMORY_MMAP);
1573 fail_on_test(q.reqbufs(node, 2));
1574 fail_on_test(node->streamoff(q.g_type()));
1575 last_seq.init();
1576
1577 // Test queuing buffers...
1578 for (unsigned i = 0; i < q.g_buffers(); i++) {
1579 buffer buf(q);
1580
1581 fail_on_test(buf.querybuf(node, i));
1582 fail_on_test(buf.qbuf(node));
1583 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1584 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD);
1585 fail_on_test(buf.g_request_fd());
1586 }
1587 // calling STREAMOFF...
1588 fail_on_test(node->streamoff(q.g_type()));
1589 // and now we should be able to queue those buffers again since
1590 // STREAMOFF should return them back to the dequeued state.
1591 for (unsigned i = 0; i < q.g_buffers(); i++) {
1592 buffer buf(q);
1593
1594 fail_on_test(buf.querybuf(node, i));
1595 fail_on_test(buf.qbuf(node));
1596 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
1597 }
1598 // Now request buffers again, freeing the old buffers.
1599 // Good check for whether all the internal vb2 calls are in
1600 // balance.
1601 fail_on_test(q.reqbufs(node, q.g_buffers()));
1602 fail_on_test(node->g_fmt(cur_fmt, q.g_type()));
1603
1604 ret = q.create_bufs(node, 0);
1605 fail_on_test_val(ret != ENOTTY && ret != 0, ret);
1606 if (ret == ENOTTY)
1607 have_createbufs = false;
1608 if (have_createbufs) {
1609 q.reqbufs(node);
1610 q.create_bufs(node, 2, &cur_fmt, V4L2_MEMORY_FLAG_NON_COHERENT);
1611 fail_on_test(setupMmap(node, q));
1612 q.munmap_bufs(node);
1613 q.reqbufs(node, 2);
1614
1615 cv4l_fmt fmt(cur_fmt);
1616
1617 if (node->is_video) {
1618 last_seq.last_field = cur_fmt.g_field();
1619 fmt.s_height(fmt.g_height() / 2);
1620 for (unsigned p = 0; p < fmt.g_num_planes(); p++)
1621 fmt.s_sizeimage(fmt.g_sizeimage(p) / 2, p);
1622 fail_on_test(q.create_bufs(node, 1, &fmt) != EINVAL);
1623 fail_on_test(testQueryBuf(node, cur_fmt.type, q.g_buffers()));
1624 fmt = cur_fmt;
1625 for (unsigned p = 0; p < fmt.g_num_planes(); p++)
1626 fmt.s_sizeimage(fmt.g_sizeimage(p) * 2, p);
1627 }
1628 fail_on_test(q.create_bufs(node, 1, &fmt));
1629 if (node->is_video) {
1630 buffer buf(q);
1631
1632 fail_on_test(buf.querybuf(node, q.g_buffers() - 1));
1633 for (unsigned p = 0; p < fmt.g_num_planes(); p++)
1634 fail_on_test(buf.g_length(p) < fmt.g_sizeimage(p));
1635 }
1636 fail_on_test(q.reqbufs(node, 2));
1637 }
1638 if (v4l_type_is_output(type))
1639 stream_for_fmt(cur_fmt.g_pixelformat());
1640
1641 fail_on_test(setupMmap(node, q));
1642
1643 if (node->inject_error(VIVID_CID_START_STR_ERROR))
1644 fail_on_test(!node->streamon(q.g_type()));
1645
1646 if (node->codec_mask & STATEFUL_DECODER) {
1647 struct v4l2_event_subscription sub = { 0 };
1648
1649 sub.type = V4L2_EVENT_SOURCE_CHANGE;
1650 fail_on_test(doioctl(node, VIDIOC_SUBSCRIBE_EVENT, &sub));
1651 }
1652
1653 fail_on_test(node->streamon(q.g_type()));
1654 fail_on_test(node->streamon(q.g_type()));
1655
1656 unsigned capture_count;
1657
1658 if (node->is_m2m) {
1659 if (node->codec_mask & STATEFUL_DECODER) {
1660 int fd_flags = fcntl(node->g_fd(), F_GETFL);
1661 struct timeval tv = { 1, 0 };
1662 fd_set efds;
1663 v4l2_event ev;
1664
1665 fcntl(node->g_fd(), F_SETFL, fd_flags | O_NONBLOCK);
1666 FD_ZERO(&efds);
1667 FD_SET(node->g_fd(), &efds);
1668 ret = select(node->g_fd() + 1, nullptr, nullptr, &efds, &tv);
1669 fail_on_test_val(ret < 0, ret);
1670 fail_on_test(ret == 0);
1671 fail_on_test(node->dqevent(ev));
1672 fcntl(node->g_fd(), F_SETFL, fd_flags);
1673 fail_on_test(ev.type != V4L2_EVENT_SOURCE_CHANGE);
1674 fail_on_test(!(ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION));
1675 }
1676 fail_on_test(setupM2M(node_m2m_cap, m2m_q));
1677 }
1678
1679 fail_on_test(captureBufs(node, node_m2m_cap, q, m2m_q, frame_count,
1680 pollmode, capture_count));
1681 fail_on_test(node->streamoff(q.g_type()));
1682 fail_on_test(node->streamoff(q.g_type()));
1683 if (node->is_m2m)
1684 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
1685
1686 if (node->codec_mask & STATEFUL_ENCODER) {
1687 struct v4l2_encoder_cmd cmd;
1688 buffer buf_cap(m2m_q);
1689
1690 memset(&cmd, 0, sizeof(cmd));
1691 cmd.cmd = V4L2_ENC_CMD_STOP;
1692
1693 /* No buffers are queued, call STREAMON, then STOP */
1694 fail_on_test(node->streamon(q.g_type()));
1695 fail_on_test(node_m2m_cap->streamon(m2m_q.g_type()));
1696 fail_on_test(doioctl(node, VIDIOC_ENCODER_CMD, &cmd));
1697
1698 fail_on_test(buf_cap.querybuf(node, 0));
1699 fail_on_test(buf_cap.qbuf(node));
1700 fail_on_test(buf_cap.dqbuf(node));
1701 fail_on_test(!(buf_cap.g_flags() & V4L2_BUF_FLAG_LAST));
1702 for (unsigned p = 0; p < buf_cap.g_num_planes(); p++)
1703 fail_on_test(buf_cap.g_bytesused(p));
1704 fail_on_test(node->streamoff(q.g_type()));
1705 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
1706
1707 /* Call STREAMON, queue one CAPTURE buffer, then STOP */
1708 fail_on_test(node->streamon(q.g_type()));
1709 fail_on_test(node_m2m_cap->streamon(m2m_q.g_type()));
1710 fail_on_test(buf_cap.querybuf(node, 0));
1711 fail_on_test(buf_cap.qbuf(node));
1712 fail_on_test(doioctl(node, VIDIOC_ENCODER_CMD, &cmd));
1713
1714 fail_on_test(buf_cap.dqbuf(node));
1715 fail_on_test(!(buf_cap.g_flags() & V4L2_BUF_FLAG_LAST));
1716 for (unsigned p = 0; p < buf_cap.g_num_planes(); p++)
1717 fail_on_test(buf_cap.g_bytesused(p));
1718 fail_on_test(node->streamoff(q.g_type()));
1719 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
1720 }
1721
1722 if (node->codec_mask & STATEFUL_DECODER) {
1723 struct v4l2_decoder_cmd cmd;
1724 buffer buf_cap(m2m_q);
1725
1726 memset(&cmd, 0, sizeof(cmd));
1727 cmd.cmd = V4L2_DEC_CMD_STOP;
1728
1729 /* No buffers are queued, call STREAMON, then STOP */
1730 fail_on_test(node->streamon(q.g_type()));
1731 fail_on_test(node_m2m_cap->streamon(m2m_q.g_type()));
1732 fail_on_test(doioctl(node, VIDIOC_DECODER_CMD, &cmd));
1733
1734 fail_on_test(buf_cap.querybuf(node, 0));
1735 fail_on_test(buf_cap.qbuf(node));
1736 fail_on_test(buf_cap.dqbuf(node));
1737 fail_on_test(!(buf_cap.g_flags() & V4L2_BUF_FLAG_LAST));
1738 for (unsigned p = 0; p < buf_cap.g_num_planes(); p++)
1739 fail_on_test(buf_cap.g_bytesused(p));
1740 fail_on_test(node->streamoff(q.g_type()));
1741 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
1742
1743 /* Call STREAMON, queue one CAPTURE buffer, then STOP */
1744 fail_on_test(node->streamon(q.g_type()));
1745 fail_on_test(node_m2m_cap->streamon(m2m_q.g_type()));
1746 fail_on_test(buf_cap.querybuf(node, 0));
1747 fail_on_test(buf_cap.qbuf(node));
1748 fail_on_test(doioctl(node, VIDIOC_DECODER_CMD, &cmd));
1749
1750 fail_on_test(buf_cap.dqbuf(node));
1751 fail_on_test(!(buf_cap.g_flags() & V4L2_BUF_FLAG_LAST));
1752 for (unsigned p = 0; p < buf_cap.g_num_planes(); p++)
1753 fail_on_test(buf_cap.g_bytesused(p));
1754 fail_on_test(node->streamoff(q.g_type()));
1755 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
1756 }
1757
1758 if (node->supports_orphaned_bufs) {
1759 fail_on_test(q.reqbufs(node, 0));
1760 q.munmap_bufs(node);
1761 } else if (q.reqbufs(node, 0) != EBUSY) {
1762 // It's either a bug or this driver should set
1763 // V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS
1764 warn("Can free buffers even if still mmap()ed\n");
1765 q.munmap_bufs(node);
1766 } else {
1767 q.munmap_bufs(node);
1768 fail_on_test(q.reqbufs(node, 0));
1769 }
1770
1771 if (node->is_m2m) {
1772 if (node->supports_orphaned_bufs) {
1773 fail_on_test(m2m_q.reqbufs(node, 0));
1774 m2m_q.munmap_bufs(node_m2m_cap);
1775 } else if (m2m_q.reqbufs(node, 0) != EBUSY) {
1776 // It's either a bug or this driver should set
1777 // V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS
1778 warn("Can free buffers even if still mmap()ed\n");
1779 q.munmap_bufs(node);
1780 } else {
1781 m2m_q.munmap_bufs(node_m2m_cap);
1782 fail_on_test(m2m_q.reqbufs(node_m2m_cap, 0));
1783 }
1784 fail_on_test(!capture_count);
1785 }
1786 stream_close();
1787 }
1788 return 0;
1789 }
1790
setupUserPtr(struct node * node,cv4l_queue & q)1791 static int setupUserPtr(struct node *node, cv4l_queue &q)
1792 {
1793 for (unsigned i = 0; i < q.g_buffers(); i++) {
1794 buffer buf(q);
1795 unsigned int flags;
1796 int ret;
1797
1798 fail_on_test(buf.querybuf(node, i));
1799 fail_on_test(buf.check(q, Unqueued, i));
1800
1801 flags = buf.g_flags();
1802 flags |= V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
1803 flags |= V4L2_BUF_FLAG_NO_CACHE_CLEAN;
1804 buf.s_flags(flags);
1805
1806 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1807 // This should not work!
1808 fail_on_test(node->mmap(buf.g_length(p), 0) != MAP_FAILED);
1809 }
1810
1811 ret = ENOTTY;
1812 // Try to use VIDIOC_PREPARE_BUF for every other buffer
1813 if ((i & 1) == 0) {
1814 for (unsigned p = 0; p < buf.g_num_planes(); p++)
1815 buf.s_userptr(nullptr, p);
1816 ret = buf.prepare_buf(node);
1817 fail_on_test(!ret);
1818 for (unsigned p = 0; p < buf.g_num_planes(); p++)
1819 buf.s_userptr((char *)0x0eadb000 + buf.g_length(p), p);
1820 ret = buf.prepare_buf(node);
1821 fail_on_test(!ret);
1822 for (unsigned p = 0; p < buf.g_num_planes(); p++)
1823 buf.s_userptr(q.g_userptr(i, p), p);
1824 ret = buf.prepare_buf(node, q);
1825 fail_on_test_val(ret && ret != ENOTTY, ret);
1826
1827 if (ret == 0) {
1828 fail_on_test(buf.querybuf(node, i));
1829 fail_on_test(buf.check(q, Prepared, i));
1830 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1831 buf.s_userptr(nullptr, p);
1832 buf.s_bytesused(0, p);
1833 buf.s_length(0, p);
1834 }
1835 }
1836 }
1837 if (ret == ENOTTY) {
1838 for (unsigned p = 0; p < buf.g_num_planes(); p++)
1839 buf.s_userptr(nullptr, p);
1840 ret = buf.qbuf(node);
1841 fail_on_test(!ret);
1842
1843 for (unsigned p = 0; p < buf.g_num_planes(); p++)
1844 buf.s_userptr((char *)0x0eadb000 + buf.g_length(p), p);
1845 ret = buf.qbuf(node);
1846 fail_on_test(!ret);
1847
1848 for (unsigned p = 0; p < buf.g_num_planes(); p++)
1849 buf.s_userptr(q.g_userptr(i, p), p);
1850 }
1851
1852 if ((i & 1) == 0)
1853 fail_on_test(buf.qbuf(node));
1854 else
1855 fail_on_test(buf.qbuf(node, q));
1856
1857 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
1858 fail_on_test(buf.g_userptr(p) != q.g_userptr(i, p));
1859 fail_on_test(buf.g_length(p) != q.g_length(p));
1860 if (v4l_type_is_output(q.g_type()))
1861 fail_on_test(!buf.g_bytesused(p));
1862 }
1863 flags = buf.g_flags();
1864 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE);
1865 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN);
1866 fail_on_test(flags & V4L2_BUF_FLAG_DONE);
1867 fail_on_test(buf.querybuf(node, i));
1868 fail_on_test(buf.check(q, Queued, i));
1869 }
1870 return 0;
1871 }
1872
testUserPtr(struct node * node,struct node * node_m2m_cap,unsigned frame_count,enum poll_mode pollmode)1873 int testUserPtr(struct node *node, struct node *node_m2m_cap, unsigned frame_count,
1874 enum poll_mode pollmode)
1875 {
1876 const __u32 filler = 0xdeadbeef;
1877 bool can_stream = node->g_caps() & V4L2_CAP_STREAMING;
1878 int type;
1879 int ret;
1880
1881 if (!node->valid_buftypes)
1882 return ENOTTY;
1883
1884 buffer_info.clear();
1885 for (type = 0; type <= V4L2_BUF_TYPE_LAST; type++) {
1886 if (!(node->valid_buftypes & (1 << type)))
1887 continue;
1888 if (v4l_type_is_overlay(type))
1889 continue;
1890 if (node->is_m2m && !v4l_type_is_output(type))
1891 continue;
1892
1893 cv4l_queue q(type, V4L2_MEMORY_USERPTR);
1894 cv4l_queue m2m_q(v4l_type_invert(type));
1895
1896 if (testSetupVbi(node, type))
1897 continue;
1898
1899 stream_close();
1900 ret = q.reqbufs(node, 0);
1901 if (ret) {
1902 fail_on_test_val(!can_stream && ret != ENOTTY, ret);
1903 fail_on_test_val(can_stream && ret != EINVAL, ret);
1904 return ENOTTY;
1905 }
1906 fail_on_test(!can_stream);
1907
1908 q.init(type, V4L2_MEMORY_USERPTR);
1909 fail_on_test(q.reqbufs(node, 2));
1910 fail_on_test(node->streamoff(q.g_type()));
1911 last_seq.init();
1912 if (node->is_video)
1913 last_seq.last_field = cur_fmt.g_field();
1914 if (v4l_type_is_output(type))
1915 stream_for_fmt(cur_fmt.g_pixelformat());
1916
1917 __u32 *buffers[q.g_buffers()][q.g_num_planes()];
1918
1919 /*
1920 * The alignment malloc uses depends on the gcc version and
1921 * architecture. Applications compiled for 64-bit all use a
1922 * 16 byte alignment. Applications compiled for 32-bit will
1923 * use an 8 byte alignment if glibc was compiled with gcc
1924 * version 6 or older, and 16 bytes when compiled with a newer
1925 * gcc. This is due to the support for _float128 that gcc
1926 * added in version 7 and that required this alignment change.
1927 *
1928 * Bottom line, in order to test user pointers the assumption
1929 * has to be that the DMA can handle writing just 8 bytes to a
1930 * page, since that's the worst case scenario.
1931 */
1932 for (unsigned i = 0; i < q.g_buffers(); i++) {
1933 for (unsigned p = 0; p < q.g_num_planes(); p++) {
1934 /* ensure that len is a multiple of 4 */
1935 __u32 len = ((q.g_length(p) + 3) & ~0x3) + 4 * 4096;
1936 auto m = static_cast<__u32 *>(malloc(len));
1937
1938 fail_on_test(!m);
1939 fail_on_test((uintptr_t)m & 0x7);
1940 for (__u32 *x = m; x < m + len / 4; x++)
1941 *x = filler;
1942 buffers[i][p] = m;
1943 m = m + 2 * 4096 / 4;
1944 /*
1945 * Put the start of the buffer at the last 8 bytes
1946 * of a page.
1947 */
1948 m = (__u32 *)(((uintptr_t)m & ~0xfff) | 0xff8);
1949 q.s_userptr(i, p, m);
1950 }
1951 }
1952 // captureBufs() will update these values
1953 memset(max_bytesused, 0, sizeof(max_bytesused));
1954 memset(min_data_offset, 0xff, sizeof(min_data_offset));
1955
1956 fail_on_test(setupUserPtr(node, q));
1957
1958 if (node->codec_mask & STATEFUL_DECODER) {
1959 struct v4l2_event_subscription sub = { 0 };
1960
1961 sub.type = V4L2_EVENT_SOURCE_CHANGE;
1962 fail_on_test(doioctl(node, VIDIOC_SUBSCRIBE_EVENT, &sub));
1963 }
1964
1965 fail_on_test(node->streamon(q.g_type()));
1966 fail_on_test(node->streamon(q.g_type()));
1967
1968 unsigned capture_count;
1969
1970 if (node->is_m2m) {
1971 if (node->codec_mask & STATEFUL_DECODER) {
1972 int fd_flags = fcntl(node->g_fd(), F_GETFL);
1973 struct timeval tv = { 1, 0 };
1974 fd_set efds;
1975 v4l2_event ev;
1976
1977 fcntl(node->g_fd(), F_SETFL, fd_flags | O_NONBLOCK);
1978 FD_ZERO(&efds);
1979 FD_SET(node->g_fd(), &efds);
1980 ret = select(node->g_fd() + 1, nullptr, nullptr, &efds, &tv);
1981 fail_on_test_val(ret < 0, ret);
1982 fail_on_test(ret == 0);
1983 fail_on_test(node->dqevent(ev));
1984 fcntl(node->g_fd(), F_SETFL, fd_flags);
1985 fail_on_test(ev.type != V4L2_EVENT_SOURCE_CHANGE);
1986 fail_on_test(!(ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION));
1987 }
1988 fail_on_test(setupM2M(node_m2m_cap, m2m_q));
1989 }
1990 fail_on_test(captureBufs(node, node_m2m_cap, q, m2m_q, frame_count,
1991 pollmode, capture_count));
1992 fail_on_test(node->streamoff(q.g_type()));
1993 fail_on_test(node->streamoff(q.g_type()));
1994 if (node->is_m2m) {
1995 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
1996 m2m_q.munmap_bufs(node_m2m_cap);
1997 fail_on_test(m2m_q.reqbufs(node_m2m_cap, 0));
1998 fail_on_test(!capture_count);
1999 }
2000 for (unsigned i = 0; i < q.g_buffers(); i++) {
2001 for (unsigned p = 0; p < q.g_num_planes(); p++) {
2002 __u32 buflen = (q.g_length(p) + 3U) & ~3U;
2003 __u32 memlen = buflen + 4 * 4096;
2004 __u32 *m = buffers[i][p];
2005 auto u = static_cast<__u32 *>(q.g_userptr(i, p));
2006
2007 for (__u32 *x = m; x < u; x++)
2008 if (*x != filler)
2009 fail("data at %zd bytes before start of the buffer was touched\n",
2010 (u - x) * 4);
2011
2012 unsigned data_offset = min_data_offset[p];
2013 data_offset = (data_offset + 3U) & ~3U;
2014 if (!v4l_type_is_output(type) && u[data_offset / 4] == filler)
2015 fail("data at data_offset %u was untouched\n", data_offset);
2016
2017 unsigned used = max_bytesused[p];
2018 // Should never happen
2019 fail_on_test(!used);
2020 used = (used + 3U) & ~3U;
2021
2022 for (__u32 *x = u + used / 4; x < u + buflen / 4; x++) {
2023 if (*x == filler)
2024 continue;
2025 warn_once("data from max bytesused %u+%zd to length %u was touched in plane %u\n",
2026 used, (x - u) * 4 - used, buflen, p);
2027 break;
2028 }
2029 for (__u32 *x = u + buflen / 4; x < m + memlen / 4; x++)
2030 if (*x != filler)
2031 fail("data at %zd bytes after the end of the buffer was touched\n",
2032 (x - (u + buflen / 4)) * 4);
2033 free(m);
2034 q.s_userptr(i, p, nullptr);
2035 }
2036 }
2037 stream_close();
2038 }
2039 return 0;
2040 }
2041
setupDmaBuf(struct node * expbuf_node,struct node * node,cv4l_queue & q,cv4l_queue & exp_q)2042 static int setupDmaBuf(struct node *expbuf_node, struct node *node,
2043 cv4l_queue &q, cv4l_queue &exp_q)
2044 {
2045 fail_on_test(exp_q.reqbufs(expbuf_node, q.g_buffers()));
2046 fail_on_test(exp_q.g_buffers() < q.g_buffers());
2047 fail_on_test(exp_q.export_bufs(expbuf_node, exp_q.g_type()));
2048
2049 for (unsigned i = 0; i < q.g_buffers(); i++) {
2050 buffer buf(q);
2051 int ret;
2052
2053 fail_on_test(buf.querybuf(node, i));
2054 fail_on_test(buf.check(q, Unqueued, i));
2055 fail_on_test(exp_q.g_num_planes() < buf.g_num_planes());
2056 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
2057 if (exp_q.g_length(p) < buf.g_length(p))
2058 return fail("exp_q.g_length(%u) < buf.g_length(%u): %u < %u\n",
2059 p, p, exp_q.g_length(p), buf.g_length(p));
2060 // This should not work!
2061 fail_on_test(node->mmap(buf.g_length(p), 0) != MAP_FAILED);
2062 q.s_fd(i, p, exp_q.g_fd(i, p));
2063 }
2064
2065 for (unsigned p = 0; p < buf.g_num_planes(); p++)
2066 buf.s_fd(0xdeadbeef + q.g_fd(i, p), p);
2067 ret = buf.prepare_buf(node);
2068 fail_on_test(!ret);
2069 }
2070 fail_on_test(q.mmap_bufs(node));
2071 for (unsigned i = 0; i < q.g_buffers(); i++) {
2072 buffer buf(q);
2073 unsigned int flags;
2074 int ret;
2075
2076 buf.init(q, i);
2077 fail_on_test(buf.querybuf(node, i));
2078 for (unsigned p = 0; p < buf.g_num_planes(); p++)
2079 buf.s_fd(q.g_fd(i, p), p);
2080 flags = buf.g_flags();
2081 flags |= V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
2082 flags |= V4L2_BUF_FLAG_NO_CACHE_CLEAN;
2083 buf.s_flags(flags);
2084 ret = buf.prepare_buf(node, q);
2085 if (ret != ENOTTY) {
2086 fail_on_test_val(ret, ret);
2087 fail_on_test(buf.querybuf(node, i));
2088 fail_on_test(buf.check(q, Prepared, i));
2089 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
2090 buf.s_fd(-1, p);
2091 buf.s_bytesused(0, p);
2092 buf.s_length(0, p);
2093 }
2094 }
2095
2096 fail_on_test(buf.qbuf(node, false));
2097 for (unsigned p = 0; p < buf.g_num_planes(); p++) {
2098 fail_on_test(buf.g_fd(p) != q.g_fd(i, p));
2099 fail_on_test(buf.g_length(p) != q.g_length(p));
2100 if (v4l_type_is_output(q.g_type()))
2101 fail_on_test(!buf.g_bytesused(p));
2102 }
2103 flags = buf.g_flags();
2104
2105 /* Make sure that flags are cleared */
2106 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE);
2107 fail_on_test(flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN);
2108 fail_on_test(flags & V4L2_BUF_FLAG_DONE);
2109 fail_on_test(buf.querybuf(node, i));
2110 fail_on_test(buf.check(q, Queued, i));
2111 }
2112 return 0;
2113 }
2114
testDmaBuf(struct node * expbuf_node,struct node * node,struct node * node_m2m_cap,unsigned frame_count,enum poll_mode pollmode)2115 int testDmaBuf(struct node *expbuf_node, struct node *node, struct node *node_m2m_cap,
2116 unsigned frame_count, enum poll_mode pollmode)
2117 {
2118 bool can_stream = node->g_caps() & V4L2_CAP_STREAMING;
2119 int expbuf_type, type;
2120 int ret;
2121
2122 if (!node->valid_buftypes)
2123 return ENOTTY;
2124
2125 buffer_info.clear();
2126 for (type = 0; type <= V4L2_BUF_TYPE_LAST; type++) {
2127 if (!(node->valid_buftypes & (1 << type)))
2128 continue;
2129 if (v4l_type_is_sdr(type))
2130 continue;
2131 if (v4l_type_is_overlay(type))
2132 continue;
2133 if (node->is_m2m && !v4l_type_is_output(type))
2134 continue;
2135
2136 if (expbuf_node->g_caps() & V4L2_CAP_VIDEO_CAPTURE_MPLANE)
2137 expbuf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
2138 else if (expbuf_node->g_caps() & V4L2_CAP_VIDEO_CAPTURE)
2139 expbuf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
2140 else if (expbuf_node->g_caps() & V4L2_CAP_VIDEO_OUTPUT_MPLANE)
2141 expbuf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
2142 else
2143 expbuf_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
2144
2145 cv4l_queue q(type, V4L2_MEMORY_DMABUF);
2146 cv4l_queue m2m_q(v4l_type_invert(type));
2147 cv4l_queue exp_q(expbuf_type, V4L2_MEMORY_MMAP);
2148
2149 if (testSetupVbi(node, type))
2150 continue;
2151
2152 stream_close();
2153 ret = q.reqbufs(node, 0);
2154 if (ret) {
2155 fail_on_test_val(!can_stream && ret != ENOTTY, ret);
2156 fail_on_test_val(can_stream && ret != EINVAL, ret);
2157 return ENOTTY;
2158 }
2159 fail_on_test(!can_stream);
2160
2161 fail_on_test(q.reqbufs(node, 2));
2162 fail_on_test(node->streamoff(q.g_type()));
2163 last_seq.init();
2164 if (node->is_video)
2165 last_seq.last_field = cur_fmt.g_field();
2166 if (v4l_type_is_output(type))
2167 stream_for_fmt(cur_fmt.g_pixelformat());
2168
2169 fail_on_test(setupDmaBuf(expbuf_node, node, q, exp_q));
2170
2171 if (node->codec_mask & STATEFUL_DECODER) {
2172 struct v4l2_event_subscription sub = { 0 };
2173
2174 sub.type = V4L2_EVENT_SOURCE_CHANGE;
2175 fail_on_test(doioctl(node, VIDIOC_SUBSCRIBE_EVENT, &sub));
2176 }
2177
2178 fail_on_test(node->streamon(q.g_type()));
2179 fail_on_test(node->streamon(q.g_type()));
2180
2181 unsigned capture_count;
2182
2183 if (node->is_m2m) {
2184 if (node->codec_mask & STATEFUL_DECODER) {
2185 int fd_flags = fcntl(node->g_fd(), F_GETFL);
2186 struct timeval tv = { 1, 0 };
2187 fd_set efds;
2188 v4l2_event ev;
2189
2190 fcntl(node->g_fd(), F_SETFL, fd_flags | O_NONBLOCK);
2191 FD_ZERO(&efds);
2192 FD_SET(node->g_fd(), &efds);
2193 ret = select(node->g_fd() + 1, nullptr, nullptr, &efds, &tv);
2194 fail_on_test_val(ret < 0, ret);
2195 fail_on_test(ret == 0);
2196 fail_on_test(node->dqevent(ev));
2197 fcntl(node->g_fd(), F_SETFL, fd_flags);
2198 fail_on_test(ev.type != V4L2_EVENT_SOURCE_CHANGE);
2199 fail_on_test(!(ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION));
2200 }
2201 fail_on_test(setupM2M(node_m2m_cap, m2m_q));
2202 }
2203 fail_on_test(captureBufs(node, node_m2m_cap, q, m2m_q, frame_count,
2204 pollmode, capture_count));
2205 fail_on_test(node->streamoff(q.g_type()));
2206 fail_on_test(node->streamoff(q.g_type()));
2207 if (node->supports_orphaned_bufs) {
2208 fail_on_test(q.reqbufs(node, 0));
2209 exp_q.close_exported_fds();
2210 } else if (q.reqbufs(node, 0) != EBUSY) {
2211 // It's either a bug or this driver should set
2212 // V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS
2213 warn("Can free buffers even if exported DMABUF fds still open\n");
2214 q.munmap_bufs(node);
2215 } else {
2216 exp_q.close_exported_fds();
2217 fail_on_test(q.reqbufs(node, 0));
2218 }
2219 if (node->is_m2m) {
2220 fail_on_test(node_m2m_cap->streamoff(m2m_q.g_type()));
2221 if (node_m2m_cap->supports_orphaned_bufs) {
2222 fail_on_test(m2m_q.reqbufs(node, 0));
2223 m2m_q.munmap_bufs(node_m2m_cap);
2224 } else if (m2m_q.reqbufs(node_m2m_cap, 0) != EBUSY) {
2225 // It's either a bug or this driver should set
2226 // V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS
2227 warn("Can free buffers even if still mmap()ed\n");
2228 q.munmap_bufs(node);
2229 } else {
2230 m2m_q.munmap_bufs(node_m2m_cap);
2231 fail_on_test(m2m_q.reqbufs(node_m2m_cap, 0));
2232 }
2233 fail_on_test(!capture_count);
2234 }
2235 stream_close();
2236 }
2237 return 0;
2238 }
2239
testRequests(struct node * node,bool test_streaming)2240 int testRequests(struct node *node, bool test_streaming)
2241 {
2242 filehandles fhs;
2243 int media_fd = fhs.add(mi_get_media_fd(node->g_fd(), node->bus_info));
2244 int req_fd;
2245 struct test_query_ext_ctrl valid_qctrl;
2246 v4l2_ext_controls ctrls;
2247 v4l2_ext_control ctrl;
2248 v4l2_ext_control vivid_ro_ctrl = {
2249 .id = VIVID_CID_RO_INTEGER,
2250 };
2251 v4l2_ext_controls vivid_ro_ctrls = {};
2252 // Note: the vivid dynamic array has range 10-90
2253 // and the maximum number of elements is 100.
2254 __u32 vivid_dyn_array[101] = {};
2255 // Initialize with these values
2256 static const __u32 vivid_dyn_array_init[16] = {
2257 6, 12, 18, 24, 30, 36, 42, 48,
2258 54, 60, 66, 72, 78, 84, 90, 96
2259 };
2260 // This is the clamped version to compare against
2261 static const __u32 vivid_dyn_array_clamped[16] = {
2262 10, 12, 18, 24, 30, 36, 42, 48,
2263 54, 60, 66, 72, 78, 84, 90, 90
2264 };
2265 const unsigned elem_size = sizeof(vivid_dyn_array[0]);
2266 v4l2_ext_control vivid_dyn_array_ctrl = {
2267 .id = VIVID_CID_U32_DYN_ARRAY,
2268 };
2269 v4l2_ext_controls vivid_dyn_array_ctrls = {};
2270 unsigned vivid_pixel_array_size = 0;
2271 v4l2_ext_control vivid_pixel_array_ctrl = {
2272 .id = VIVID_CID_U8_PIXEL_ARRAY,
2273 };
2274 v4l2_ext_controls vivid_pixel_array_ctrls = {};
2275 bool have_controls;
2276 int ret;
2277
2278 // Note: trying to initialize vivid_ro_ctrls as was done for
2279 // vivid_ro_ctrl fails with gcc 7 with this error:
2280 // sorry, unimplemented: non-trivial designated initializers not supported
2281 // So just set this struct the old-fashioned way.
2282 vivid_ro_ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
2283 vivid_ro_ctrls.count = 1;
2284 vivid_ro_ctrls.controls = &vivid_ro_ctrl;
2285
2286 if (is_vivid) {
2287 v4l2_query_ext_ctrl qec = { .id = VIVID_CID_U8_PIXEL_ARRAY };
2288 node->query_ext_ctrl(qec);
2289 vivid_pixel_array_size = qec.elems;
2290 }
2291 __u8 vivid_pixel_array[vivid_pixel_array_size + 1];
2292 vivid_pixel_array[vivid_pixel_array_size] = 0xff;
2293 vivid_pixel_array_ctrl.size = vivid_pixel_array_size;
2294 vivid_pixel_array_ctrl.p_u8 = vivid_pixel_array;
2295
2296 // If requests are supported, then there must be a media device
2297 if (node->buf_caps & V4L2_BUF_CAP_SUPPORTS_REQUESTS)
2298 fail_on_test(media_fd < 0);
2299
2300 // Check if the driver has controls that can be used to test requests
2301 memset(&valid_qctrl, 0, sizeof(valid_qctrl));
2302 memset(&ctrls, 0, sizeof(ctrls));
2303 memset(&ctrl, 0, sizeof(ctrl));
2304 for (auto &control : node->controls) {
2305 test_query_ext_ctrl &qctrl = control.second;
2306
2307 if (qctrl.type != V4L2_CTRL_TYPE_INTEGER &&
2308 qctrl.type != V4L2_CTRL_TYPE_BOOLEAN)
2309 continue;
2310 if (qctrl.flags & V4L2_CTRL_FLAG_WRITE_ONLY ||
2311 qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)
2312 continue;
2313 if (is_vivid && V4L2_CTRL_ID2WHICH(qctrl.id) == V4L2_CTRL_CLASS_VIVID)
2314 continue;
2315 if (qctrl.minimum != qctrl.maximum) {
2316 valid_qctrl = qctrl;
2317 ctrl.id = qctrl.id;
2318 break;
2319 }
2320 }
2321
2322 if (ctrl.id == 0) {
2323 info("could not test the Request API, no suitable control found\n");
2324 return (node->buf_caps & V4L2_BUF_CAP_SUPPORTS_REQUESTS) ?
2325 0 : ENOTTY;
2326 }
2327
2328 // Test if V4L2_CTRL_WHICH_REQUEST_VAL is supported
2329 ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
2330 ret = doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls);
2331 fail_on_test_val(ret != EINVAL && ret != EBADR && ret != ENOTTY, ret);
2332 have_controls = ret != ENOTTY;
2333
2334 if (media_fd < 0 || ret == EBADR) {
2335 // Should not happen if requests are supported
2336 fail_on_test(node->buf_caps & V4L2_BUF_CAP_SUPPORTS_REQUESTS);
2337 return ENOTTY;
2338 }
2339 if (have_controls) {
2340 ctrls.request_fd = 10;
2341 // Test that querying controls with an invalid request_fd
2342 // returns EINVAL
2343 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls) != EINVAL);
2344 }
2345 ret = doioctl_fd(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);
2346 if (ret == ENOTTY) {
2347 // Should not happen if requests are supported
2348 fail_on_test(node->buf_caps & V4L2_BUF_CAP_SUPPORTS_REQUESTS);
2349 return ENOTTY;
2350 }
2351 // Check that a request was allocated with a valid fd
2352 fail_on_test_val(ret, ret);
2353 fail_on_test(req_fd < 0);
2354 fhs.add(req_fd);
2355 if (have_controls) {
2356 ctrls.request_fd = req_fd;
2357 // The request is in unqueued state, so this should return EACCES
2358 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls) != EACCES);
2359 }
2360 // You cannot queue a request that has no buffer
2361 fail_on_test(doioctl_fd(req_fd, MEDIA_REQUEST_IOC_QUEUE, nullptr) != ENOENT);
2362 // REINIT must work for an unqueued request
2363 fail_on_test(doioctl_fd(req_fd, MEDIA_REQUEST_IOC_REINIT, nullptr));
2364 // Close media_fd
2365 fhs.del(media_fd);
2366 // This should have no effect on req_fd
2367 fail_on_test(doioctl_fd(req_fd, MEDIA_REQUEST_IOC_QUEUE, nullptr) != ENOENT);
2368 fail_on_test(doioctl_fd(req_fd, MEDIA_REQUEST_IOC_REINIT, nullptr));
2369 // Close req_fd
2370 fhs.del(req_fd);
2371 // G_EXT_CTRLS must now return EINVAL for req_fd since it no longer exists
2372 if (have_controls)
2373 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls) != EINVAL);
2374 // And the media request ioctls now must return EBADF
2375 fail_on_test(doioctl_fd(req_fd, MEDIA_REQUEST_IOC_QUEUE, nullptr) != EBADF);
2376 fail_on_test(doioctl_fd(req_fd, MEDIA_REQUEST_IOC_REINIT, nullptr) != EBADF);
2377
2378 // Open media_fd and alloc a request again
2379 media_fd = fhs.add(mi_get_media_fd(node->g_fd(), node->bus_info));
2380 fail_on_test(doioctl_fd(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd));
2381 fhs.add(req_fd);
2382 ctrls.count = 1;
2383 ctrls.controls = &ctrl;
2384 if (have_controls) {
2385 ctrl.value = valid_qctrl.minimum;
2386 ctrls.which = 0;
2387 // Set control without requests
2388 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS, &ctrls));
2389 ctrl.value = valid_qctrl.maximum;
2390 ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
2391 ctrls.request_fd = req_fd;
2392 // Set control for a request
2393 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS, &ctrls));
2394 ctrl.value = valid_qctrl.minimum;
2395 ctrls.request_fd = req_fd;
2396 // But you cannot get the value of an unqueued request
2397 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls) != EACCES);
2398 ctrls.which = 0;
2399 // But you can without a request
2400 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls));
2401 fail_on_test(ctrl.value != valid_qctrl.minimum);
2402 ctrls.request_fd = req_fd;
2403 ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
2404 ctrl.id = 1;
2405 // Setting an invalid control in a request must fail
2406 fail_on_test(!doioctl(node, VIDIOC_S_EXT_CTRLS, &ctrls));
2407 // And also when trying to read an invalid control of a request
2408 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls) != EACCES);
2409 }
2410 ctrl.id = valid_qctrl.id;
2411 // Close req_fd and media_fd and reopen device node
2412 fhs.del(req_fd);
2413 fhs.del(media_fd);
2414 node->reopen();
2415
2416 int type = node->g_type();
2417 // For m2m devices g_type() will return the capture type, so
2418 // we need to invert it to get the output type.
2419 // At the moment only the output type of an m2m device can use
2420 // requests.
2421 if (node->is_m2m)
2422 type = v4l_type_invert(type);
2423 if (v4l_type_is_vbi(type)) {
2424 cv4l_fmt fmt;
2425
2426 node->g_fmt(fmt, type);
2427 node->s_fmt(fmt, type);
2428 }
2429
2430 if (!(node->valid_buftypes & (1 << type))) {
2431 // If the type is not supported, then check that requests
2432 // are also not supported.
2433 fail_on_test(node->buf_caps & V4L2_BUF_CAP_SUPPORTS_REQUESTS);
2434 return ENOTTY;
2435 }
2436 bool supports_requests = node->buf_caps & V4L2_BUF_CAP_SUPPORTS_REQUESTS;
2437
2438 buffer_info.clear();
2439
2440 cv4l_queue q(type, V4L2_MEMORY_MMAP);
2441 // For m2m devices q is the output queue and m2m_q is the capture queue
2442 cv4l_queue m2m_q(v4l_type_invert(type));
2443
2444 q.init(type, V4L2_MEMORY_MMAP);
2445 fail_on_test(q.reqbufs(node, 15));
2446
2447 unsigned min_bufs = q.g_buffers();
2448 fail_on_test(q.reqbufs(node, min_bufs + 6));
2449 unsigned num_bufs = q.g_buffers();
2450 // Create twice as many requests as there are buffers
2451 unsigned num_requests = 2 * num_bufs;
2452 last_seq.init();
2453
2454 media_fd = fhs.add(mi_get_media_fd(node->g_fd(), node->bus_info));
2455
2456 // Allocate the requests
2457 for (unsigned i = 0; i < num_requests; i++) {
2458 fail_on_test(doioctl_fd(media_fd, MEDIA_IOC_REQUEST_ALLOC, &buf_req_fds[i]));
2459 fhs.add(buf_req_fds[i]);
2460 fail_on_test(buf_req_fds[i] < 0);
2461 // Check that empty requests can't be queued
2462 fail_on_test(!doioctl_fd(buf_req_fds[i], MEDIA_REQUEST_IOC_QUEUE, nullptr));
2463 }
2464 // close the media fd, should not be needed anymore
2465 fhs.del(media_fd);
2466
2467 buffer buf(q);
2468
2469 fail_on_test(buf.querybuf(node, 0));
2470 // Queue a buffer without using requests
2471 ret = buf.qbuf(node);
2472 // If this fails, then that can only happen if the queue
2473 // requires requests. In that case EBADR is returned.
2474 fail_on_test_val(ret && ret != EBADR, ret);
2475 fail_on_test(buf.querybuf(node, 1));
2476 // Now try to queue the buffer to the request
2477 buf.s_flags(V4L2_BUF_FLAG_REQUEST_FD);
2478 buf.s_request_fd(buf_req_fds[1]);
2479 // If requests are required, then this must now work
2480 // If requests are optional, then this must now fail since the
2481 // queue in is non-request mode.
2482 if (ret == EBADR)
2483 fail_on_test(buf.qbuf(node));
2484 else
2485 fail_on_test(!buf.qbuf(node));
2486
2487 // Reopen device node, clearing any pending requests
2488 node->reopen();
2489
2490 q.init(type, V4L2_MEMORY_MMAP);
2491 fail_on_test(q.reqbufs(node, num_bufs));
2492
2493 if (node->is_m2m) {
2494 // Setup the capture queue
2495 fail_on_test(m2m_q.reqbufs(node, 2));
2496 fail_on_test(node->streamon(m2m_q.g_type()));
2497
2498 buffer buf(m2m_q);
2499
2500 fail_on_test(buf.querybuf(node, 0));
2501 buf.s_flags(V4L2_BUF_FLAG_REQUEST_FD);
2502 buf.s_request_fd(buf_req_fds[0]);
2503 // Only the output queue can support requests,
2504 // so if the capture queue also supports requests,
2505 // then something is wrong.
2506 fail_on_test(!buf.qbuf(node));
2507 fail_on_test(node->streamoff(m2m_q.g_type()));
2508 fail_on_test(m2m_q.reqbufs(node, 0));
2509
2510 fail_on_test(setupM2M(node, m2m_q));
2511 }
2512
2513 ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
2514 // Test queuing buffers...
2515 for (unsigned i = 0; i < num_bufs; i++) {
2516 buffer buf(q);
2517
2518 fail_on_test(buf.querybuf(node, i));
2519 // No request was set, so this should return 0
2520 fail_on_test(buf.g_request_fd());
2521 buf.s_flags(V4L2_BUF_FLAG_REQUEST_FD);
2522 if (i == 0) {
2523 buf.s_request_fd(-1);
2524 // Can't queue to an invalid request fd
2525 fail_on_test(!buf.qbuf(node));
2526 buf.s_request_fd(0xdead);
2527 fail_on_test(!buf.qbuf(node));
2528 }
2529 buf.s_request_fd(buf_req_fds[i]);
2530 if (v4l_type_is_video(buf.g_type()))
2531 buf.s_field(V4L2_FIELD_ANY);
2532 if (!(i & 1)) {
2533 // VIDIOC_PREPARE_BUF is incompatible with requests
2534 fail_on_test(buf.prepare_buf(node) != EINVAL);
2535 buf.s_flags(0);
2536 // Test vivid error injection
2537 if (node->inject_error(VIVID_CID_BUF_PREPARE_ERROR))
2538 fail_on_test(buf.prepare_buf(node) != EINVAL);
2539 fail_on_test(buf.prepare_buf(node));
2540 fail_on_test(buf.querybuf(node, i));
2541 // Check that the buffer was prepared
2542 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_PREPARED));
2543 buf.s_flags(buf.g_flags() | V4L2_BUF_FLAG_REQUEST_FD);
2544 buf.s_request_fd(buf_req_fds[i]);
2545 }
2546 // Queue the buffer to the request
2547 int err = buf.qbuf(node);
2548 if (!err) {
2549 // If requests are not supported, this should fail
2550 fail_on_test(!supports_requests);
2551 // You can't queue the same buffer again
2552 fail_on_test(!buf.qbuf(node));
2553 } else {
2554 // Can only fail if requests are not supported
2555 fail_on_test(supports_requests);
2556 // and should fail with EBADR in that case
2557 fail_on_test(err != EBADR);
2558 }
2559 if (err) {
2560 // Requests are not supported, so clean up and return
2561 fail_on_test(node->streamoff(q.g_type()));
2562 fail_on_test(q.reqbufs(node, 0));
2563 if (node->is_m2m) {
2564 fail_on_test(node->streamoff(m2m_q.g_type()));
2565 m2m_q.munmap_bufs(node);
2566 fail_on_test(m2m_q.reqbufs(node, 0));
2567 }
2568 node->reopen();
2569 return ENOTTY;
2570 }
2571 // Check flags and request fd
2572 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
2573 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_IN_REQUEST));
2574 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD));
2575 fail_on_test(buf.g_request_fd() < 0);
2576 // Query the buffer again
2577 fail_on_test(buf.querybuf(node, i));
2578 // Check returned flags and request fd
2579 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
2580 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_IN_REQUEST));
2581 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD));
2582 fail_on_test(buf.g_request_fd() < 0);
2583 if (i & 1)
2584 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_PREPARED);
2585 else
2586 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_PREPARED));
2587 // Check that you can't queue it again
2588 buf.s_request_fd(buf_req_fds[i]);
2589 fail_on_test(!buf.qbuf(node));
2590
2591 // Set a control in the request, except for every third request.
2592 ctrl.value = (i & 1) ? valid_qctrl.maximum : valid_qctrl.minimum;
2593 ctrls.request_fd = buf_req_fds[i];
2594 if (i % 3 < 2)
2595 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS, &ctrls));
2596 if (is_vivid) {
2597 // For vivid, check modifiable array support
2598 memset(vivid_pixel_array, i, vivid_pixel_array_size);
2599 vivid_pixel_array_ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
2600 vivid_pixel_array_ctrls.count = 1;
2601 vivid_pixel_array_ctrls.controls = &vivid_pixel_array_ctrl;
2602 vivid_pixel_array_ctrls.request_fd = buf_req_fds[i];
2603 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS,
2604 &vivid_pixel_array_ctrls));
2605 fail_on_test(vivid_pixel_array[vivid_pixel_array_size] != 0xff);
2606
2607 // For vivid, check dynamic array support:
2608 vivid_dyn_array_ctrl.size = sizeof(vivid_dyn_array);
2609 vivid_dyn_array_ctrl.p_u32 = vivid_dyn_array;
2610 memset(vivid_dyn_array, 0xff, sizeof(vivid_dyn_array));
2611 vivid_dyn_array_ctrls.which = V4L2_CTRL_WHICH_REQUEST_VAL;
2612 vivid_dyn_array_ctrls.count = 1;
2613 vivid_dyn_array_ctrls.controls = &vivid_dyn_array_ctrl;
2614 vivid_dyn_array_ctrls.request_fd = buf_req_fds[i];
2615 // vivid_dyn_array_ctrl.size is too large, must return ENOSPC
2616 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS,
2617 &vivid_dyn_array_ctrls) != ENOSPC);
2618 // and size is set at 100 elems
2619 fail_on_test(vivid_dyn_array_ctrl.size != 100 * elem_size);
2620 // Check that the array is not overwritten
2621 fail_on_test(vivid_dyn_array[0] != 0xffffffff);
2622 if (i % 3 < 2) {
2623 unsigned size = (2 + 2 * (i % 8)) * elem_size;
2624
2625 // Set proper size, varies per request
2626 vivid_dyn_array_ctrl.size = size;
2627 memcpy(vivid_dyn_array, vivid_dyn_array_init, size);
2628 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS,
2629 &vivid_dyn_array_ctrls));
2630 // check that the size is as expected
2631 fail_on_test(vivid_dyn_array_ctrl.size != size);
2632 // and the array values are correctly clamped
2633 fail_on_test(memcmp(vivid_dyn_array, vivid_dyn_array_clamped, size));
2634 // and the end of the array is not overwritten
2635 fail_on_test(vivid_dyn_array[size / elem_size] != 0xffffffff);
2636 }
2637 }
2638 // Re-init the unqueued request
2639 fail_on_test(doioctl_fd(buf_req_fds[i], MEDIA_REQUEST_IOC_REINIT, nullptr));
2640
2641 // Make sure that the buffer is no longer in a request
2642 fail_on_test(buf.querybuf(node, i));
2643 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_IN_REQUEST);
2644 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD);
2645
2646 // Set the control again
2647 ctrls.request_fd = buf_req_fds[i];
2648 if (i % 3 < 2)
2649 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS, &ctrls));
2650 if (is_vivid && i % 3 < 2) {
2651 // Set the pixel array control again
2652 memset(vivid_pixel_array, i, vivid_pixel_array_size);
2653 vivid_pixel_array_ctrls.request_fd = buf_req_fds[i];
2654 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS,
2655 &vivid_pixel_array_ctrls));
2656 // Set the dynamic array control again
2657 vivid_dyn_array_ctrls.request_fd = buf_req_fds[i];
2658 vivid_dyn_array_ctrl.size = (2 + 2 * (i % 8)) * elem_size;
2659 memcpy(vivid_dyn_array, vivid_dyn_array_init,
2660 sizeof(vivid_dyn_array_init));
2661 fail_on_test(doioctl(node, VIDIOC_S_EXT_CTRLS,
2662 &vivid_dyn_array_ctrls));
2663 }
2664
2665 // After the re-init the buffer is no longer marked for
2666 // a request. If a request has been queued before (hence
2667 // the 'if (i)' check), then queuing the buffer without
2668 // a request must fail since you can't mix the two streamining
2669 // models.
2670 if (i)
2671 fail_on_test(!buf.qbuf(node));
2672 buf.s_flags(buf.g_flags() | V4L2_BUF_FLAG_REQUEST_FD);
2673 buf.s_request_fd(buf_req_fds[i]);
2674 buf.s_field(V4L2_FIELD_ANY);
2675 // Queue the buffer for the request
2676 fail_on_test(buf.qbuf(node));
2677 // Verify that drivers will replace FIELD_ANY for video output queues
2678 if (v4l_type_is_video(buf.g_type()) && v4l_type_is_output(buf.g_type()))
2679 fail_on_test(buf.g_field() == V4L2_FIELD_ANY);
2680 // Query buffer and check that it is marked as being part of a request
2681 fail_on_test(buf.querybuf(node, i));
2682 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_IN_REQUEST));
2683 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD));
2684 // Use vivid to check buffer prepare or request validation error injection
2685 if ((i & 1) && node->inject_error(i > num_bufs / 2 ?
2686 VIVID_CID_BUF_PREPARE_ERROR :
2687 VIVID_CID_REQ_VALIDATE_ERROR))
2688 fail_on_test(doioctl_fd(buf_req_fds[i],
2689 MEDIA_REQUEST_IOC_QUEUE, nullptr) != EINVAL);
2690 // Queue the request
2691 ret = doioctl_fd(buf_req_fds[i], MEDIA_REQUEST_IOC_QUEUE, nullptr);
2692 if (node->codec_mask & STATELESS_DECODER) {
2693 // Stateless decoders might require that certain
2694 // controls are present in the request. In that
2695 // case they return ENOENT and we just stop testing
2696 // since we don't know what those controls are.
2697 fail_on_test_val(ret != ENOENT, ret);
2698 test_streaming = false;
2699 break;
2700 }
2701 fail_on_test_val(ret, ret);
2702 fail_on_test(buf.querybuf(node, i));
2703 // Check that the buffer is now queued up
2704 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_IN_REQUEST);
2705 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD));
2706 fail_on_test(!(buf.g_flags() & V4L2_BUF_FLAG_QUEUED));
2707 // Re-initing or requeuing the request is no longer possible
2708 fail_on_test(doioctl_fd(buf_req_fds[i], MEDIA_REQUEST_IOC_REINIT, nullptr) != EBUSY);
2709 fail_on_test(doioctl_fd(buf_req_fds[i], MEDIA_REQUEST_IOC_QUEUE, nullptr) != EBUSY);
2710 if (i >= min_bufs) {
2711 // Close some of the request fds to check that this
2712 // is safe to do
2713 close(buf_req_fds[i]);
2714 buf_req_fds[i] = -1;
2715 }
2716 if (i == min_bufs - 1) {
2717 // Check vivid STREAMON error injection
2718 if (node->inject_error(VIVID_CID_START_STR_ERROR))
2719 fail_on_test(!node->streamon(q.g_type()));
2720 fail_on_test(node->streamon(q.g_type()));
2721 }
2722 }
2723
2724 fail_on_test(node->g_fmt(cur_fmt, q.g_type()));
2725
2726 if (test_streaming) {
2727 unsigned capture_count;
2728
2729 // Continue streaming
2730 // For m2m devices captureBufs() behaves a bit odd: you pass
2731 // in the total number of output buffers that you want to
2732 // stream, but since there are already q.g_buffers() output
2733 // buffers queued up (see previous loop), the captureBufs()
2734 // function will subtract that from frame_count, so it will
2735 // only queue frame_count - q.g_buffers() output buffers.
2736 // In order to ensure we captured at least
2737 // min_bufs buffers we need to add min_bufs to the frame_count.
2738 fail_on_test(captureBufs(node, node, q, m2m_q,
2739 num_bufs + (node->is_m2m ? min_bufs : 0),
2740 POLL_MODE_SELECT, capture_count));
2741 }
2742 fail_on_test(node->streamoff(q.g_type()));
2743
2744 // Note that requests min_bufs...2*min_bufs-1 close their filehandles,
2745 // so here we just go through the first half of the requests.
2746 for (unsigned i = 0; test_streaming && i < min_bufs; i++) {
2747 buffer buf(q);
2748
2749 // Get the control
2750 ctrls.request_fd = buf_req_fds[i];
2751 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls));
2752 bool is_max = i & 1;
2753 // Since the control was not set for every third request,
2754 // the value will actually be that of the previous request.
2755 if (i % 3 == 2)
2756 is_max = !is_max;
2757 // Check that the value is as expected
2758 fail_on_test(ctrl.value != (is_max ? valid_qctrl.maximum :
2759 valid_qctrl.minimum));
2760 if (is_vivid) {
2761 // vivid specific: check that the read-only control
2762 // of the completed request has the expected value
2763 // (sequence number & 0xff).
2764 vivid_ro_ctrls.request_fd = buf_req_fds[i];
2765 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &vivid_ro_ctrls));
2766 // FIXME: due to unreliable sequence counters from vivid this
2767 // test fails regularly. For now replace the 'warn_once' by
2768 // 'info_once' until vivid is fixed.
2769 if (node->is_video && !node->can_output &&
2770 vivid_ro_ctrl.value != (int)i)
2771 info_once("vivid_ro_ctrl.value (%d) != i (%u)\n",
2772 vivid_ro_ctrl.value, i);
2773
2774 // Check that the dynamic control array is set as
2775 // expected and with the correct values.
2776 vivid_dyn_array_ctrls.request_fd = buf_req_fds[i];
2777 memset(vivid_dyn_array, 0xff, sizeof(vivid_dyn_array));
2778 vivid_dyn_array_ctrl.size = sizeof(vivid_dyn_array);
2779 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &vivid_dyn_array_ctrls));
2780 unsigned size = (2 + 2 * (i % 8)) * elem_size;
2781 if (i % 3 == 2)
2782 size = (2 + 2 * ((i - 1) % 8)) * elem_size;
2783 fail_on_test(vivid_dyn_array_ctrl.size != size);
2784 fail_on_test(memcmp(vivid_dyn_array, vivid_dyn_array_clamped,
2785 vivid_dyn_array_ctrl.size));
2786 fail_on_test(vivid_dyn_array[size / elem_size] != 0xffffffff);
2787 // Check that the pixel array control is set as
2788 // expected and with the correct values.
2789 vivid_pixel_array_ctrls.request_fd = buf_req_fds[i];
2790 memset(vivid_pixel_array, 0xfe, vivid_pixel_array_size);
2791 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &vivid_pixel_array_ctrls));
2792 bool ok = true;
2793 __u8 expected = (i % 3 == 2) ? i - 1 : i;
2794 for (unsigned j = 0; j < vivid_pixel_array_size; j++)
2795 if (vivid_pixel_array[j] != expected) {
2796 ok = false;
2797 break;
2798 }
2799 fail_on_test(!ok);
2800 fail_on_test(vivid_pixel_array[vivid_pixel_array_size] != 0xff);
2801 }
2802 fail_on_test(buf.querybuf(node, i));
2803 // Check that all the buffers of the stopped stream are
2804 // no longer marked as belonging to a request.
2805 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD);
2806 fail_on_test(buf.g_request_fd());
2807 struct pollfd pfd = {
2808 buf_req_fds[i],
2809 POLLPRI, 0
2810 };
2811 // Check that polling the request fd will immediately return,
2812 // indicating that the request has completed.
2813 fail_on_test(poll(&pfd, 1, 100) != 1);
2814 // Requeuing the request must fail
2815 fail_on_test(doioctl_fd(buf_req_fds[i], MEDIA_REQUEST_IOC_QUEUE, nullptr) != EBUSY);
2816 // But reinit must succeed.
2817 fail_on_test(doioctl_fd(buf_req_fds[i], MEDIA_REQUEST_IOC_REINIT, nullptr));
2818 fail_on_test(buf.querybuf(node, i));
2819 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_REQUEST_FD);
2820 fail_on_test(buf.g_request_fd());
2821 ctrls.request_fd = buf_req_fds[i];
2822 // Check that getting controls from a reinited request fails
2823 fail_on_test(!doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls));
2824 // Close the request fd
2825 fhs.del(buf_req_fds[i]);
2826 buf_req_fds[i] = -1;
2827 }
2828 // Close any remaining open request fds
2829 for (unsigned i = 0; i < num_requests; i++)
2830 if (buf_req_fds[i] >= 0)
2831 fhs.del(buf_req_fds[i]);
2832
2833 // Getting the current control value must work
2834 ctrls.which = 0;
2835 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &ctrls));
2836 // Check the final control value
2837 if (test_streaming) {
2838 bool is_max = (num_bufs - 1) & 1;
2839 if ((num_bufs - 1) % 3 == 2)
2840 is_max = !is_max;
2841 fail_on_test(ctrl.value != (is_max ? valid_qctrl.maximum :
2842 valid_qctrl.minimum));
2843 if (is_vivid) {
2844 // For vivid check the final read-only value,
2845 vivid_ro_ctrls.which = 0;
2846 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &vivid_ro_ctrls));
2847 // FIXME: due to unreliable sequence counters from vivid this
2848 // test fails regularly. For now replace the 'warn' by 'info'
2849 // until vivid is fixed.
2850 if (node->is_video && !node->can_output &&
2851 vivid_ro_ctrl.value != (int)(num_bufs - 1))
2852 info("vivid_ro_ctrl.value (%d) != num_bufs - 1 (%d)\n",
2853 vivid_ro_ctrl.value, num_bufs - 1);
2854
2855 // the final dynamic array value,
2856 v4l2_query_ext_ctrl q_dyn_array = {
2857 .id = VIVID_CID_U32_DYN_ARRAY,
2858 };
2859 fail_on_test(doioctl(node, VIDIOC_QUERY_EXT_CTRL, &q_dyn_array));
2860 unsigned elems = 2 + 2 * ((num_bufs - 1) % 8);
2861 if ((num_bufs - 1) % 3 == 2)
2862 elems = 2 + 2 * ((num_bufs - 2) % 8);
2863 fail_on_test(q_dyn_array.elems != elems);
2864 vivid_dyn_array_ctrls.which = 0;
2865 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &vivid_dyn_array_ctrls));
2866 fail_on_test(vivid_dyn_array_ctrl.size != elems * elem_size);
2867 fail_on_test(memcmp(vivid_dyn_array, vivid_dyn_array_clamped,
2868 vivid_dyn_array_ctrl.size));
2869
2870 // and the final pixel array value.
2871 vivid_pixel_array_ctrls.which = 0;
2872 fail_on_test(doioctl(node, VIDIOC_G_EXT_CTRLS, &vivid_pixel_array_ctrls));
2873 bool ok = true;
2874 __u8 expected = (num_bufs - 1) % 3 == 2 ? num_bufs - 2 : num_bufs - 1;
2875 for (unsigned j = 0; j < vivid_pixel_array_size; j++)
2876 if (vivid_pixel_array[j] != expected) {
2877 ok = false;
2878 break;
2879 }
2880 fail_on_test(!ok);
2881 }
2882 }
2883
2884 // Cleanup
2885 fail_on_test(q.reqbufs(node, 0));
2886 if (node->is_m2m) {
2887 fail_on_test(node->streamoff(m2m_q.g_type()));
2888 m2m_q.munmap_bufs(node);
2889 fail_on_test(m2m_q.reqbufs(node, 0));
2890 }
2891
2892 return 0;
2893 }
2894
2895 /* Android does not have support for pthread_cancel */
2896 #ifndef ANDROID
2897
2898 /*
2899 * This class wraps a pthread in such a way that it simplifies passing
2900 * parameters, checking completion, gracious halting, and aggressive
2901 * termination (an empty signal handler, as installed by testBlockingDQBuf,
2902 * is necessary). This alleviates the need for spaghetti error paths when
2903 * multiple potentially blocking threads are involved.
2904 */
2905 class BlockingThread
2906 {
2907 public:
~BlockingThread()2908 virtual ~BlockingThread()
2909 {
2910 stop();
2911 }
2912
start()2913 int start()
2914 {
2915 int ret = pthread_create(&thread, nullptr, startRoutine, this);
2916 if (ret < 0)
2917 return ret;
2918
2919 running = true;
2920 return 0;
2921 }
2922
stop()2923 void stop()
2924 {
2925 if (!running)
2926 return;
2927
2928 /*
2929 * If the thread is blocked on an ioctl, try to wake it up with
2930 * a signal.
2931 */
2932 if (!done) {
2933 pthread_kill(thread, SIGUSR1);
2934 sleep(1);
2935 }
2936
2937 /*
2938 * If the signal failed to interrupt the ioctl, use the heavy
2939 * artillery and cancel the thread.
2940 */
2941 if (!done) {
2942 pthread_cancel(thread);
2943 sleep(1);
2944 }
2945
2946 pthread_join(thread, nullptr);
2947 running = false;
2948 }
2949
2950 std::atomic<bool> done{};
2951
2952 private:
startRoutine(void * arg)2953 static void *startRoutine(void *arg)
2954 {
2955 auto self = static_cast<BlockingThread *>(arg);
2956
2957 pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, nullptr);
2958 pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, nullptr);
2959
2960 self->run();
2961
2962 self->done = true;
2963 return nullptr;
2964 }
2965
2966 virtual void run() = 0;
2967
2968 pthread_t thread;
2969 std::atomic<bool> running{};
2970 };
2971
2972 class DqbufThread : public BlockingThread
2973 {
2974 public:
DqbufThread(cv4l_queue * q,struct node * n)2975 DqbufThread(cv4l_queue *q, struct node *n) : queue(q), node(n) {}
2976
2977 private:
run()2978 void run() override
2979 {
2980 /*
2981 * In this thread we call VIDIOC_DQBUF and wait indefinitely
2982 * since no buffers are queued.
2983 */
2984 cv4l_buffer buf(queue->g_type(), V4L2_MEMORY_MMAP);
2985 node->dqbuf(buf);
2986 }
2987
2988 cv4l_queue *queue;
2989 struct node *node;
2990 };
2991
2992 class StreamoffThread : public BlockingThread
2993 {
2994 public:
StreamoffThread(cv4l_queue * q,struct node * n)2995 StreamoffThread(cv4l_queue *q, struct node *n) : queue(q), node(n) {}
2996
2997 private:
run()2998 void run() override
2999 {
3000 /*
3001 * In this thread call STREAMOFF; this shouldn't
3002 * be blocked by the DQBUF!
3003 */
3004 node->streamoff(queue->g_type());
3005 }
3006
3007 cv4l_queue *queue;
3008 struct node *node;
3009 };
3010
pthread_sighandler(int sig)3011 static void pthread_sighandler(int sig)
3012 {
3013 }
3014
testBlockingDQBuf(struct node * node,cv4l_queue & q)3015 static int testBlockingDQBuf(struct node *node, cv4l_queue &q)
3016 {
3017 DqbufThread thread_dqbuf(&q, node);
3018 StreamoffThread thread_streamoff(&q, node);
3019
3020 /*
3021 * SIGUSR1 is ignored by default, so install an empty signal handler
3022 * so that we can use SIGUSR1 to wake up threads potentially blocked
3023 * on ioctls.
3024 */
3025 signal(SIGUSR1, pthread_sighandler);
3026
3027 fail_on_test(q.reqbufs(node, 2));
3028 fail_on_test(node->streamon(q.g_type()));
3029
3030 /*
3031 * This test checks if a blocking wait in VIDIOC_DQBUF doesn't block
3032 * other ioctls.
3033 */
3034 fflush(stdout);
3035 thread_dqbuf.start();
3036
3037 /* Wait for the child thread to start and block */
3038 sleep(1);
3039 /* Check that it is really blocking */
3040 fail_on_test(thread_dqbuf.done);
3041
3042 fflush(stdout);
3043 thread_streamoff.start();
3044
3045 /* Wait for the second child to start and exit */
3046 sleep(3);
3047 fail_on_test(!thread_streamoff.done);
3048
3049 fail_on_test(node->streamoff(q.g_type()));
3050 fail_on_test(q.reqbufs(node, 0));
3051 return 0;
3052 }
3053
3054 #endif //ANDROID
3055
testBlockingWait(struct node * node)3056 int testBlockingWait(struct node *node)
3057 {
3058 bool can_stream = node->g_caps() & V4L2_CAP_STREAMING;
3059 int type;
3060
3061 if (!can_stream || !node->valid_buftypes)
3062 return ENOTTY;
3063
3064 buffer_info.clear();
3065 for (type = 0; type <= V4L2_BUF_TYPE_LAST; type++) {
3066 if (!(node->valid_buftypes & (1 << type)))
3067 continue;
3068 if (v4l_type_is_overlay(type))
3069 continue;
3070
3071 cv4l_queue q(type, V4L2_MEMORY_MMAP);
3072 cv4l_queue m2m_q(v4l_type_invert(type), V4L2_MEMORY_MMAP);
3073
3074 if (testSetupVbi(node, type))
3075 continue;
3076
3077 #ifndef ANDROID
3078 fail_on_test(testBlockingDQBuf(node, q));
3079 if (node->is_m2m)
3080 fail_on_test(testBlockingDQBuf(node, m2m_q));
3081 #endif
3082 }
3083 return 0;
3084 }
3085
restoreCropCompose(struct node * node,__u32 field,v4l2_selection & crop,v4l2_selection & compose)3086 static void restoreCropCompose(struct node *node, __u32 field,
3087 v4l2_selection &crop, v4l2_selection &compose)
3088 {
3089 if (node->has_inputs) {
3090 /*
3091 * For capture restore the compose rectangle
3092 * before the crop rectangle.
3093 */
3094 if (compose.r.width)
3095 node->s_frame_selection(compose, field);
3096 if (crop.r.width)
3097 node->s_frame_selection(crop, field);
3098 } else {
3099 /*
3100 * For output the crop rectangle should be
3101 * restored before the compose rectangle.
3102 */
3103 if (crop.r.width)
3104 node->s_frame_selection(crop, field);
3105 if (compose.r.width)
3106 node->s_frame_selection(compose, field);
3107 }
3108 }
3109
restoreFormat(struct node * node)3110 int restoreFormat(struct node *node)
3111 {
3112 cv4l_fmt fmt;
3113 unsigned h;
3114
3115 node->g_fmt(fmt);
3116
3117 h = fmt.g_frame_height();
3118 if (node->cur_io_caps & V4L2_IN_CAP_STD) {
3119 v4l2_std_id std;
3120
3121 fail_on_test(node->g_std(std));
3122 fmt.s_width(720);
3123 h = (std & V4L2_STD_525_60) ? 480 : 576;
3124 }
3125 if (node->cur_io_caps & V4L2_IN_CAP_DV_TIMINGS) {
3126 v4l2_dv_timings timings;
3127
3128 fail_on_test(node->g_dv_timings(timings));
3129 fmt.s_width(timings.bt.width);
3130 h = timings.bt.height;
3131 }
3132 if (node->cur_io_caps & V4L2_IN_CAP_NATIVE_SIZE) {
3133 v4l2_selection sel = {
3134 node->g_selection_type(),
3135 V4L2_SEL_TGT_NATIVE_SIZE,
3136 };
3137
3138 fail_on_test(node->g_selection(sel));
3139 fmt.s_width(sel.r.width);
3140 h = sel.r.height;
3141 }
3142 fmt.s_frame_height(h);
3143 /* First restore the format */
3144 node->s_fmt(fmt);
3145
3146 v4l2_selection sel_compose = {
3147 node->g_selection_type(),
3148 V4L2_SEL_TGT_COMPOSE,
3149 };
3150 v4l2_selection sel_crop = {
3151 node->g_selection_type(),
3152 V4L2_SEL_TGT_CROP,
3153 };
3154 sel_compose.r.width = fmt.g_width();
3155 sel_compose.r.height = fmt.g_frame_height();
3156 sel_crop.r.width = fmt.g_width();
3157 sel_crop.r.height = fmt.g_frame_height();
3158 restoreCropCompose(node, fmt.g_field(), sel_crop, sel_compose);
3159 return 0;
3160 }
3161
testStreaming(struct node * node,unsigned frame_count)3162 static int testStreaming(struct node *node, unsigned frame_count)
3163 {
3164 int type = node->g_type();
3165
3166 if (!(node->valid_buftypes & (1 << type)))
3167 return ENOTTY;
3168
3169 buffer_info.clear();
3170
3171 cur_fmt.s_type(type);
3172 node->g_fmt(cur_fmt);
3173
3174 bool is_output = v4l_type_is_output(type);
3175
3176 if (node->g_caps() & V4L2_CAP_STREAMING) {
3177 cv4l_queue q(type, V4L2_MEMORY_MMAP);
3178 bool alternate = cur_fmt.g_field() == V4L2_FIELD_ALTERNATE;
3179 v4l2_std_id std = 0;
3180
3181 node->g_std(std);
3182
3183 unsigned field = cur_fmt.g_first_field(std);
3184 cv4l_buffer buf(q);
3185
3186 if (is_output)
3187 stream_for_fmt(cur_fmt.g_pixelformat());
3188
3189 fail_on_test(q.reqbufs(node, 3));
3190 fail_on_test(q.obtain_bufs(node));
3191 for (unsigned i = 0; i < q.g_buffers(); i++) {
3192 buf.init(q, i);
3193 buf.s_field(field);
3194 if (alternate)
3195 field ^= 1;
3196 if (is_output &&
3197 !fill_output_buffer(q, buf))
3198 return 0;
3199 fail_on_test(node->qbuf(buf));
3200 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
3201 }
3202 fail_on_test(node->streamon());
3203
3204 while (node->dqbuf(buf) == 0) {
3205 if (!no_progress)
3206 printf("\r\t\t%s: Frame #%03d Field %s ",
3207 buftype2s(q.g_type()).c_str(),
3208 buf.g_sequence(), field2s(buf.g_field()).c_str());
3209 fflush(stdout);
3210 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
3211 buf.s_field(field);
3212 if (alternate)
3213 field ^= 1;
3214 if (is_output &&
3215 !fill_output_buffer(q, buf))
3216 return 0;
3217 fail_on_test(node->qbuf(buf));
3218 fail_on_test(buf.g_flags() & V4L2_BUF_FLAG_DONE);
3219 if (--frame_count == 0)
3220 break;
3221 }
3222 q.free(node);
3223 if (is_output)
3224 stream_close();
3225 if (!no_progress)
3226 printf("\r\t\t ");
3227 return 0;
3228 }
3229 fail_on_test(!(node->g_caps() & V4L2_CAP_READWRITE));
3230
3231 int size = cur_fmt.g_sizeimage();
3232 void *tmp = malloc(size);
3233
3234 for (unsigned i = 0; i < frame_count; i++) {
3235 int ret;
3236
3237 if (node->can_capture)
3238 ret = node->read(tmp, size);
3239 else
3240 ret = node->write(tmp, size);
3241 fail_on_test(ret != size);
3242 if (!no_progress)
3243 printf("\r\t\t%s: Frame #%03d", buftype2s(type).c_str(), i);
3244 fflush(stdout);
3245 }
3246 if (!no_progress)
3247 printf("\r\t\t ");
3248 return 0;
3249 }
3250
3251 /*
3252 * Remember which combination of fmt, crop and compose rectangles have been
3253 * used to test streaming.
3254 * This helps prevent duplicate streaming tests.
3255 */
3256 struct selTest {
3257 unsigned fmt_w, fmt_h, fmt_field;
3258 unsigned crop_w, crop_h;
3259 unsigned compose_w, compose_h;
3260 };
3261
3262 static std::vector<selTest> selTests;
3263
createSelTest(const cv4l_fmt & fmt,const v4l2_selection & crop,const v4l2_selection & compose)3264 static selTest createSelTest(const cv4l_fmt &fmt,
3265 const v4l2_selection &crop, const v4l2_selection &compose)
3266 {
3267 selTest st = {
3268 fmt.g_width(), fmt.g_height(), fmt.g_field(),
3269 crop.r.width, crop.r.height,
3270 compose.r.width, compose.r.height
3271 };
3272
3273 return st;
3274 }
3275
createSelTest(struct node * node)3276 static selTest createSelTest(struct node *node)
3277 {
3278 v4l2_selection crop = {
3279 node->g_selection_type(),
3280 V4L2_SEL_TGT_CROP
3281 };
3282 v4l2_selection compose = {
3283 node->g_selection_type(),
3284 V4L2_SEL_TGT_COMPOSE
3285 };
3286 cv4l_fmt fmt;
3287
3288 node->g_fmt(fmt);
3289 node->g_selection(crop);
3290 node->g_selection(compose);
3291 return createSelTest(fmt, crop, compose);
3292 }
3293
haveSelTest(const selTest & test)3294 static bool haveSelTest(const selTest &test)
3295 {
3296 return std::any_of(selTests.begin(), selTests.end(), [&](const selTest &selfTest)
3297 { return &selfTest != &test; });
3298 }
3299
streamFmtRun(struct node * node,cv4l_fmt & fmt,unsigned frame_count,bool testSelection=false)3300 static void streamFmtRun(struct node *node, cv4l_fmt &fmt, unsigned frame_count,
3301 bool testSelection = false)
3302 {
3303 v4l2_selection crop = {
3304 node->g_selection_type(),
3305 V4L2_SEL_TGT_CROP
3306 };
3307 v4l2_selection compose = {
3308 node->g_selection_type(),
3309 V4L2_SEL_TGT_COMPOSE
3310 };
3311 bool has_compose = node->cur_io_has_compose();
3312 bool has_crop = node->cur_io_has_crop();
3313 char s_crop[32] = "";
3314 char s_compose[32] = "";
3315
3316 if (has_crop) {
3317 node->g_frame_selection(crop, fmt.g_field());
3318 sprintf(s_crop, "Crop %ux%u@%ux%u, ",
3319 crop.r.width, crop.r.height,
3320 crop.r.left, crop.r.top);
3321 }
3322 if (has_compose) {
3323 node->g_frame_selection(compose, fmt.g_field());
3324 sprintf(s_compose, "Compose %ux%u@%ux%u, ",
3325 compose.r.width, compose.r.height,
3326 compose.r.left, compose.r.top);
3327 }
3328 printf("\r\t\t%s%sStride %u, Field %s%s: %s \n",
3329 s_crop, s_compose,
3330 fmt.g_bytesperline(),
3331 field2s(fmt.g_field()).c_str(),
3332 testSelection ? ", SelTest" : "",
3333 ok(testStreaming(node, frame_count)));
3334 node->reopen();
3335 }
3336
streamFmt(struct node * node,__u32 pixelformat,__u32 w,__u32 h,v4l2_fract * f,unsigned frame_count)3337 static void streamFmt(struct node *node, __u32 pixelformat, __u32 w, __u32 h,
3338 v4l2_fract *f, unsigned frame_count)
3339 {
3340 const char *op = (node->g_caps() & V4L2_CAP_STREAMING) ? "MMAP" :
3341 (node->can_capture ? "read()" : "write()");
3342 bool has_compose = node->cur_io_has_compose();
3343 bool has_crop = node->cur_io_has_crop();
3344 __u32 default_field;
3345 v4l2_selection crop = {
3346 node->g_selection_type(),
3347 V4L2_SEL_TGT_CROP
3348 };
3349 v4l2_selection min_crop, max_crop;
3350 v4l2_selection compose = {
3351 node->g_selection_type(),
3352 V4L2_SEL_TGT_COMPOSE
3353 };
3354 v4l2_selection min_compose, max_compose;
3355 cv4l_fmt fmt;
3356 char hz[32] = "";
3357
3358 if (!frame_count)
3359 frame_count = f ? static_cast<unsigned>(1.0 / fract2f(f)) : 10;
3360 node->g_fmt(fmt);
3361 fmt.s_pixelformat(pixelformat);
3362 fmt.s_width(w);
3363 fmt.s_field(V4L2_FIELD_ANY);
3364 fmt.s_height(h);
3365 node->try_fmt(fmt);
3366 default_field = fmt.g_field();
3367 fmt.s_frame_height(h);
3368 node->s_fmt(fmt);
3369 if (f) {
3370 node->set_interval(*f);
3371 sprintf(hz, "@%.2f Hz", 1.0 / fract2f(f));
3372 }
3373
3374 printf("\ttest %s for Format %s, Frame Size %ux%u%s:\n", op,
3375 fcc2s(pixelformat).c_str(),
3376 fmt.g_width(), fmt.g_frame_height(), hz);
3377
3378 if (has_crop)
3379 node->g_frame_selection(crop, fmt.g_field());
3380 if (has_compose)
3381 node->g_frame_selection(compose, fmt.g_field());
3382
3383 for (unsigned field = V4L2_FIELD_NONE;
3384 field <= V4L2_FIELD_INTERLACED_BT; field++) {
3385 node->g_fmt(fmt);
3386 fmt.s_field(field);
3387 fmt.s_width(w);
3388 fmt.s_frame_height(h);
3389 node->s_fmt(fmt);
3390 if (fmt.g_field() != field)
3391 continue;
3392
3393 restoreCropCompose(node, fmt.g_field(), crop, compose);
3394 streamFmtRun(node, fmt, frame_count);
3395
3396 // Test if the driver allows for user-specified 'bytesperline' values
3397 node->g_fmt(fmt);
3398 unsigned bpl = fmt.g_bytesperline();
3399 unsigned size = fmt.g_sizeimage();
3400 fmt.s_bytesperline(bpl + 64);
3401 node->s_fmt(fmt, false);
3402 if (fmt.g_bytesperline() == bpl)
3403 continue;
3404 if (fmt.g_sizeimage() <= size)
3405 fail("fmt.g_sizeimage() <= size\n");
3406 streamFmtRun(node, fmt, frame_count);
3407 }
3408
3409 fmt.s_field(default_field);
3410 fmt.s_frame_height(h);
3411 node->s_fmt(fmt);
3412 restoreCropCompose(node, fmt.g_field(), crop, compose);
3413
3414 if (has_crop) {
3415 min_crop = crop;
3416 min_crop.r.width = 0;
3417 min_crop.r.height = 0;
3418 node->s_frame_selection(min_crop, fmt.g_field());
3419 node->s_fmt(fmt);
3420 node->g_frame_selection(min_crop, fmt.g_field());
3421 max_crop = crop;
3422 max_crop.r.width = ~0;
3423 max_crop.r.height = ~0;
3424 node->s_frame_selection(max_crop, fmt.g_field());
3425 node->s_fmt(fmt);
3426 node->g_frame_selection(max_crop, fmt.g_field());
3427 restoreCropCompose(node, fmt.g_field(), crop, compose);
3428 }
3429
3430 if (has_compose) {
3431 min_compose = compose;
3432 min_compose.r.width = 0;
3433 min_compose.r.height = 0;
3434 node->s_frame_selection(min_compose, fmt.g_field());
3435 node->s_fmt(fmt);
3436 node->g_frame_selection(min_compose, fmt.g_field());
3437 max_compose = compose;
3438 max_compose.r.width = ~0;
3439 max_compose.r.height = ~0;
3440 node->s_frame_selection(max_compose, fmt.g_field());
3441 node->s_fmt(fmt);
3442 node->g_frame_selection(max_compose, fmt.g_field());
3443 restoreCropCompose(node, fmt.g_field(), crop, compose);
3444 }
3445
3446 if (min_crop.r.width == max_crop.r.width &&
3447 min_crop.r.height == max_crop.r.height)
3448 has_crop = false;
3449 if (min_compose.r.width == max_compose.r.width &&
3450 min_compose.r.height == max_compose.r.height)
3451 has_compose = false;
3452
3453 if (!has_crop && !has_compose)
3454 return;
3455
3456 if (!has_compose) {
3457 cv4l_fmt tmp;
3458
3459 node->s_frame_selection(min_crop, fmt.g_field());
3460 node->g_fmt(tmp);
3461 if (tmp.g_width() != fmt.g_width() ||
3462 tmp.g_height() != fmt.g_height())
3463 fail("Format resolution changed after changing to min crop\n");
3464 selTest test = createSelTest(node);
3465 if (!haveSelTest(test)) {
3466 selTests.push_back(test);
3467 streamFmtRun(node, fmt, frame_count, true);
3468 }
3469 node->s_frame_selection(max_crop, fmt.g_field());
3470 node->g_fmt(tmp);
3471 if (tmp.g_width() != fmt.g_width() ||
3472 tmp.g_height() != fmt.g_height())
3473 fail("Format resolution changed after changing to max crop\n");
3474 test = createSelTest(node);
3475 if (!haveSelTest(test)) {
3476 selTests.push_back(test);
3477 streamFmtRun(node, fmt, frame_count, true);
3478 }
3479 restoreCropCompose(node, fmt.g_field(), crop, compose);
3480 return;
3481 }
3482 if (!has_crop) {
3483 cv4l_fmt tmp;
3484 node->s_frame_selection(min_compose, fmt.g_field());
3485 node->g_fmt(tmp);
3486 if (tmp.g_width() != fmt.g_width() ||
3487 tmp.g_height() != fmt.g_height())
3488 fail("Format resolution changed after changing to min compose\n");
3489 selTest test = createSelTest(node);
3490 if (!haveSelTest(test)) {
3491 selTests.push_back(test);
3492 streamFmtRun(node, fmt, frame_count, true);
3493 }
3494 node->s_frame_selection(max_compose, fmt.g_field());
3495 node->g_fmt(tmp);
3496 if (tmp.g_width() != fmt.g_width() ||
3497 tmp.g_height() != fmt.g_height())
3498 fail("Format resolution changed after changing to max compose\n");
3499 test = createSelTest(node);
3500 if (!haveSelTest(test)) {
3501 selTests.push_back(test);
3502 streamFmtRun(node, fmt, frame_count, true);
3503 }
3504 restoreCropCompose(node, fmt.g_field(), crop, compose);
3505 return;
3506 }
3507
3508 v4l2_selection *selections[2][4] = {
3509 { &min_crop, &max_crop, &min_compose, &max_compose },
3510 { &min_compose, &max_compose, &min_crop, &max_crop }
3511 };
3512
3513 selTest test = createSelTest(node);
3514 if (!haveSelTest(test))
3515 selTests.push_back(test);
3516
3517 for (unsigned i = 0; i < 8; i++) {
3518 v4l2_selection *sel1 = selections[node->can_output][i & 1];
3519 v4l2_selection *sel2 = selections[node->can_output][2 + ((i & 2) >> 1)];
3520 v4l2_selection *sel3 = selections[node->can_output][(i & 4) >> 2];
3521 cv4l_fmt tmp;
3522
3523 restoreCropCompose(node, fmt.g_field(), crop, compose);
3524 node->s_frame_selection(*sel1, fmt.g_field());
3525 node->g_fmt(tmp);
3526 if (tmp.g_width() != fmt.g_width() ||
3527 tmp.g_height() != fmt.g_height())
3528 fail("Format resolution changed after changing first selection\n");
3529 selTest test = createSelTest(node);
3530 if (!haveSelTest(test)) {
3531 selTests.push_back(test);
3532 streamFmtRun(node, fmt, frame_count, true);
3533 }
3534
3535 node->s_frame_selection(*sel2, fmt.g_field());
3536 node->g_fmt(tmp);
3537 if (tmp.g_width() != fmt.g_width() ||
3538 tmp.g_height() != fmt.g_height())
3539 fail("Format resolution changed after changing second selection\n");
3540 test = createSelTest(node);
3541 if (!haveSelTest(test)) {
3542 selTests.push_back(test);
3543 streamFmtRun(node, fmt, frame_count, true);
3544 }
3545
3546 node->s_frame_selection(*sel3, fmt.g_field());
3547 node->g_fmt(tmp);
3548 if (tmp.g_width() != fmt.g_width() ||
3549 tmp.g_height() != fmt.g_height())
3550 fail("Format resolution changed after changing third selection\n");
3551 test = createSelTest(node);
3552 if (!haveSelTest(test)) {
3553 selTests.push_back(test);
3554 streamFmtRun(node, fmt, frame_count, true);
3555 }
3556 }
3557 restoreCropCompose(node, fmt.g_field(), crop, compose);
3558 }
3559
streamIntervals(struct node * node,__u32 pixelformat,__u32 w,__u32 h,unsigned frame_count)3560 static void streamIntervals(struct node *node, __u32 pixelformat, __u32 w, __u32 h,
3561 unsigned frame_count)
3562 {
3563 v4l2_frmivalenum frmival = { 0 };
3564
3565 if (node->enum_frameintervals(frmival, pixelformat, w, h)) {
3566 streamFmt(node, pixelformat, w, h, nullptr, frame_count);
3567 return;
3568 }
3569
3570 if (frmival.type == V4L2_FRMIVAL_TYPE_DISCRETE) {
3571 do {
3572 streamFmt(node, pixelformat, w, h, &frmival.discrete,
3573 frame_count);
3574 } while (!node->enum_frameintervals(frmival));
3575 return;
3576 }
3577 streamFmt(node, pixelformat, w, h, &frmival.stepwise.min, frame_count);
3578 streamFmt(node, pixelformat, w, h, &frmival.stepwise.max, frame_count);
3579 }
3580
streamAllFormats(struct node * node,unsigned frame_count)3581 void streamAllFormats(struct node *node, unsigned frame_count)
3582 {
3583 v4l2_fmtdesc fmtdesc;
3584
3585 if (node->enum_fmt(fmtdesc, true))
3586 return;
3587 selTests.clear();
3588 do {
3589 v4l2_frmsizeenum frmsize;
3590 cv4l_fmt fmt;
3591
3592 if (node->enum_framesizes(frmsize, fmtdesc.pixelformat)) {
3593 cv4l_fmt min, max;
3594
3595 restoreFormat(node);
3596 node->g_fmt(fmt);
3597 min = fmt;
3598 min.s_width(0);
3599 min.s_height(0);
3600 node->try_fmt(min);
3601 max = fmt;
3602 max.s_width(~0);
3603 max.s_height(~0);
3604 node->try_fmt(max);
3605 if (min.g_width() != fmt.g_width() ||
3606 min.g_height() != fmt.g_height()) {
3607 streamIntervals(node, fmtdesc.pixelformat,
3608 min.g_width(), min.g_frame_height(),
3609 frame_count);
3610 restoreFormat(node);
3611 }
3612 if (max.g_width() != fmt.g_width() ||
3613 max.g_height() != fmt.g_height()) {
3614 streamIntervals(node, fmtdesc.pixelformat,
3615 max.g_width(), max.g_frame_height(),
3616 frame_count);
3617 restoreFormat(node);
3618 }
3619 streamIntervals(node, fmtdesc.pixelformat,
3620 fmt.g_width(), fmt.g_frame_height(),
3621 frame_count);
3622 continue;
3623 }
3624
3625 v4l2_frmsize_stepwise &ss = frmsize.stepwise;
3626
3627 switch (frmsize.type) {
3628 case V4L2_FRMSIZE_TYPE_DISCRETE:
3629 do {
3630 streamIntervals(node, fmtdesc.pixelformat,
3631 frmsize.discrete.width,
3632 frmsize.discrete.height,
3633 frame_count);
3634 } while (!node->enum_framesizes(frmsize));
3635 break;
3636 default:
3637 restoreFormat(node);
3638 streamIntervals(node, fmtdesc.pixelformat,
3639 ss.min_width, ss.min_height,
3640 frame_count);
3641 restoreFormat(node);
3642 if (ss.max_width != ss.min_width ||
3643 ss.max_height != ss.min_height) {
3644 streamIntervals(node, fmtdesc.pixelformat,
3645 ss.max_width, ss.max_height,
3646 frame_count);
3647 restoreFormat(node);
3648 }
3649 node->g_fmt(fmt);
3650 if (fmt.g_width() != ss.min_width ||
3651 fmt.g_frame_height() != ss.min_height) {
3652 streamIntervals(node, fmtdesc.pixelformat,
3653 fmt.g_width(), fmt.g_frame_height(),
3654 frame_count);
3655 restoreFormat(node);
3656 }
3657 break;
3658 }
3659 } while (!node->enum_fmt(fmtdesc));
3660 }
3661
streamM2MRun(struct node * node,unsigned frame_count)3662 static void streamM2MRun(struct node *node, unsigned frame_count)
3663 {
3664 cv4l_fmt cap_fmt, out_fmt;
3665 unsigned out_type = v4l_type_invert(node->g_type());
3666
3667 node->g_fmt(cap_fmt);
3668 node->g_fmt(out_fmt, out_type);
3669 if (!no_progress)
3670 printf("\r");
3671 printf("\t%s (%s) %dx%d -> %s (%s) %dx%d: %s\n",
3672 fcc2s(out_fmt.g_pixelformat()).c_str(),
3673 pixfmt2s(out_fmt.g_pixelformat()).c_str(),
3674 out_fmt.g_width(), out_fmt.g_height(),
3675 fcc2s(cap_fmt.g_pixelformat()).c_str(),
3676 pixfmt2s(cap_fmt.g_pixelformat()).c_str(),
3677 cap_fmt.g_width(), cap_fmt.g_height(),
3678 ok(testMmap(node, node, frame_count, POLL_MODE_SELECT)));
3679 }
3680
streamM2MOutFormat(struct node * node,__u32 pixelformat,__u32 w,__u32 h,unsigned frame_count)3681 static int streamM2MOutFormat(struct node *node, __u32 pixelformat, __u32 w, __u32 h,
3682 unsigned frame_count)
3683 {
3684 unsigned cap_type = node->g_type();
3685 v4l2_fmtdesc fmtdesc;
3686 cv4l_fmt out_fmt;
3687
3688 node->g_fmt(out_fmt, v4l_type_invert(cap_type));
3689 out_fmt.s_pixelformat(pixelformat);
3690 out_fmt.s_width(w);
3691 out_fmt.s_height(h);
3692 fail_on_test(node->s_fmt(out_fmt));
3693
3694 if (node->enum_fmt(fmtdesc, true, 0))
3695 return 0;
3696 do {
3697 cv4l_fmt fmt;
3698
3699 fail_on_test(node->g_fmt(fmt));
3700 fmt.s_pixelformat(fmtdesc.pixelformat);
3701 fmt.s_width(w);
3702 fmt.s_height(h);
3703 fail_on_test(node->s_fmt(fmt));
3704 streamM2MRun(node, frame_count);
3705 } while (!node->enum_fmt(fmtdesc));
3706 return 0;
3707 }
3708
streamM2MAllFormats(struct node * node,unsigned frame_count)3709 void streamM2MAllFormats(struct node *node, unsigned frame_count)
3710 {
3711 v4l2_fmtdesc fmtdesc;
3712 unsigned out_type = v4l_type_invert(node->g_type());
3713
3714 if (!frame_count)
3715 frame_count = 10;
3716
3717 if (node->enum_fmt(fmtdesc, true, 0, out_type))
3718 return;
3719 selTests.clear();
3720 do {
3721 v4l2_frmsizeenum frmsize;
3722 cv4l_fmt fmt;
3723
3724 if (node->enum_framesizes(frmsize, fmtdesc.pixelformat)) {
3725 cv4l_fmt min, max;
3726
3727 restoreFormat(node);
3728 node->g_fmt(fmt, out_type);
3729 min = fmt;
3730 min.s_width(0);
3731 min.s_height(0);
3732 node->try_fmt(min);
3733 max = fmt;
3734 max.s_width(~0);
3735 max.s_height(~0);
3736 node->try_fmt(max);
3737 if (min.g_width() != fmt.g_width() ||
3738 min.g_height() != fmt.g_height()) {
3739 streamM2MOutFormat(node, fmtdesc.pixelformat,
3740 min.g_width(), min.g_frame_height(),
3741 frame_count);
3742 restoreFormat(node);
3743 }
3744 if (max.g_width() != fmt.g_width() ||
3745 max.g_height() != fmt.g_height()) {
3746 streamM2MOutFormat(node, fmtdesc.pixelformat,
3747 max.g_width(), max.g_frame_height(),
3748 frame_count);
3749 restoreFormat(node);
3750 }
3751 streamM2MOutFormat(node, fmtdesc.pixelformat,
3752 fmt.g_width(), fmt.g_frame_height(),
3753 frame_count);
3754 continue;
3755 }
3756
3757 v4l2_frmsize_stepwise &ss = frmsize.stepwise;
3758
3759 switch (frmsize.type) {
3760 case V4L2_FRMSIZE_TYPE_DISCRETE:
3761 do {
3762 streamM2MOutFormat(node, fmtdesc.pixelformat,
3763 frmsize.discrete.width,
3764 frmsize.discrete.height,
3765 frame_count);
3766 } while (!node->enum_framesizes(frmsize));
3767 break;
3768 default:
3769 node->g_fmt(fmt, out_type);
3770
3771 if (fmt.g_width() != ss.min_width ||
3772 fmt.g_frame_height() != ss.min_height) {
3773 streamM2MOutFormat(node, fmtdesc.pixelformat,
3774 ss.min_width, ss.min_height,
3775 frame_count);
3776 restoreFormat(node);
3777 }
3778 if (fmt.g_width() != ss.max_width ||
3779 fmt.g_frame_height() != ss.max_height) {
3780 streamM2MOutFormat(node, fmtdesc.pixelformat,
3781 ss.max_width, ss.max_height,
3782 frame_count);
3783 restoreFormat(node);
3784 }
3785 streamM2MOutFormat(node, fmtdesc.pixelformat,
3786 fmt.g_width(), fmt.g_frame_height(),
3787 frame_count);
3788 restoreFormat(node);
3789 break;
3790 }
3791 } while (!node->enum_fmt(fmtdesc));
3792 }
3793