1 /*
2 * Copyright 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16 #include <algorithm>
17 #include <string_view>
18 #include <type_traits>
19
20 #include <assert.h>
21 #include <ctype.h>
22 #include <fcntl.h>
23 #include <inttypes.h>
24 #include <getopt.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <sys/wait.h>
32
33 #include <termios.h>
34 #include <unistd.h>
35
36 #define LOG_TAG "ScreenRecord"
37 #define ATRACE_TAG ATRACE_TAG_GRAPHICS
38 //#define LOG_NDEBUG 0
39 #include <utils/Log.h>
40
41 #include <binder/IPCThreadState.h>
42 #include <utils/Errors.h>
43 #include <utils/SystemClock.h>
44 #include <utils/Timers.h>
45 #include <utils/Trace.h>
46
47 #include <gui/ISurfaceComposer.h>
48 #include <gui/Surface.h>
49 #include <gui/SurfaceComposerClient.h>
50 #include <gui/ISurfaceComposer.h>
51 #include <media/MediaCodecBuffer.h>
52 #include <media/NdkMediaCodec.h>
53 #include <media/NdkMediaFormatPriv.h>
54 #include <media/NdkMediaMuxer.h>
55 #include <media/openmax/OMX_IVCommon.h>
56 #include <media/stagefright/MediaCodec.h>
57 #include <media/stagefright/MediaCodecConstants.h>
58 #include <media/stagefright/MediaErrors.h>
59 #include <media/stagefright/PersistentSurface.h>
60 #include <media/stagefright/foundation/ABuffer.h>
61 #include <media/stagefright/foundation/AMessage.h>
62 #include <mediadrm/ICrypto.h>
63 #include <ui/DisplayMode.h>
64 #include <ui/DisplayState.h>
65
66 #include "screenrecord.h"
67 #include "Overlay.h"
68 #include "FrameOutput.h"
69
70 using android::ABuffer;
71 using android::ALooper;
72 using android::AMessage;
73 using android::AString;
74 using android::ui::DisplayMode;
75 using android::FrameOutput;
76 using android::IBinder;
77 using android::IGraphicBufferProducer;
78 using android::ISurfaceComposer;
79 using android::MediaCodec;
80 using android::MediaCodecBuffer;
81 using android::Overlay;
82 using android::PersistentSurface;
83 using android::PhysicalDisplayId;
84 using android::ProcessState;
85 using android::Rect;
86 using android::String8;
87 using android::SurfaceComposerClient;
88 using android::Vector;
89 using android::sp;
90 using android::status_t;
91 using android::SurfaceControl;
92
93 using android::INVALID_OPERATION;
94 using android::NAME_NOT_FOUND;
95 using android::NO_ERROR;
96 using android::UNKNOWN_ERROR;
97
98 namespace ui = android::ui;
99
// Limits and fallback values used when configuring the recording.
static const uint32_t kMinBitRate = 100000;         // 0.1Mbps
static const uint32_t kMaxBitRate = 200 * 1000000;  // 200Mbps
static const uint32_t kMaxTimeLimitSec = 180;       // 3 minutes
static const uint32_t kFallbackWidth = 1280;        // 720p
static const uint32_t kFallbackHeight = 720;
static const char* kMimeTypeAvc = "video/avc";
// MIME type used for the Winscope metadata tracks in the output file.
static const char* kMimeTypeApplicationOctetstream = "application/octet-stream";

// Command-line parameters.
static bool gVerbose = false;           // chatty on stdout
static bool gRotate = false;            // rotate 90 degrees
static bool gMonotonicTime = false;     // use system monotonic time for timestamps
static bool gPersistentSurface = false; // use persistent surface
static enum {
    FORMAT_MP4, FORMAT_H264, FORMAT_WEBM, FORMAT_3GPP, FORMAT_FRAMES, FORMAT_RAW_FRAMES
} gOutputFormat = FORMAT_MP4;           // data format for output
static AString gCodecName = "";         // codec name override (empty = pick by MIME type)
static bool gSizeSpecified = false;     // was size explicitly requested?
static bool gWantInfoScreen = false;    // do we want initial info screen?
static bool gWantFrameTime = false;     // do we want times on each frame?
static uint32_t gVideoWidth = 0;        // default width+height
static uint32_t gVideoHeight = 0;
static uint32_t gBitRate = 20000000;    // 20Mbps
static uint32_t gTimeLimitSec = kMaxTimeLimitSec;
static uint32_t gBframes = 0;           // max B-frames passed to the encoder (0 = none)
// Display to record; when unset, the first reported physical display is used.
static std::optional<PhysicalDisplayId> gPhysicalDisplayId;
// Set by signal handler to stop recording.
static volatile bool gStopRequested = false;

// Previous signal handler state, restored after first hit.
static struct sigaction gOrigSigactionINT;
static struct sigaction gOrigSigactionHUP;
132
133
134 /*
135 * Catch keyboard interrupt signals. On receipt, the "stop requested"
136 * flag is raised, and the original handler is restored (so that, if
137 * we get stuck finishing, a second Ctrl-C will kill the process).
138 */
signalCatcher(int signum)139 static void signalCatcher(int signum)
140 {
141 gStopRequested = true;
142 switch (signum) {
143 case SIGINT:
144 case SIGHUP:
145 sigaction(SIGINT, &gOrigSigactionINT, NULL);
146 sigaction(SIGHUP, &gOrigSigactionHUP, NULL);
147 break;
148 default:
149 abort();
150 break;
151 }
152 }
153
154 /*
155 * Configures signal handlers. The previous handlers are saved.
156 *
157 * If the command is run from an interactive adb shell, we get SIGINT
158 * when Ctrl-C is hit. If we're run from the host, the local adb process
159 * gets the signal, and we get a SIGHUP when the terminal disconnects.
160 */
configureSignals()161 static status_t configureSignals() {
162 struct sigaction act;
163 memset(&act, 0, sizeof(act));
164 act.sa_handler = signalCatcher;
165 if (sigaction(SIGINT, &act, &gOrigSigactionINT) != 0) {
166 status_t err = -errno;
167 fprintf(stderr, "Unable to configure SIGINT handler: %s\n",
168 strerror(errno));
169 return err;
170 }
171 if (sigaction(SIGHUP, &act, &gOrigSigactionHUP) != 0) {
172 status_t err = -errno;
173 fprintf(stderr, "Unable to configure SIGHUP handler: %s\n",
174 strerror(errno));
175 return err;
176 }
177 signal(SIGPIPE, SIG_IGN);
178 return NO_ERROR;
179 }
180
181 /*
182 * Configures and starts the MediaCodec encoder. Obtains an input surface
183 * from the codec.
184 */
prepareEncoder(float displayFps,sp<MediaCodec> * pCodec,sp<IGraphicBufferProducer> * pBufferProducer)185 static status_t prepareEncoder(float displayFps, sp<MediaCodec>* pCodec,
186 sp<IGraphicBufferProducer>* pBufferProducer) {
187 status_t err;
188
189 if (gVerbose) {
190 printf("Configuring recorder for %dx%d %s at %.2fMbps\n",
191 gVideoWidth, gVideoHeight, kMimeTypeAvc, gBitRate / 1000000.0);
192 fflush(stdout);
193 }
194
195 sp<AMessage> format = new AMessage;
196 format->setInt32(KEY_WIDTH, gVideoWidth);
197 format->setInt32(KEY_HEIGHT, gVideoHeight);
198 format->setString(KEY_MIME, kMimeTypeAvc);
199 format->setInt32(KEY_COLOR_FORMAT, OMX_COLOR_FormatAndroidOpaque);
200 format->setInt32(KEY_BIT_RATE, gBitRate);
201 format->setFloat(KEY_FRAME_RATE, displayFps);
202 format->setInt32(KEY_I_FRAME_INTERVAL, 10);
203 format->setInt32(KEY_MAX_B_FRAMES, gBframes);
204 if (gBframes > 0) {
205 format->setInt32(KEY_PROFILE, AVCProfileMain);
206 format->setInt32(KEY_LEVEL, AVCLevel41);
207 }
208
209 sp<android::ALooper> looper = new android::ALooper;
210 looper->setName("screenrecord_looper");
211 looper->start();
212 ALOGV("Creating codec");
213 sp<MediaCodec> codec;
214 if (gCodecName.empty()) {
215 codec = MediaCodec::CreateByType(looper, kMimeTypeAvc, true);
216 if (codec == NULL) {
217 fprintf(stderr, "ERROR: unable to create %s codec instance\n",
218 kMimeTypeAvc);
219 return UNKNOWN_ERROR;
220 }
221 } else {
222 codec = MediaCodec::CreateByComponentName(looper, gCodecName);
223 if (codec == NULL) {
224 fprintf(stderr, "ERROR: unable to create %s codec instance\n",
225 gCodecName.c_str());
226 return UNKNOWN_ERROR;
227 }
228 }
229
230 err = codec->configure(format, NULL, NULL,
231 MediaCodec::CONFIGURE_FLAG_ENCODE);
232 if (err != NO_ERROR) {
233 fprintf(stderr, "ERROR: unable to configure %s codec at %dx%d (err=%d)\n",
234 kMimeTypeAvc, gVideoWidth, gVideoHeight, err);
235 codec->release();
236 return err;
237 }
238
239 ALOGV("Creating encoder input surface");
240 sp<IGraphicBufferProducer> bufferProducer;
241 if (gPersistentSurface) {
242 sp<PersistentSurface> surface = MediaCodec::CreatePersistentInputSurface();
243 bufferProducer = surface->getBufferProducer();
244 err = codec->setInputSurface(surface);
245 } else {
246 err = codec->createInputSurface(&bufferProducer);
247 }
248 if (err != NO_ERROR) {
249 fprintf(stderr,
250 "ERROR: unable to %s encoder input surface (err=%d)\n",
251 gPersistentSurface ? "set" : "create",
252 err);
253 codec->release();
254 return err;
255 }
256
257 ALOGV("Starting codec");
258 err = codec->start();
259 if (err != NO_ERROR) {
260 fprintf(stderr, "ERROR: unable to start codec (err=%d)\n", err);
261 codec->release();
262 return err;
263 }
264
265 ALOGV("Codec prepared");
266 *pCodec = codec;
267 *pBufferProducer = bufferProducer;
268 return 0;
269 }
270
271 /*
272 * Sets the display projection, based on the display dimensions, video size,
273 * and device orientation.
274 */
setDisplayProjection(SurfaceComposerClient::Transaction & t,const sp<IBinder> & dpy,const ui::DisplayState & displayState)275 static status_t setDisplayProjection(
276 SurfaceComposerClient::Transaction& t,
277 const sp<IBinder>& dpy,
278 const ui::DisplayState& displayState) {
279 // Set the region of the layer stack we're interested in, which in our case is "all of it".
280 Rect layerStackRect(displayState.layerStackSpaceRect);
281
282 // We need to preserve the aspect ratio of the display.
283 float displayAspect = layerStackRect.getHeight() / static_cast<float>(layerStackRect.getWidth());
284
285
286 // Set the way we map the output onto the display surface (which will
287 // be e.g. 1280x720 for a 720p video). The rect is interpreted
288 // post-rotation, so if the display is rotated 90 degrees we need to
289 // "pre-rotate" it by flipping width/height, so that the orientation
290 // adjustment changes it back.
291 //
292 // We might want to encode a portrait display as landscape to use more
293 // of the screen real estate. (If players respect a 90-degree rotation
294 // hint, we can essentially get a 720x1280 video instead of 1280x720.)
295 // In that case, we swap the configured video width/height and then
296 // supply a rotation value to the display projection.
297 uint32_t videoWidth, videoHeight;
298 uint32_t outWidth, outHeight;
299 if (!gRotate) {
300 videoWidth = gVideoWidth;
301 videoHeight = gVideoHeight;
302 } else {
303 videoWidth = gVideoHeight;
304 videoHeight = gVideoWidth;
305 }
306 if (videoHeight > (uint32_t)(videoWidth * displayAspect)) {
307 // limited by narrow width; reduce height
308 outWidth = videoWidth;
309 outHeight = (uint32_t)(videoWidth * displayAspect);
310 } else {
311 // limited by short height; restrict width
312 outHeight = videoHeight;
313 outWidth = (uint32_t)(videoHeight / displayAspect);
314 }
315 uint32_t offX, offY;
316 offX = (videoWidth - outWidth) / 2;
317 offY = (videoHeight - outHeight) / 2;
318 Rect displayRect(offX, offY, offX + outWidth, offY + outHeight);
319
320 if (gVerbose) {
321 if (gRotate) {
322 printf("Rotated content area is %ux%u at offset x=%d y=%d\n",
323 outHeight, outWidth, offY, offX);
324 fflush(stdout);
325 } else {
326 printf("Content area is %ux%u at offset x=%d y=%d\n",
327 outWidth, outHeight, offX, offY);
328 fflush(stdout);
329 }
330 }
331
332 t.setDisplayProjection(dpy,
333 gRotate ? ui::ROTATION_90 : ui::ROTATION_0,
334 layerStackRect, displayRect);
335 return NO_ERROR;
336 }
337
338 /*
339 * Gets the physical id of the display to record. If the user specified a physical
340 * display id, then that id will be set. Otherwise, the default display will be set.
341 */
getPhysicalDisplayId(PhysicalDisplayId & outDisplayId)342 static status_t getPhysicalDisplayId(PhysicalDisplayId& outDisplayId) {
343 if (gPhysicalDisplayId) {
344 outDisplayId = *gPhysicalDisplayId;
345 return NO_ERROR;
346 }
347
348 const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
349 if (ids.empty()) {
350 return INVALID_OPERATION;
351 }
352 outDisplayId = ids.front();
353 return NO_ERROR;
354 }
355
/*
 * Configures the virtual display. When this completes, virtual display
 * frames will start arriving from the buffer producer.
 *
 * On success, *pDisplayHandle receives the virtual display token and
 * *mirrorRoot receives the mirror layer of the recorded physical display.
 */
static status_t prepareVirtualDisplay(
        const ui::DisplayState& displayState,
        const sp<IGraphicBufferProducer>& bufferProducer,
        sp<IBinder>* pDisplayHandle, sp<SurfaceControl>* mirrorRoot) {
    // The virtual display renders into the encoder's input surface.
    sp<IBinder> dpy = SurfaceComposerClient::createDisplay(
            String8("ScreenRecorder"), false /*secure*/);
    SurfaceComposerClient::Transaction t;
    t.setDisplaySurface(dpy, bufferProducer);
    setDisplayProjection(t, dpy, displayState);
    // Random layer stack id for the virtual display; presumably chosen to
    // avoid colliding with existing layer stacks -- TODO confirm.
    ui::LayerStack layerStack = ui::LayerStack::fromValue(std::rand());
    t.setDisplayLayerStack(dpy, layerStack);
    PhysicalDisplayId displayId;
    status_t err = getPhysicalDisplayId(displayId);
    if (err != NO_ERROR) {
        return err;
    }
    // Mirror the physical display's content into our layer stack.
    *mirrorRoot = SurfaceComposerClient::getDefault()->mirrorDisplay(displayId);
    if (*mirrorRoot == nullptr) {
        ALOGE("Failed to create a mirror for screenrecord");
        return UNKNOWN_ERROR;
    }
    t.setLayerStack(*mirrorRoot, layerStack);
    // Apply surface, projection, layer stack and mirror in one transaction.
    t.apply();

    *pDisplayHandle = dpy;

    return NO_ERROR;
}
388
/*
 * Writes an unsigned/signed integer byte-by-byte in little endian order
 * regardless of the platform endianness.
 *
 * |buffer| must have room for at least sizeof(T) bytes.
 */
template <typename T>
static void writeValueLE(T value, uint8_t* buffer) {
    static_assert(std::is_integral_v<T>, "writeValueLE requires an integral type");
    std::remove_const_t<T> temp = value;
    // size_t index avoids a signed/unsigned comparison against sizeof(T).
    for (size_t i = 0; i < sizeof(T); ++i) {
        buffer[i] = static_cast<std::uint8_t>(temp & 0xff);
        temp >>= 8;
    }
}
401
/*
 * Saves frames presentation time relative to the elapsed realtime clock in microseconds
 * preceded by a Winscope magic string and frame count to a metadata track.
 * This metadata is used by the Winscope tool to sync video with SurfaceFlinger
 * and WindowManager traces.
 *
 * The metadata is written as a binary array as follows:
 * - winscope magic string (kWinscopeMagicStringLegacy constant), without trailing null char,
 * - the number of recorded frames (as little endian uint32),
 * - for every frame its presentation time relative to the elapsed realtime clock in microseconds
 *   (as little endian uint64).
 *
 * |timestamps| must be non-empty: its first entry is used as the sample's
 * presentation time.
 */
static status_t writeWinscopeMetadataLegacy(const Vector<int64_t>& timestamps,
        const ssize_t metaTrackIdx, AMediaMuxer *muxer) {
    static constexpr auto kWinscopeMagicStringLegacy = "#VV1NSC0PET1ME!#";

    ALOGV("Writing winscope metadata legacy");
    // Offset that converts the monotonic frame timestamps into the elapsed
    // realtime timebase, in microseconds.
    int64_t systemTimeToElapsedTimeOffsetMicros = (android::elapsedRealtimeNano()
        - systemTime(SYSTEM_TIME_MONOTONIC)) / 1000;
    // Exact payload size: magic string (no NUL) + frame count + one uint64
    // per frame.
    sp<ABuffer> buffer = new ABuffer(timestamps.size() * sizeof(int64_t)
        + sizeof(uint32_t) + strlen(kWinscopeMagicStringLegacy));
    uint8_t* pos = buffer->data();
    // strcpy also writes a trailing NUL one byte past the magic string; that
    // byte is inside the buffer and is overwritten by the frame count below.
    strcpy(reinterpret_cast<char*>(pos), kWinscopeMagicStringLegacy);
    pos += strlen(kWinscopeMagicStringLegacy);
    writeValueLE<uint32_t>(timestamps.size(), pos);
    pos += sizeof(uint32_t);
    for (size_t idx = 0; idx < timestamps.size(); ++idx) {
        writeValueLE<uint64_t>(static_cast<uint64_t>(timestamps[idx]
            + systemTimeToElapsedTimeOffsetMicros), pos);
        pos += sizeof(uint64_t);
    }
    AMediaCodecBufferInfo bufferInfo = {
        0 /* offset */,
        static_cast<int32_t>(buffer->size()),
        timestamps[0] /* presentationTimeUs */,
        0 /* flags */
    };
    return AMediaMuxer_writeSampleData(muxer, metaTrackIdx, buffer->data(), &bufferInfo);
}
441
442 /*
443 * Saves metadata needed by Winscope to synchronize the screen recording playback with other traces.
444 *
445 * The metadata (version 2) is written as a binary array with the following format:
446 * - winscope magic string (#VV1NSC0PET1ME2#, 16B).
447 * - the metadata version number (4B little endian).
448 * - Realtime-to-elapsed time offset in nanoseconds (8B little endian).
449 * - the recorded frames count (8B little endian)
450 * - for each recorded frame:
451 * - System time in elapsed clock timebase in nanoseconds (8B little endian).
452 *
453 *
454 * Metadata version 2 changes
455 *
456 * Use elapsed time for compatibility with other UI traces (most of them):
457 * - Realtime-to-elapsed time offset (instead of realtime-to-monotonic)
458 * - Frame timestamps in elapsed clock timebase (instead of monotonic)
459 */
writeWinscopeMetadata(const Vector<std::int64_t> & timestampsMonotonicUs,const ssize_t metaTrackIdx,AMediaMuxer * muxer)460 static status_t writeWinscopeMetadata(const Vector<std::int64_t>& timestampsMonotonicUs,
461 const ssize_t metaTrackIdx, AMediaMuxer *muxer) {
462 ALOGV("Writing winscope metadata");
463
464 static constexpr auto kWinscopeMagicString = std::string_view {"#VV1NSC0PET1ME2#"};
465 static constexpr std::uint32_t metadataVersion = 2;
466
467 const auto elapsedTimeNs = android::elapsedRealtimeNano();
468 const std::int64_t elapsedToMonotonicTimeOffsetNs =
469 elapsedTimeNs - systemTime(SYSTEM_TIME_MONOTONIC);
470 const std::int64_t realToElapsedTimeOffsetNs =
471 systemTime(SYSTEM_TIME_REALTIME) - elapsedTimeNs;
472 const std::uint32_t framesCount = static_cast<std::uint32_t>(timestampsMonotonicUs.size());
473
474 sp<ABuffer> buffer = new ABuffer(
475 kWinscopeMagicString.size() +
476 sizeof(decltype(metadataVersion)) +
477 sizeof(decltype(realToElapsedTimeOffsetNs)) +
478 sizeof(decltype(framesCount)) +
479 framesCount * sizeof(std::uint64_t)
480 );
481 std::uint8_t* pos = buffer->data();
482
483 std::copy(kWinscopeMagicString.cbegin(), kWinscopeMagicString.cend(), pos);
484 pos += kWinscopeMagicString.size();
485
486 writeValueLE(metadataVersion, pos);
487 pos += sizeof(decltype(metadataVersion));
488
489 writeValueLE(realToElapsedTimeOffsetNs, pos);
490 pos += sizeof(decltype(realToElapsedTimeOffsetNs));
491
492 writeValueLE(framesCount, pos);
493 pos += sizeof(decltype(framesCount));
494
495 for (const auto timestampMonotonicUs : timestampsMonotonicUs) {
496 const auto timestampElapsedNs =
497 elapsedToMonotonicTimeOffsetNs + timestampMonotonicUs * 1000;
498 writeValueLE<std::uint64_t>(timestampElapsedNs, pos);
499 pos += sizeof(std::uint64_t);
500 }
501
502 AMediaCodecBufferInfo bufferInfo = {
503 0 /* offset */,
504 static_cast<std::int32_t>(buffer->size()),
505 timestampsMonotonicUs[0] /* presentationTimeUs */,
506 0 /* flags */
507 };
508 return AMediaMuxer_writeSampleData(muxer, metaTrackIdx, buffer->data(), &bufferInfo);
509 }
510
511 /*
512 * Update the display projection if size or orientation have changed.
513 */
updateDisplayProjection(const sp<IBinder> & virtualDpy,ui::DisplayState & displayState)514 void updateDisplayProjection(const sp<IBinder>& virtualDpy, ui::DisplayState& displayState) {
515 ATRACE_NAME("updateDisplayProjection");
516
517 PhysicalDisplayId displayId;
518 if (getPhysicalDisplayId(displayId) != NO_ERROR) {
519 fprintf(stderr, "ERROR: Failed to get display id\n");
520 return;
521 }
522
523 sp<IBinder> displayToken = SurfaceComposerClient::getPhysicalDisplayToken(displayId);
524 if (!displayToken) {
525 fprintf(stderr, "ERROR: failed to get display token\n");
526 return;
527 }
528
529 ui::DisplayState currentDisplayState;
530 if (SurfaceComposerClient::getDisplayState(displayToken, ¤tDisplayState) != NO_ERROR) {
531 ALOGW("ERROR: failed to get display state\n");
532 return;
533 }
534
535 if (currentDisplayState.orientation != displayState.orientation ||
536 currentDisplayState.layerStackSpaceRect != displayState.layerStackSpaceRect) {
537 displayState = currentDisplayState;
538 ALOGD("display state changed, now has orientation %s, size (%d, %d)",
539 toCString(displayState.orientation), displayState.layerStackSpaceRect.getWidth(),
540 displayState.layerStackSpaceRect.getHeight());
541
542 SurfaceComposerClient::Transaction t;
543 setDisplayProjection(t, virtualDpy, currentDisplayState);
544 t.apply();
545 }
546 }
547
/*
 * Runs the MediaCodec encoder, sending the output to the MediaMuxer. The
 * input frames are coming from the virtual display as fast as SurfaceFlinger
 * wants to send them.
 *
 * Exactly one of muxer or rawFp must be non-null.
 *
 * The muxer must *not* have been started before calling.
 *
 * Returns when the time limit expires, a stop is requested (signal or EOS
 * from the codec), or an error occurs.
 */
static status_t runEncoder(const sp<MediaCodec>& encoder, AMediaMuxer* muxer, FILE* rawFp,
        const sp<IBinder>& virtualDpy, ui::DisplayState displayState) {
    static int kTimeout = 250000;   // be responsive on signal
    status_t err;
    ssize_t trackIdx = -1;              // video track; assigned on format change
    ssize_t metaLegacyTrackIdx = -1;    // Winscope metadata track (legacy format)
    ssize_t metaTrackIdx = -1;          // Winscope metadata track (version 2)
    uint32_t debugNumFrames = 0;
    int64_t startWhenNsec = systemTime(CLOCK_MONOTONIC);
    int64_t endWhenNsec = startWhenNsec + seconds_to_nanoseconds(gTimeLimitSec);
    Vector<int64_t> timestampsMonotonicUs;
    bool firstFrame = true;

    // Exactly one output sink must be provided.
    assert((rawFp == NULL && muxer != NULL) || (rawFp != NULL && muxer == NULL));

    Vector<sp<MediaCodecBuffer> > buffers;
    err = encoder->getOutputBuffers(&buffers);
    if (err != NO_ERROR) {
        fprintf(stderr, "Unable to get output buffers (err=%d)\n", err);
        return err;
    }

    // Run until we're signaled.
    while (!gStopRequested) {
        size_t bufIndex, offset, size;
        int64_t ptsUsec;
        uint32_t flags;

        if (firstFrame) {
            ATRACE_NAME("first_frame");
            firstFrame = false;
        }

        // Enforce the recording time limit.
        if (systemTime(CLOCK_MONOTONIC) > endWhenNsec) {
            if (gVerbose) {
                printf("Time limit reached\n");
                fflush(stdout);
            }
            break;
        }

        ALOGV("Calling dequeueOutputBuffer");
        err = encoder->dequeueOutputBuffer(&bufIndex, &offset, &size, &ptsUsec,
                &flags, kTimeout);
        ALOGV("dequeueOutputBuffer returned %d", err);
        switch (err) {
        case NO_ERROR:
            // got a buffer
            if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) != 0) {
                ALOGV("Got codec config buffer (%zu bytes)", size);
                if (muxer != NULL) {
                    // ignore this -- we passed the CSD into MediaMuxer when
                    // we got the format change notification
                    size = 0;
                }
            }
            if (size != 0) {
                ALOGV("Got data in buffer %zu, size=%zu, pts=%" PRId64,
                        bufIndex, size, ptsUsec);

                // Track display rotation/resize while recording.
                updateDisplayProjection(virtualDpy, displayState);

                // If the virtual display isn't providing us with timestamps,
                // use the current time. This isn't great -- we could get
                // decoded data in clusters -- but we're not expecting
                // to hit this anyway.
                if (ptsUsec == 0) {
                    ptsUsec = systemTime(SYSTEM_TIME_MONOTONIC) / 1000;
                }

                if (muxer == NULL) {
                    // Raw H.264 stream: write the bytes straight out.
                    fwrite(buffers[bufIndex]->data(), 1, size, rawFp);
                    // Flush the data immediately in case we're streaming.
                    // We don't want to do this if all we've written is
                    // the SPS/PPS data because mplayer gets confused.
                    if ((flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) == 0) {
                        fflush(rawFp);
                    }
                } else {
                    // The MediaMuxer docs are unclear, but it appears that we
                    // need to pass either the full set of BufferInfo flags, or
                    // (flags & BUFFER_FLAG_SYNCFRAME).
                    //
                    // If this blocks for too long we could drop frames. We may
                    // want to queue these up and do them on a different thread.
                    ATRACE_NAME("write sample");
                    assert(trackIdx != -1);
                    // TODO
                    sp<ABuffer> buffer = new ABuffer(
                            buffers[bufIndex]->data(), buffers[bufIndex]->size());
                    AMediaCodecBufferInfo bufferInfo = {
                        0 /* offset */,
                        static_cast<int32_t>(buffer->size()),
                        ptsUsec /* presentationTimeUs */,
                        flags
                    };
                    err = AMediaMuxer_writeSampleData(muxer, trackIdx, buffer->data(), &bufferInfo);
                    if (err != NO_ERROR) {
                        fprintf(stderr,
                                "Failed writing data to muxer (err=%d)\n", err);
                        return err;
                    }
                    if (gOutputFormat == FORMAT_MP4) {
                        // Collect timestamps for the Winscope metadata tracks
                        // written after the loop ends.
                        timestampsMonotonicUs.add(ptsUsec);
                    }
                }
                debugNumFrames++;
            }
            err = encoder->releaseOutputBuffer(bufIndex);
            if (err != NO_ERROR) {
                fprintf(stderr, "Unable to release output buffer (err=%d)\n",
                        err);
                return err;
            }
            if ((flags & MediaCodec::BUFFER_FLAG_EOS) != 0) {
                // Not expecting EOS from SurfaceFlinger. Go with it.
                ALOGI("Received end-of-stream");
                gStopRequested = true;
            }
            break;
        case -EAGAIN:           // INFO_TRY_AGAIN_LATER
            // dequeueOutputBuffer timed out; loop and re-check the stop flag.
            ALOGV("Got -EAGAIN, looping");
            break;
        case android::INFO_FORMAT_CHANGED:    // INFO_OUTPUT_FORMAT_CHANGED
            {
                // Format includes CSD, which we must provide to muxer.
                ALOGV("Encoder format changed");
                sp<AMessage> newFormat;
                encoder->getOutputFormat(&newFormat);
                // TODO remove when MediaCodec has been replaced with AMediaCodec
                AMediaFormat *ndkFormat = AMediaFormat_fromMsg(&newFormat);
                if (muxer != NULL) {
                    trackIdx = AMediaMuxer_addTrack(muxer, ndkFormat);
                    if (gOutputFormat == FORMAT_MP4) {
                        // Both Winscope metadata tracks (legacy and v2) use
                        // the same octet-stream format.
                        AMediaFormat *metaFormat = AMediaFormat_new();
                        AMediaFormat_setString(metaFormat, AMEDIAFORMAT_KEY_MIME, kMimeTypeApplicationOctetstream);
                        metaLegacyTrackIdx = AMediaMuxer_addTrack(muxer, metaFormat);
                        metaTrackIdx = AMediaMuxer_addTrack(muxer, metaFormat);
                        AMediaFormat_delete(metaFormat);
                    }
                    ALOGV("Starting muxer");
                    err = AMediaMuxer_start(muxer);
                    if (err != NO_ERROR) {
                        fprintf(stderr, "Unable to start muxer (err=%d)\n", err);
                        return err;
                    }
                }
            }
            break;
        case android::INFO_OUTPUT_BUFFERS_CHANGED:   // INFO_OUTPUT_BUFFERS_CHANGED
            // Not expected for an encoder; handle it anyway.
            ALOGV("Encoder buffers changed");
            err = encoder->getOutputBuffers(&buffers);
            if (err != NO_ERROR) {
                fprintf(stderr,
                        "Unable to get new output buffers (err=%d)\n", err);
                return err;
            }
            break;
        case INVALID_OPERATION:
            ALOGW("dequeueOutputBuffer returned INVALID_OPERATION");
            return err;
        default:
            fprintf(stderr,
                    "Got weird result %d from dequeueOutputBuffer\n", err);
            return err;
        }
    }

    ALOGV("Encoder stopping (req=%d)", gStopRequested);
    if (gVerbose) {
        printf("Encoder stopping; recorded %u frames in %" PRId64 " seconds\n",
                debugNumFrames, nanoseconds_to_seconds(
                        systemTime(CLOCK_MONOTONIC) - startWhenNsec));
        fflush(stdout);
    }
    // Winscope metadata is only written when both tracks were created (MP4
    // output) and at least one frame was recorded (the writers use entry 0
    // as the presentation time).
    if (metaLegacyTrackIdx >= 0 && metaTrackIdx >= 0 && !timestampsMonotonicUs.isEmpty()) {
        err = writeWinscopeMetadataLegacy(timestampsMonotonicUs, metaLegacyTrackIdx, muxer);
        if (err != NO_ERROR) {
            fprintf(stderr, "Failed writing legacy winscope metadata to muxer (err=%d)\n", err);
            return err;
        }

        err = writeWinscopeMetadata(timestampsMonotonicUs, metaTrackIdx, muxer);
        if (err != NO_ERROR) {
            fprintf(stderr, "Failed writing winscope metadata to muxer (err=%d)\n", err);
            return err;
        }
    }
    return NO_ERROR;
}
748
749 /*
750 * Raw H.264 byte stream output requested. Send the output to stdout
751 * if desired. If the output is a tty, reconfigure it to avoid the
752 * CRLF line termination that we see with "adb shell" commands.
753 */
prepareRawOutput(const char * fileName)754 static FILE* prepareRawOutput(const char* fileName) {
755 FILE* rawFp = NULL;
756
757 if (strcmp(fileName, "-") == 0) {
758 if (gVerbose) {
759 fprintf(stderr, "ERROR: verbose output and '-' not compatible");
760 return NULL;
761 }
762 rawFp = stdout;
763 } else {
764 rawFp = fopen(fileName, "w");
765 if (rawFp == NULL) {
766 fprintf(stderr, "fopen raw failed: %s\n", strerror(errno));
767 return NULL;
768 }
769 }
770
771 int fd = fileno(rawFp);
772 if (isatty(fd)) {
773 // best effort -- reconfigure tty for "raw"
774 ALOGD("raw video output to tty (fd=%d)", fd);
775 struct termios term;
776 if (tcgetattr(fd, &term) == 0) {
777 cfmakeraw(&term);
778 if (tcsetattr(fd, TCSANOW, &term) == 0) {
779 ALOGD("tty successfully configured for raw");
780 }
781 }
782 }
783
784 return rawFp;
785 }
786
/*
 * Rounds an unsigned value down to the nearest even number.
 */
static inline uint32_t floorToEven(uint32_t num) {
    return num - (num % 2);
}
790
/*
 * Bundles the per-recording resources so the destructor tears them down
 * on every exit path, including early-error returns.
 */
struct RecordingData {
    sp<MediaCodec> encoder;     // video encoder; may be null for frame output modes
    // Configure virtual display.
    sp<IBinder> dpy;            // virtual display handle

    sp<Overlay> overlay;        // optional overlay filter

    // Teardown order: virtual display first, then overlay, then the encoder
    // (stop before release) -- NOTE(review): order looks deliberate; keep it.
    ~RecordingData() {
        if (dpy != nullptr) SurfaceComposerClient::destroyDisplay(dpy);
        if (overlay != nullptr) overlay->stop();
        if (encoder != nullptr) {
            encoder->stop();
            encoder->release();
        }
    }
};
807
808 /*
809 * Computes the maximum width and height across all physical displays.
810 */
getMaxDisplaySize()811 static ui::Size getMaxDisplaySize() {
812 const std::vector<PhysicalDisplayId> physicalDisplayIds =
813 SurfaceComposerClient::getPhysicalDisplayIds();
814 if (physicalDisplayIds.empty()) {
815 fprintf(stderr, "ERROR: Failed to get physical display ids\n");
816 return {};
817 }
818
819 ui::Size result;
820 for (auto& displayId : physicalDisplayIds) {
821 sp<IBinder> displayToken = SurfaceComposerClient::getPhysicalDisplayToken(displayId);
822 if (!displayToken) {
823 fprintf(stderr, "ERROR: failed to get display token\n");
824 continue;
825 }
826
827 ui::DisplayState displayState;
828 status_t err = SurfaceComposerClient::getDisplayState(displayToken, &displayState);
829 if (err != NO_ERROR) {
830 fprintf(stderr, "ERROR: failed to get display state\n");
831 continue;
832 }
833
834 result.height = std::max(result.height, displayState.layerStackSpaceRect.getHeight());
835 result.width = std::max(result.width, displayState.layerStackSpaceRect.getWidth());
836 }
837 return result;
838 }
839
840 /*
841 * Main "do work" start point.
842 *
843 * Configures codec, muxer, and virtual display, then starts moving bits
844 * around.
845 */
static status_t recordScreen(const char* fileName) {
    status_t err;

    // Configure signal handler.
    err = configureSignals();
    if (err != NO_ERROR) return err;

    // Start Binder thread pool. MediaCodec needs to be able to receive
    // messages from mediaserver.
    sp<ProcessState> self = ProcessState::self();
    self->startThreadPool();

    // Resolve which physical display to record (set via --display-id, or
    // the default display otherwise — see getPhysicalDisplayId).
    PhysicalDisplayId displayId;
    err = getPhysicalDisplayId(displayId);
    if (err != NO_ERROR) {
        fprintf(stderr, "ERROR: Failed to get display id\n");
        return err;
    }

    // Get main display parameters.
    sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(displayId);
    if (display == nullptr) {
        fprintf(stderr, "ERROR: no display\n");
        return NAME_NOT_FOUND;
    }

    DisplayMode displayMode;
    err = SurfaceComposerClient::getActiveDisplayMode(display, &displayMode);
    if (err != NO_ERROR) {
        fprintf(stderr, "ERROR: unable to get display config\n");
        return err;
    }

    ui::DisplayState displayState;
    err = SurfaceComposerClient::getDisplayState(display, &displayState);
    if (err != NO_ERROR) {
        fprintf(stderr, "ERROR: unable to get display state\n");
        return err;
    }

    if (displayState.layerStack == ui::INVALID_LAYER_STACK) {
        fprintf(stderr, "ERROR: INVALID_LAYER_STACK, please check your display state.\n");
        return INVALID_OPERATION;
    }

    // When recording an explicitly chosen display, use that display's own
    // bounds; otherwise size the capture to cover the largest display.
    const ui::Size layerStackSpaceRect =
        gPhysicalDisplayId ? displayState.layerStackSpaceRect : getMaxDisplaySize();
    if (gVerbose) {
        printf("Display is %dx%d @%.2ffps (orientation=%s), layerStack=%u\n",
                layerStackSpaceRect.getWidth(), layerStackSpaceRect.getHeight(),
                displayMode.refreshRate, toCString(displayState.orientation),
                displayState.layerStack.id);
        fflush(stdout);
    }

    // Encoder can't take odd number as config
    if (gVideoWidth == 0) {
        gVideoWidth = floorToEven(layerStackSpaceRect.getWidth());
    }
    if (gVideoHeight == 0) {
        gVideoHeight = floorToEven(layerStackSpaceRect.getHeight());
    }

    // RecordingData bundles the encoder, overlay, and virtual display so they
    // are torn down together when this function returns.
    RecordingData recordingData = RecordingData();
    // Configure and start the encoder.
    sp<FrameOutput> frameOutput;
    sp<IGraphicBufferProducer> encoderInputSurface;
    if (gOutputFormat != FORMAT_FRAMES && gOutputFormat != FORMAT_RAW_FRAMES) {
        err = prepareEncoder(displayMode.refreshRate, &recordingData.encoder, &encoderInputSurface);

        // Some encoders reject large resolutions; if the user didn't force a
        // size, retry once at the 720p fallback before giving up.
        if (err != NO_ERROR && !gSizeSpecified) {
            // fallback is defined for landscape; swap if we're in portrait
            bool needSwap = gVideoWidth < gVideoHeight;
            uint32_t newWidth = needSwap ? kFallbackHeight : kFallbackWidth;
            uint32_t newHeight = needSwap ? kFallbackWidth : kFallbackHeight;
            if (gVideoWidth != newWidth && gVideoHeight != newHeight) {
                ALOGV("Retrying with 720p");
                fprintf(stderr, "WARNING: failed at %dx%d, retrying at %dx%d\n",
                        gVideoWidth, gVideoHeight, newWidth, newHeight);
                gVideoWidth = newWidth;
                gVideoHeight = newHeight;
                err = prepareEncoder(displayMode.refreshRate, &recordingData.encoder,
                        &encoderInputSurface);
            }
        }
        if (err != NO_ERROR) return err;

        // From here on, we must explicitly release() the encoder before it goes
        // out of scope, or we will get an assertion failure from stagefright
        // later on in a different thread.
    } else {
        // We're not using an encoder at all. The "encoder input surface" we hand to
        // SurfaceFlinger will just feed directly to us.
        frameOutput = new FrameOutput();
        err = frameOutput->createInputSurface(gVideoWidth, gVideoHeight, &encoderInputSurface);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // Draw the "info" page by rendering a frame with GLES and sending
    // it directly to the encoder.
    // TODO: consider displaying this as a regular layer to avoid b/11697754
    if (gWantInfoScreen) {
        Overlay::drawInfoPage(encoderInputSurface);
    }

    // Configure optional overlay.
    sp<IGraphicBufferProducer> bufferProducer;
    if (gWantFrameTime) {
        // Send virtual display frames to an external texture.
        recordingData.overlay = new Overlay(gMonotonicTime);
        err = recordingData.overlay->start(encoderInputSurface, &bufferProducer);
        if (err != NO_ERROR) {
            return err;
        }
        if (gVerbose) {
            printf("Bugreport overlay created\n");
            fflush(stdout);
        }
    } else {
        // Use the encoder's input surface as the virtual display surface.
        bufferProducer = encoderInputSurface;
    }

    // We need to hold a reference to mirrorRoot during the entire recording to ensure it's not
    // cleaned up by SurfaceFlinger. When the reference is dropped, SurfaceFlinger will delete
    // the resource.
    sp<SurfaceControl> mirrorRoot;
    // Configure virtual display.
    err = prepareVirtualDisplay(displayState, bufferProducer, &recordingData.dpy, &mirrorRoot);
    if (err != NO_ERROR) {
        return err;
    }

    // Select the output sink: a container muxer for mp4/webm/3gpp, or a raw
    // FILE* for h264/frame formats.
    AMediaMuxer *muxer = nullptr;
    FILE* rawFp = NULL;
    switch (gOutputFormat) {
        case FORMAT_MP4:
        case FORMAT_WEBM:
        case FORMAT_3GPP: {
            // Configure muxer. We have to wait for the CSD blob from the encoder
            // before we can start it.
            err = unlink(fileName);
            if (err != 0 && errno != ENOENT) {
                fprintf(stderr, "ERROR: couldn't remove existing file\n");
                abort();
            }
            int fd = open(fileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
            if (fd < 0) {
                fprintf(stderr, "ERROR: couldn't open file\n");
                abort();
            }
            if (gOutputFormat == FORMAT_MP4) {
                muxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
            } else if (gOutputFormat == FORMAT_WEBM) {
                muxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_WEBM);
            } else {
                muxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_THREE_GPP);
            }
            // NOTE(review): fd is closed immediately after handing it to the
            // muxer — this assumes AMediaMuxer keeps its own reference to the
            // descriptor; confirm against the NDK muxer implementation.
            close(fd);
            if (gRotate) {
                AMediaMuxer_setOrientationHint(muxer, 90); // TODO: does this do anything?
            }
            break;
        }
        case FORMAT_H264:
        case FORMAT_FRAMES:
        case FORMAT_RAW_FRAMES: {
            rawFp = prepareRawOutput(fileName);
            if (rawFp == NULL) {
                return -1;
            }
            break;
        }
        default:
            fprintf(stderr, "ERROR: unknown format %d\n", gOutputFormat);
            abort();
    }

    if (gOutputFormat == FORMAT_FRAMES || gOutputFormat == FORMAT_RAW_FRAMES) {
        // TODO: if we want to make this a proper feature, we should output
        // an outer header with version info. Right now we never change
        // the frame size or format, so we could conceivably just send
        // the current frame header once and then follow it with an
        // unbroken stream of data.

        // Make the EGL context current again. This gets unhooked if we're
        // using "--bugreport" mode.
        // TODO: figure out if we can eliminate this
        frameOutput->prepareToCopy();

        while (!gStopRequested) {
            // Poll for frames, the same way we do for MediaCodec. We do
            // all of the work on the main thread.
            //
            // Ideally we'd sleep indefinitely and wake when the
            // stop was requested, but this will do for now. (It almost
            // works because wait() wakes when a signal hits, but we
            // need to handle the edge cases.)
            bool rawFrames = gOutputFormat == FORMAT_RAW_FRAMES;
            err = frameOutput->copyFrame(rawFp, 250000, rawFrames);
            if (err == ETIMEDOUT) {
                // No frame within the 250ms poll window; loop and try again.
                err = NO_ERROR;
            } else if (err != NO_ERROR) {
                ALOGE("Got error %d from copyFrame()", err);
                break;
            }
        }
    } else {
        // Main encoder loop.
        err = runEncoder(recordingData.encoder, muxer, rawFp, recordingData.dpy, displayState);
        if (err != NO_ERROR) {
            fprintf(stderr, "Encoder failed (err=%d)\n", err);
            // fall through to cleanup
        }

        if (gVerbose) {
            printf("Stopping encoder and muxer\n");
            fflush(stdout);
        }
    }

    // Shut everything down, starting with the producer side.
    encoderInputSurface = NULL;
    if (muxer != NULL) {
        // If we don't stop muxer explicitly, i.e. let the destructor run,
        // it may hang (b/11050628).
        // NOTE(review): this overwrites any earlier error from the encoder
        // loop with the muxer-stop result — verify that's intentional.
        err = AMediaMuxer_stop(muxer);
    } else if (rawFp != stdout) {
        fclose(rawFp);
    }

    return err;
}
1081
1082 /*
1083 * Sends a broadcast to the media scanner to tell it about the new video.
1084 *
1085 * This is optional, but nice to have.
1086 */
static status_t notifyMediaScanner(const char* fileName) {
    // need to do allocations before the fork()
    String8 fileUrl("file://");
    fileUrl.append(fileName);

    // Argument vector for "am broadcast", built up front so the child only
    // has to call execv().
    const char* kCommand = "/system/bin/am";
    const char* const argv[] = {
        kCommand,
        "broadcast",
        "-a",
        "android.intent.action.MEDIA_SCANNER_SCAN_FILE",
        "-d",
        fileUrl.string(),
        NULL
    };
    if (gVerbose) {
        printf("Executing:");
        for (int i = 0; argv[i] != NULL; i++) {
            printf(" %s", argv[i]);
        }
        putchar('\n');
        fflush(stdout);
    }

    pid_t pid = fork();
    if (pid < 0) {
        // fork failed; return the (negated) errno so the caller can log it
        int err = errno;
        ALOGW("fork() failed: %s", strerror(err));
        return -err;
    } else if (pid > 0) {
        // parent; wait for the child, mostly to make the verbose-mode output
        // look right, but also to check for and log failures
        int status;
        pid_t actualPid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
        if (actualPid != pid) {
            ALOGW("waitpid(%d) returned %d (errno=%d)", pid, actualPid, errno);
        } else if (status != 0) {
            // NOTE(review): "status" is the raw waitpid() status word, not
            // WEXITSTATUS(status); any nonzero word is logged as a failure.
            ALOGW("'am broadcast' exited with status=%d", status);
        } else {
            ALOGV("'am broadcast' exited successfully");
        }
    } else {
        // child process
        if (!gVerbose) {
            // non-verbose, suppress 'am' output
            ALOGV("closing stdout/stderr in child");
            int fd = open("/dev/null", O_WRONLY);
            if (fd >= 0) {
                dup2(fd, STDOUT_FILENO);
                dup2(fd, STDERR_FILENO);
                close(fd);
            }
        }
        // execv() replaces the child's image; it only returns on failure.
        execv(kCommand, const_cast<char* const*>(argv));
        ALOGE("execv(%s) failed: %s\n", kCommand, strerror(errno));
        exit(1);
    }
    return NO_ERROR;
}
1145
1146 /*
1147 * Parses a string of the form "1280x720".
1148 *
1149 * Returns true on success.
1150 */
/*
 * Parses a string of the form "1280x720".
 *
 * Rejects negative or out-of-range dimensions, which would otherwise wrap
 * silently when stored into the unsigned outputs (e.g. "-100x200" used to
 * yield a width of ~4 billion).
 *
 * Returns true on success.
 */
static bool parseWidthHeight(const char* widthHeight, uint32_t* pWidth,
        uint32_t* pHeight) {
    long width, height;
    char* end;

    // Must specify base 10, or "0x0" gets parsed differently.
    width = strtol(widthHeight, &end, 10);
    if (end == widthHeight || *end != 'x' || *(end+1) == '\0') {
        // invalid chars in width, or missing 'x', or missing height
        return false;
    }
    height = strtol(end + 1, &end, 10);
    if (*end != '\0') {
        // invalid chars in height
        return false;
    }
    if (width < 0 || height < 0) {
        // negative values would wrap in the uint32_t outputs
        return false;
    }
    if ((unsigned long) width > UINT32_MAX || (unsigned long) height > UINT32_MAX) {
        // values too large to represent in the uint32_t outputs
        return false;
    }

    *pWidth = width;
    *pHeight = height;
    return true;
}
1172
1173 /*
1174 * Accepts a string with a bare number ("4000000") or with a single-character
1175 * unit ("4m").
1176 *
1177 * Returns an error if parsing fails.
1178 */
parseValueWithUnit(const char * str,uint32_t * pValue)1179 static status_t parseValueWithUnit(const char* str, uint32_t* pValue) {
1180 long value;
1181 char* endptr;
1182
1183 value = strtol(str, &endptr, 10);
1184 if (*endptr == '\0') {
1185 // bare number
1186 *pValue = value;
1187 return NO_ERROR;
1188 } else if (toupper(*endptr) == 'M' && *(endptr+1) == '\0') {
1189 *pValue = value * 1000000; // check for overflow?
1190 return NO_ERROR;
1191 } else {
1192 fprintf(stderr, "Unrecognized value: %s\n", str);
1193 return UNKNOWN_ERROR;
1194 }
1195 }
1196
1197 /*
1198 * Dumps usage on stderr.
1199 */
usage()1200 static void usage() {
1201 fprintf(stderr,
1202 "Usage: screenrecord [options] <filename>\n"
1203 "\n"
1204 "Android screenrecord v%d.%d. Records the device's display to a .mp4 file.\n"
1205 "\n"
1206 "Options:\n"
1207 "--size WIDTHxHEIGHT\n"
1208 " Set the video size, e.g. \"1280x720\". Default is the device's main\n"
1209 " display resolution (if supported), 1280x720 if not. For best results,\n"
1210 " use a size supported by the AVC encoder.\n"
1211 "--bit-rate RATE\n"
1212 " Set the video bit rate, in bits per second. Value may be specified as\n"
1213 " bits or megabits, e.g. '4000000' is equivalent to '4M'. Default %dMbps.\n"
1214 "--bugreport\n"
1215 " Add additional information, such as a timestamp overlay, that is helpful\n"
1216 " in videos captured to illustrate bugs.\n"
1217 "--time-limit TIME\n"
1218 " Set the maximum recording time, in seconds. Default is %d. Set to 0\n"
1219 " to remove the time limit.\n"
1220 "--display-id ID\n"
1221 " specify the physical display ID to record. Default is the primary display.\n"
1222 " see \"dumpsys SurfaceFlinger --display-id\" for valid display IDs.\n"
1223 "--verbose\n"
1224 " Display interesting information on stdout.\n"
1225 "--help\n"
1226 " Show this message.\n"
1227 "\n"
1228 "Recording continues until Ctrl-C is hit or the time limit is reached.\n"
1229 "\n",
1230 kVersionMajor, kVersionMinor, gBitRate / 1000000, gTimeLimitSec
1231 );
1232 }
1233
1234 /*
1235 * Parses args and kicks things off.
1236 */
int main(int argc, char* const argv[]) {
    // Long-option table; each entry maps to the single-character key handled
    // in the switch below (there are no short options).
    static const struct option longOptions[] = {
        { "help", no_argument, NULL, 'h' },
        { "verbose", no_argument, NULL, 'v' },
        { "size", required_argument, NULL, 's' },
        { "bit-rate", required_argument, NULL, 'b' },
        { "time-limit", required_argument, NULL, 't' },
        { "bugreport", no_argument, NULL, 'u' },
        // "unofficial" options
        { "show-device-info", no_argument, NULL, 'i' },
        { "show-frame-time", no_argument, NULL, 'f' },
        { "rotate", no_argument, NULL, 'r' },
        { "output-format", required_argument, NULL, 'o' },
        { "codec-name", required_argument, NULL, 'N' },
        { "monotonic-time", no_argument, NULL, 'm' },
        { "persistent-surface", no_argument, NULL, 'p' },
        { "bframes", required_argument, NULL, 'B' },
        { "display-id", required_argument, NULL, 'd' },
        { NULL, 0, NULL, 0 }
    };

    while (true) {
        int optionIndex = 0;
        int ic = getopt_long(argc, argv, "", longOptions, &optionIndex);
        if (ic == -1) {
            // end of options
            break;
        }

        switch (ic) {
        case 'h':
            usage();
            return 0;
        case 'v':
            gVerbose = true;
            break;
        case 's':
            if (!parseWidthHeight(optarg, &gVideoWidth, &gVideoHeight)) {
                fprintf(stderr, "Invalid size '%s', must be width x height\n",
                        optarg);
                return 2;
            }
            if (gVideoWidth == 0 || gVideoHeight == 0) {
                fprintf(stderr,
                        "Invalid size %ux%u, width and height may not be zero\n",
                        gVideoWidth, gVideoHeight);
                return 2;
            }
            // remember that the user forced a size, so recordScreen() won't
            // fall back to 720p if the encoder rejects it
            gSizeSpecified = true;
            break;
        case 'b':
            if (parseValueWithUnit(optarg, &gBitRate) != NO_ERROR) {
                return 2;
            }
            if (gBitRate < kMinBitRate || gBitRate > kMaxBitRate) {
                fprintf(stderr,
                        "Bit rate %dbps outside acceptable range [%d,%d]\n",
                        gBitRate, kMinBitRate, kMaxBitRate);
                return 2;
            }
            break;
        case 't':
        {
            // Parse into 64 bits first so the range check against
            // uint32_t max is well-defined; 0 means "no limit".
            char *next;
            const int64_t timeLimitSec = strtol(optarg, &next, 10);
            if (next == optarg || (*next != '\0' && *next != ' ')) {
                fprintf(stderr, "Error parsing time limit argument\n");
                return 2;
            }
            if (timeLimitSec > std::numeric_limits<uint32_t>::max() || timeLimitSec < 0) {
                fprintf(stderr,
                        "Time limit %" PRIi64 "s outside acceptable range [0,%u] seconds\n",
                        timeLimitSec, std::numeric_limits<uint32_t>::max());
                return 2;
            }
            // 0 is mapped to the max representable limit ("unlimited")
            gTimeLimitSec = (timeLimitSec == 0) ?
                    std::numeric_limits<uint32_t>::max() : timeLimitSec;
            if (gVerbose) {
                printf("Time limit set to %u seconds\n", gTimeLimitSec);
                fflush(stdout);
            }
            break;
        }
        case 'u':
            // --bugreport implies both the info screen and the frame-time overlay
            gWantInfoScreen = true;
            gWantFrameTime = true;
            break;
        case 'i':
            gWantInfoScreen = true;
            break;
        case 'f':
            gWantFrameTime = true;
            break;
        case 'r':
            // experimental feature
            gRotate = true;
            break;
        case 'o':
            if (strcmp(optarg, "mp4") == 0) {
                gOutputFormat = FORMAT_MP4;
            } else if (strcmp(optarg, "h264") == 0) {
                gOutputFormat = FORMAT_H264;
            } else if (strcmp(optarg, "webm") == 0) {
                gOutputFormat = FORMAT_WEBM;
            } else if (strcmp(optarg, "3gpp") == 0) {
                gOutputFormat = FORMAT_3GPP;
            } else if (strcmp(optarg, "frames") == 0) {
                gOutputFormat = FORMAT_FRAMES;
            } else if (strcmp(optarg, "raw-frames") == 0) {
                gOutputFormat = FORMAT_RAW_FRAMES;
            } else {
                fprintf(stderr, "Unknown format '%s'\n", optarg);
                return 2;
            }
            break;
        case 'N':
            gCodecName = optarg;
            break;
        case 'm':
            gMonotonicTime = true;
            break;
        case 'p':
            gPersistentSurface = true;
            break;
        case 'B':
            if (parseValueWithUnit(optarg, &gBframes) != NO_ERROR) {
                return 2;
            }
            break;
        case 'd':
            // Accept the ID only if SurfaceFlinger can actually resolve it to
            // a display token. NOTE(review): atoll() returns 0 for non-numeric
            // input, so a garbage argument is rejected here only if ID 0 is
            // not a valid display — confirm that's acceptable.
            if (const auto id = android::DisplayId::fromValue<PhysicalDisplayId>(atoll(optarg));
                id && SurfaceComposerClient::getPhysicalDisplayToken(*id)) {
                gPhysicalDisplayId = *id;
                break;
            }

            fprintf(stderr, "Invalid physical display ID\n");
            return 2;
        default:
            if (ic != '?') {
                fprintf(stderr, "getopt_long returned unexpected value 0x%x\n", ic);
            }
            return 2;
        }
    }

    // Exactly one positional argument (the output filename) must remain.
    if (optind != argc - 1) {
        fprintf(stderr, "Must specify output file (see --help).\n");
        return 2;
    }

    const char* fileName = argv[optind];
    if (gOutputFormat == FORMAT_MP4) {
        // MediaMuxer tries to create the file in the constructor, but we don't
        // learn about the failure until muxer.start(), which returns a generic
        // error code without logging anything. We attempt to create the file
        // now for better diagnostics.
        int fd = open(fileName, O_CREAT | O_RDWR, 0644);
        if (fd < 0) {
            fprintf(stderr, "Unable to open '%s': %s\n", fileName, strerror(errno));
            return 1;
        }
        close(fd);
    }

    status_t err = recordScreen(fileName);
    if (err == NO_ERROR) {
        // Try to notify the media scanner. Not fatal if this fails.
        notifyMediaScanner(fileName);
    }
    ALOGD(err == NO_ERROR ? "success" : "failed");
    return (int) err;
}
1409