/*
 *  Copyright 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc;

import android.graphics.Matrix;
import android.opengl.GLES20;
import android.opengl.GLException;
import androidx.annotation.Nullable;
import java.nio.ByteBuffer;
import org.webrtc.VideoFrame.I420Buffer;
import org.webrtc.VideoFrame.TextureBuffer;

/**
 * Class for converting OES textures to a YUV ByteBuffer. It can be constructed on any thread, but
 * should only be operated from a single thread with an active EGL context.
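 *
 * <p>Illustrative usage sketch; how the {@code VideoFrame} is obtained is outside this class and
 * only assumed here:
 * <pre>{@code
 * // 'frame' is a VideoFrame obtained elsewhere; its source is not part of this class.
 * YuvConverter converter = new YuvConverter(); // On a thread with a current EGL context.
 * VideoFrame.Buffer buffer = frame.getBuffer();
 * if (buffer instanceof VideoFrame.TextureBuffer) {
 *   VideoFrame.I420Buffer i420 = converter.convert((VideoFrame.TextureBuffer) buffer);
 *   if (i420 != null) {
 *     // ... consume the CPU-side I420 data ...
 *     i420.release();
 *   }
 * }
 * converter.release();
 * }</pre>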
 */
public final class YuvConverter {
  private static final String TAG = "YuvConverter";

  private static final String FRAGMENT_SHADER =
      // Difference in texture coordinate corresponding to one
      // sub-pixel in the x direction.
      "uniform vec2 xUnit;\n"
      // Color conversion coefficients, including constant term
      + "uniform vec4 coeffs;\n"
      + "\n"
      + "void main() {\n"
      // Since the alpha read from the texture is always 1, this could
      // be written as a mat4 x vec4 multiply. However, that seems to
      // give a worse framerate, possibly because the additional
      // multiplies by 1.0 consume resources.
      + "  gl_FragColor.r = coeffs.a + dot(coeffs.rgb,\n"
      + "      sample(tc - 1.5 * xUnit).rgb);\n"
      + "  gl_FragColor.g = coeffs.a + dot(coeffs.rgb,\n"
      + "      sample(tc - 0.5 * xUnit).rgb);\n"
      + "  gl_FragColor.b = coeffs.a + dot(coeffs.rgb,\n"
      + "      sample(tc + 0.5 * xUnit).rgb);\n"
      + "  gl_FragColor.a = coeffs.a + dot(coeffs.rgb,\n"
      + "      sample(tc + 1.5 * xUnit).rgb);\n"
      + "}\n";
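  // Each fragment of the shader above packs four consecutive output samples of one plane into the
  // R, G, B and A channels of a single RGBA pixel; the taps at -1.5, -0.5, +0.5 and +1.5 times
  // xUnit are the centers of those four source samples relative to the output pixel center.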

  private static class ShaderCallbacks implements GlGenericDrawer.ShaderCallbacks {
    // Y'UV444 to RGB888, see https://en.wikipedia.org/wiki/YUV#Y%E2%80%B2UV444_to_RGB888_conversion
    // We use the ITU-R BT.601 coefficients for Y, U and V.
    // The values in Wikipedia are inaccurate; the accurate values derived from the spec are:
    // Y = 0.299 * R + 0.587 * G + 0.114 * B
    // U = -0.168736 * R - 0.331264 * G + 0.5 * B + 0.5
    // V = 0.5 * R - 0.418688 * G - 0.081312 * B + 0.5
    // To map the Y-values to range [16-235] and U- and V-values to range [16-240], the matrix has
    // been multiplied with the matrix:
    // {{219 / 255, 0, 0, 16 / 255},
    // {0, 224 / 255, 0, 16 / 255},
    // {0, 0, 224 / 255, 16 / 255},
    // {0, 0, 0, 1}}
    private static final float[] yCoeffs =
        new float[] {0.256788f, 0.504129f, 0.0979059f, 0.0627451f};
    private static final float[] uCoeffs =
        new float[] {-0.148223f, -0.290993f, 0.439216f, 0.501961f};
    private static final float[] vCoeffs =
        new float[] {0.439216f, -0.367788f, -0.0714274f, 0.501961f};
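    // Worked check of the scaling: yCoeffs[0] = 0.299 * 219 / 255 ~ 0.256788 and the constant term
    // yCoeffs[3] = 16 / 255 ~ 0.0627451; likewise uCoeffs[3] = vCoeffs[3] = 0.5 * 224 / 255
    // + 16 / 255 ~ 0.501961.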

    private int xUnitLoc;
    private int coeffsLoc;

    private float[] coeffs;
    private float stepSize;

    public void setPlaneY() {
      coeffs = yCoeffs;
      stepSize = 1.0f;
    }

    public void setPlaneU() {
      coeffs = uCoeffs;
      stepSize = 2.0f;
    }

    public void setPlaneV() {
      coeffs = vCoeffs;
      stepSize = 2.0f;
    }

    @Override
    public void onNewShader(GlShader shader) {
      xUnitLoc = shader.getUniformLocation("xUnit");
      coeffsLoc = shader.getUniformLocation("coeffs");
    }

    @Override
    public void onPrepareShader(GlShader shader, float[] texMatrix, int frameWidth, int frameHeight,
        int viewportWidth, int viewportHeight) {
      GLES20.glUniform4fv(coeffsLoc, /* count= */ 1, coeffs, /* offset= */ 0);
      // Matrix * (1;0;0;0) / (width / stepSize). Note that OpenGL uses column major order.
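      // Example with assumed values: for an identity texMatrix, a 640-pixel-wide frame and
      // stepSize 2 (chroma planes), xUnit = (2 / 640, 0), i.e. one chroma sample step in
      // normalized texture coordinates.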
      GLES20.glUniform2f(
          xUnitLoc, stepSize * texMatrix[0] / frameWidth, stepSize * texMatrix[1] / frameWidth);
    }
  }

  private final ThreadUtils.ThreadChecker threadChecker = new ThreadUtils.ThreadChecker();
  private final GlTextureFrameBuffer i420TextureFrameBuffer =
      new GlTextureFrameBuffer(GLES20.GL_RGBA);
  private final ShaderCallbacks shaderCallbacks = new ShaderCallbacks();
  private final GlGenericDrawer drawer = new GlGenericDrawer(FRAGMENT_SHADER, shaderCallbacks);
  private final VideoFrameDrawer videoFrameDrawer;

  /**
   * This class should be constructed on a thread that has an active EGL context.
   */
  public YuvConverter() {
    this(new VideoFrameDrawer());
  }

  public YuvConverter(VideoFrameDrawer videoFrameDrawer) {
    this.videoFrameDrawer = videoFrameDrawer;
    threadChecker.detachThread();
  }

  /** Converts the texture buffer to I420. */
  @Nullable
  public I420Buffer convert(TextureBuffer inputTextureBuffer) {
    try {
      return convertInternal(inputTextureBuffer);
    } catch (GLException e) {
      Logging.w(TAG, "Failed to convert TextureBuffer", e);
    }
    return null;
  }

  private I420Buffer convertInternal(TextureBuffer inputTextureBuffer) {
    TextureBuffer preparedBuffer = (TextureBuffer) videoFrameDrawer.prepareBufferForViewportSize(
        inputTextureBuffer, inputTextureBuffer.getWidth(), inputTextureBuffer.getHeight());

    // We draw into a buffer laid out like
    //
    //    +---------+
    //    |         |
    //    |  Y      |
    //    |         |
    //    |         |
    //    +----+----+
    //    | U  | V  |
    //    |    |    |
    //    +----+----+
    //
    // In memory, we use the same stride for all of Y, U and V. The
    // U data starts at offset `height` * `stride` from the Y data,
    // and the V data starts at offset `stride` / 2 from the U
    // data, with rows of U and V data alternating.
    //
    // Now, it would have made sense to allocate a pixel buffer with
    // a single byte per pixel (EGL10.EGL_COLOR_BUFFER_TYPE,
    // EGL10.EGL_LUMINANCE_BUFFER), but that seems to be
    // unsupported by devices. So do the following hack: Allocate an
    // RGBA buffer of width `stride` / 4. To render each of these
    // large pixels, sample the texture at 4 different x coordinates
    // and store the results in the four components.
    //
    // Since the V data needs to start on a boundary of such a
    // larger pixel, it is not sufficient that `stride` is even; it
    // has to be a multiple of 8 pixels.
    final int frameWidth = preparedBuffer.getWidth();
    final int frameHeight = preparedBuffer.getHeight();
    final int stride = ((frameWidth + 7) / 8) * 8;
    final int uvHeight = (frameHeight + 1) / 2;
    // Total height of the combined memory layout.
    final int totalHeight = frameHeight + uvHeight;
    final ByteBuffer i420ByteBuffer = JniCommon.nativeAllocateByteBuffer(stride * totalHeight);
    // Viewport width is divided by four since we are squeezing in four color bytes in each RGBA
    // pixel.
    final int viewportWidth = stride / 4;
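    // Worked example with assumed dimensions: a 638x360 frame gives stride = 640, uvHeight = 180,
    // totalHeight = 540, a 640 * 540 = 345600 byte buffer, and a viewport 160 RGBA pixels wide.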

    // Produce a frame buffer starting at top-left corner, not bottom-left.
    final Matrix renderMatrix = new Matrix();
    renderMatrix.preTranslate(0.5f, 0.5f);
    renderMatrix.preScale(1f, -1f);
    renderMatrix.preTranslate(-0.5f, -0.5f);
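    // The three calls above mirror the quad vertically around the line y = 0.5 in texture space
    // (y -> 1 - y).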

    i420TextureFrameBuffer.setSize(viewportWidth, totalHeight);

    // Bind our framebuffer.
    GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, i420TextureFrameBuffer.getFrameBufferId());
    GlUtil.checkNoGLES2Error("glBindFramebuffer");

    // Draw Y.
    shaderCallbacks.setPlaneY();
    VideoFrameDrawer.drawTexture(drawer, preparedBuffer, renderMatrix, frameWidth, frameHeight,
        /* viewportX= */ 0, /* viewportY= */ 0, viewportWidth,
        /* viewportHeight= */ frameHeight);

    // Draw U.
    shaderCallbacks.setPlaneU();
    VideoFrameDrawer.drawTexture(drawer, preparedBuffer, renderMatrix, frameWidth, frameHeight,
        /* viewportX= */ 0, /* viewportY= */ frameHeight, viewportWidth / 2,
        /* viewportHeight= */ uvHeight);

    // Draw V.
    shaderCallbacks.setPlaneV();
    VideoFrameDrawer.drawTexture(drawer, preparedBuffer, renderMatrix, frameWidth, frameHeight,
        /* viewportX= */ viewportWidth / 2, /* viewportY= */ frameHeight, viewportWidth / 2,
        /* viewportHeight= */ uvHeight);

    GLES20.glReadPixels(0, 0, i420TextureFrameBuffer.getWidth(), i420TextureFrameBuffer.getHeight(),
        GLES20.GL_RGBA, GLES20.GL_UNSIGNED_BYTE, i420ByteBuffer);

    GlUtil.checkNoGLES2Error("YuvConverter.convert");

    // Restore normal framebuffer.
    GLES20.glBindFramebuffer(GLES20.GL_FRAMEBUFFER, 0);

    // Prepare Y, U, and V ByteBuffer slices.
    final int yPos = 0;
    final int uPos = yPos + stride * frameHeight;
    // Rows of U and V alternate in the buffer, so V data starts after the first row of U.
    final int vPos = uPos + stride / 2;
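    // Continuing the 638x360 example above: uPos = 640 * 360 = 230400 and vPos = 230400 + 320
    // = 230720.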

    i420ByteBuffer.position(yPos);
    i420ByteBuffer.limit(yPos + stride * frameHeight);
    final ByteBuffer dataY = i420ByteBuffer.slice();

    i420ByteBuffer.position(uPos);
    // The last row does not have padding.
    final int uvSize = stride * (uvHeight - 1) + stride / 2;
    i420ByteBuffer.limit(uPos + uvSize);
    final ByteBuffer dataU = i420ByteBuffer.slice();

    i420ByteBuffer.position(vPos);
    i420ByteBuffer.limit(vPos + uvSize);
    final ByteBuffer dataV = i420ByteBuffer.slice();

    preparedBuffer.release();

    return JavaI420Buffer.wrap(frameWidth, frameHeight, dataY, stride, dataU, stride, dataV, stride,
        () -> { JniCommon.nativeFreeByteBuffer(i420ByteBuffer); });
  }

  public void release() {
    threadChecker.checkIsOnValidThread();
    drawer.release();
    i420TextureFrameBuffer.release();
    videoFrameDrawer.release();
    // Allow this class to be reused.
    threadChecker.detachThread();
  }
}