https://stackoverflow.com/questions/36212904/yuv-420-888-interpretation-on-samsung-galaxy-s7-camera2
// Shared RenderScript context. NOTE(review): it is never assigned in the code
// visible here — presumably created elsewhere via RenderScript.create(context)
// before YUV_420_888_toRGB is called; confirm against the enclosing class.
private static RenderScript renderScript;
/**
 * Converts a YUV_420_888 {@link Image} into an ARGB_8888 {@link Bitmap} using the
 * yuv420888.rs RenderScript kernel.
 *
 * <p>NOTE(review): {@code renderScript} must have been created elsewhere
 * (e.g. {@code RenderScript.create(context)}) before this method is called; this
 * method only lazily creates the script and the two reusable {@code Type.Builder}s.
 *
 * @param image  a YUV_420_888 image (e.g. from {@code MediaCodec#getOutputImage});
 *               its plane buffers are snapshotted, so the caller may close it afterwards
 * @param width  output bitmap width in pixels
 * @param height output bitmap height in pixels
 * @return a newly allocated ARGB_8888 bitmap of size width x height
 * @throws IllegalStateException if the shared RenderScript context is not initialized
 */
public Bitmap YUV_420_888_toRGB(Image image, int width, int height) {
    if (renderScript == null) {
        // Fail fast with a clear message instead of an opaque NPE inside RenderScript.
        throw new IllegalStateException("renderScript is not initialized; call RenderScript.create() first");
    }
    // Snapshot the three planes (the buffers are only valid while the Image is open).
    Image.Plane[] planes = image.getPlanes();
    byte[] y = planeBytes(planes[0]);
    byte[] u = planeBytes(planes[1]);
    byte[] v = planeBytes(planes[2]);
    // Relevant strides. Per the Image.Plane documentation the Y plane's pixel stride
    // is 1, and the U and V planes share the same row stride and pixel stride.
    int yRowStride = planes[0].getRowStride();
    int uvRowStride = planes[1].getRowStride();
    int uvPixelStride = planes[1].getPixelStride();
    // Lazily create the script and the reusable Type builders on first use.
    if (yuv420888 == null) {
        yuv420888 = new ScriptC_yuv420888(renderScript);
        typeUcharY = new Type.Builder(renderScript, Element.U8(renderScript));
        typeUcharUV = new Type.Builder(renderScript, Element.U8(renderScript));
    }
    // Y is a 2-D allocation covering the full row stride (padding included);
    // U and V are 1-D allocations that the kernel indexes directly.
    typeUcharY.setX(yRowStride).setY(height);
    Allocation yAlloc = Allocation.createTyped(renderScript, typeUcharY.create());
    yAlloc.copyFrom(y);
    yuv420888.set_ypsIn(yAlloc);
    // The U/V plane size is ((width/2)*pixelStride + padding) * (height/2)
    // = uvRowStride * (height/2), but on some devices (e.g. Galaxy S7) it is one
    // byte short — hence the allocation is sized by the actual array length.
    typeUcharUV.setX(u.length);
    Allocation uAlloc = Allocation.createTyped(renderScript, typeUcharUV.create());
    uAlloc.copyFrom(u);
    yuv420888.set_uIn(uAlloc);
    Allocation vAlloc = Allocation.createTyped(renderScript, typeUcharUV.create());
    vAlloc.copyFrom(v);
    yuv420888.set_vIn(vAlloc);
    // Hand the remaining parameters over to the kernel.
    yuv420888.set_picWidth(width);
    yuv420888.set_uvRowStride(uvRowStride);
    yuv420888.set_uvPixelStride(uvPixelStride);
    Bitmap outBitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    Allocation outAlloc = Allocation.createFromBitmap(renderScript, outBitmap,
            Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT);
    // Restrict the launch to width x height so the kernel never enters the Y plane's
    // right-side padding zone (columns width..yRowStride-1).
    Script.LaunchOptions lo = new Script.LaunchOptions();
    lo.setX(0, width);
    lo.setY(0, height);
    yuv420888.forEach_doConvert(outAlloc, lo);
    outAlloc.copyTo(outBitmap);
    return outBitmap;
}

/** Copies the remaining bytes of a plane's buffer into a freshly allocated array. */
private static byte[] planeBytes(Image.Plane plane) {
    ByteBuffer buffer = plane.getBuffer();
    byte[] bytes = new byte[buffer.remaining()];
    buffer.get(bytes);
    return bytes;
}
The yuv420888.rs RenderScript kernel:
// yuv420888.rs — RenderScript kernel converting YUV_420_888 plane data to ARGB pixels.
#pragma version(1)
#pragma rs java_package_name(hikvision.zhanyun.com.hikvision);
#pragma rs_fp_relaxed
// NOTE(review): width and height are settable from Java but are not read by the
// kernel below (the launch bounds come from Script.LaunchOptions) — confirm
// they are unused elsewhere before removing.
int32_t width;
int32_t height;
// picWidth: output width in pixels; uvPixelStride / uvRowStride: the U/V plane
// strides as reported by Image.Plane on the Java side.
uint picWidth, uvPixelStride, uvRowStride ;
// Input allocations: ypsIn is 2-D (yRowStride x height); uIn and vIn are 1-D.
rs_allocation ypsIn,uIn,vIn;
// The LaunchOptions ensure that the Kernel does not enter the padding zone of Y, so yRowStride can be ignored WITHIN the Kernel.
uchar4 __attribute__((kernel)) doConvert(uint32_t x, uint32_t y) {
// index for accessing the uIn's and vIn's — 4:2:0 subsampling means one U/V
// sample per 2x2 block of Y samples, hence the x/2 and y/2.
uint uvIndex= uvPixelStride * (x/2) + uvRowStride*(y/2);
// get the y,u,v values
uchar yps= rsGetElementAt_uchar(ypsIn, x, y);
uchar u= rsGetElementAt_uchar(uIn, uvIndex);
uchar v= rsGetElementAt_uchar(vIn, uvIndex);
// Integer fixed-point YUV -> RGB, BT.601-style coefficients:
//   R = Y + ~1.402*(V-128)
//   G = Y - ~0.355*(U-128) - ~0.714*(V-128)
//   B = Y + ~1.771*(U-128)
// (e.g. 1436/1024 ~= 1.402; the +44, +91, -179, -227 terms fold in the -128 offsets)
int4 argb;
argb.r = yps + v * 1436 / 1024 - 179;
argb.g = yps -u * 46549 / 131072 + 44 -v * 93604 / 131072 + 91;
argb.b = yps +u * 1814 / 1024 - 227;
argb.a = 255;
// Clamp each channel to [0,255] and pack into a uchar4 output pixel.
uchar4 out = convert_uchar4(clamp(argb, 0, 255));
return out;
}
Usage:
MediaCodec setup and decoding (details abbreviated):
// Create and configure an H.264 decoder that outputs flexible YUV images.
mediaDecode = MediaCodec.createDecoderByType("video/avc");
MediaFormat mediaFormat = MediaFormat.createVideoFormat("video/avc", 640, 480);
// BUGFIX: KEY_COLOR_FORMAT takes a MediaCodecInfo.CodecCapabilities constant, not
// an ImageFormat constant. COLOR_FormatYUV420Flexible is what makes
// getOutputImage() return YUV_420_888 Images. (The original also set
// KEY_BITRATE_MODE, which is an encoder-only key and a no-op on a decoder.)
mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT,
        MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Flexible);
mediaDecode.configure(mediaFormat, null, null, 0);
mediaDecode.start();
// Decode loop body — `frame` holds one H.264 access unit.
int inputBufferIndex = mediaDecode.dequeueInputBuffer(0);
if (inputBufferIndex >= 0) { // an input buffer is available
    ByteBuffer inputBuffer = mediaDecode.getInputBuffer(inputBufferIndex);
    inputBuffer.put(frame, 0, frame.length);
    // Presentation timestamp in microseconds.
    mediaDecode.queueInputBuffer(inputBufferIndex, 0, frame.length, System.nanoTime() / 1000, 0);
} else {
    Log.w(Log.TAG, "解码缓冲区不足");
}
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
int outputBufferIndex = mediaDecode.dequeueOutputBuffer(bufferInfo, 0); // index of the output buffer
if (outputBufferIndex >= 0) {
    Image img = mediaDecode.getOutputImage(outputBufferIndex);
    try {
        long t = System.currentTimeMillis();
        // NOTE(review): the format above is 640x480 but the conversion uses
        // 1920x1080 — both should match the actual stream resolution.
        Bitmap bitmap = YUV_420_888_toRGB(img, 1920, 1080);
        Log.e(Log.TAG, "转换YUV到位图: " + (System.currentTimeMillis() - t) + " " + bitmap.getHeight());
        // saveBitmapAsJPEG(bitmap, "/sdcard/zhjinrui/spgp/demo.bmp");
    } finally {
        // BUGFIX: the Image from getOutputImage() must be closed, or the codec
        // eventually runs out of output images and stalls.
        img.close();
        mediaDecode.releaseOutputBuffer(outputBufferIndex, true);
    }
}
This completes the YUV-to-RGB conversion pipeline.