最近在做与视频处理有关的项目,涉及到从ipc抓拍图像的需求,抓拍的264关键帧经解码后,得到yuv420p格式的图像数据。由于客户需求是希望直接得到jpeg图像,因此需要将yuv420p的图像数据转换为jpeg进行输出。在网上找了挺多使用libjpeg库示例代码的,但是都无法直接使用,最后经过一些折腾,终于成功实现了使用libjpeg将yuv420p的数据转换为jpeg图像。
代码如下:
// outJpegFileName:输出的jpeg文件名称
// yuvData: yuv420格式的数据,其数据存储顺序为:y->u->v
// quaulity: 输出的jpeg图像质量,有效范围为0-100
int Yuv420PToJpeg(const char * outJpegFileName, unsigned char* yuvData, int image_width, int image_height, int quality)
{
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_compress(&cinfo);
FILE * outfile; // target file
if ((outfile = fopen(outJpegFileName, "wb")) == NULL)
{
fprintf(stderr, "can't open %s\n", outJpegFileName);
exit(1);
}
jpeg_stdio_dest(&cinfo, outfile);
cinfo.image_width = image_width;
cinfo.image_height = image_height;
cinfo.input_components = 3; // # of color components per pixel
cinfo.in_color_space = JCS_YCbCr; //colorspace of input image
jpeg_set_defaults(&cinfo);
jpeg_set_quality(&cinfo, quality, TRUE);
//
// cinfo.raw_data_in = TRUE;
cinfo.jpeg_color_space = JCS_YCbCr;
cinfo.comp_info[0].h_samp_factor = 2;
cinfo.comp_info[0].v_samp_factor = 2;
/
jpeg_start_compress(&cinfo, TRUE);
JSAMPROW row_pointer[1];
// 获取y、u、v三个分量各自数据的指针地址
unsigned char *ybase, *ubase, *vbase;
ybase = yuvData;
ubase = yuvData + image_width*image_height;
vbase = ubase + image_height*image_width / 4;
unsigned char *yuvLine = new unsigned char[image_width * 3];
memset(yuvLine, 0, image_width * 3);
int j = 0;
while (cinfo.next_scanline < cinfo.image_height)
{
int idx = 0;
for (int i = 0; i<image_width; i++)
{
// 分别取y、u、v的数据
yuvLine[idx++] = ybase[i + j * image_width];
yuvLine[idx++] = ubase[(j>>1) * image_width/2 + (i>>1) ];
yuvLine[idx++] = vbase[(j>>1) * image_width/2 + (i>>1) ];
}
row_pointer[0] = yuvLine;
jpeg_write_scanlines(&cinfo, row_pointer, 1);
j++;
}
jpeg_finish_compress(&cinfo);
jpeg_destroy_compress(&cinfo);
fclose(outfile);
delete[]yuvLine;
return 0;
}版权声明:本文为qileilee原创文章,遵循CC 4.0 BY-SA版权协议,转载请附上原文出处链接和本声明。