| field     | value                                                              | date                      |
|-----------|--------------------------------------------------------------------|---------------------------|
| author    | Alexis Ballier <aballier@gentoo.org>                               | 2015-09-16 11:33:40 +0200 |
| committer | Alexis Ballier <aballier@gentoo.org>                               | 2015-09-16 11:33:40 +0200 |
| commit    | fc758ce2a30914bd74410abeec0a48c4bd7bd840                           |                           |
| tree      | 1f89f60d32ee4bcfdac39a8bfdd66c4f9028b30a /media-libs/libquicktime  |                           |
| parent    | app-misc/strigi: fix build with ffmpeg git master.                 |                           |
media-libs/libquicktime: fix build with ffmpeg git master.
Package-Manager: portage-2.2.20.1
Diffstat (limited to 'media-libs/libquicktime')
| mode       | file                                                             | lines |
|------------|------------------------------------------------------------------|-------|
| -rw-r--r-- | media-libs/libquicktime/files/libquicktime-1.2.4-ffmpeg29.patch  | 287   |
| -rw-r--r-- | media-libs/libquicktime/libquicktime-1.2.4-r1.ebuild             | 1     |
2 files changed, 288 insertions, 0 deletions
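The bulk of the change below is a mechanical rename: FFmpeg git master (the API level released as the 2.9/3.0 series) drops the long-deprecated unprefixed `PIX_FMT_*` constants and `enum PixelFormat` in favour of `AV_PIX_FMT_*` and `enum AVPixelFormat`, and removes `avcodec_alloc_frame()` and `avcodec_get_frame_defaults()`. As a rough sketch only — not part of this commit — a project that also has to keep building against older libavutil could carry a small compatibility shim instead of renaming every call site; the version cutoff and the `is_planar_422()` helper below are assumptions for illustration.

```c
/*
 * Illustrative shim, NOT from libquicktime or this commit: map the modern
 * AV_PIX_FMT_* spellings onto the legacy names when building against an
 * old libavutil.  The 51.42.0 cutoff (roughly FFmpeg 1.0) is an assumption.
 */
#include <libavutil/avutil.h>
#include <libavutil/pixfmt.h>

#if LIBAVUTIL_VERSION_INT < AV_VERSION_INT(51, 42, 0)
#define AVPixelFormat       PixelFormat
#define AV_PIX_FMT_NONE     PIX_FMT_NONE
#define AV_PIX_FMT_YUV420P  PIX_FMT_YUV420P
#define AV_PIX_FMT_YUV422P  PIX_FMT_YUV422P
#define AV_PIX_FMT_YUVJ422P PIX_FMT_YUVJ422P
#endif

/* Hypothetical helper: with the shim in place, code like the colormodel
 * tables in plugins/ffmpeg/video.c can use the new names unconditionally. */
static int is_planar_422(enum AVPixelFormat fmt)
{
    return fmt == AV_PIX_FMT_YUV422P || fmt == AV_PIX_FMT_YUVJ422P;
}
```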
```diff
diff --git a/media-libs/libquicktime/files/libquicktime-1.2.4-ffmpeg29.patch b/media-libs/libquicktime/files/libquicktime-1.2.4-ffmpeg29.patch
new file mode 100644
index 000000000000..96a323aa8170
--- /dev/null
+++ b/media-libs/libquicktime/files/libquicktime-1.2.4-ffmpeg29.patch
@@ -0,0 +1,287 @@
+Index: libquicktime-1.2.4/plugins/ffmpeg/video.c
+===================================================================
+--- libquicktime-1.2.4.orig/plugins/ffmpeg/video.c
++++ libquicktime-1.2.4/plugins/ffmpeg/video.c
+@@ -37,10 +37,10 @@
+ #endif
+
+
+-#ifdef PIX_FMT_YUV422P10
+-#define PIX_FMT_YUV422P10_OR_DUMMY PIX_FMT_YUV422P10
++#ifdef AV_PIX_FMT_YUV422P10
++#define AV_PIX_FMT_YUV422P10_OR_DUMMY AV_PIX_FMT_YUV422P10
+ #else
+-#define PIX_FMT_YUV422P10_OR_DUMMY -1234
++#define AV_PIX_FMT_YUV422P10_OR_DUMMY -1234
+ #endif
+
+ #if LIBAVCODEC_VERSION_INT >= ((54<<16)|(1<<8)|0)
+@@ -90,9 +90,9 @@ typedef struct
+ int imx_bitrate;
+ int imx_strip_vbi;
+
+- /* In some cases FFMpeg would report something like PIX_FMT_YUV422P, while
+- we would like to treat it as PIX_FMT_YUVJ422P. It's only used for decoding */
+- enum PixelFormat reinterpret_pix_fmt;
++ /* In some cases FFMpeg would report something like AV_PIX_FMT_YUV422P, while
++ we would like to treat it as AV_PIX_FMT_YUVJ422P. It's only used for decoding */
++ enum AVPixelFormat reinterpret_pix_fmt;
+
+ int is_imx;
+ int y_offset;
+@@ -137,42 +137,42 @@ typedef struct
+
+ static const struct
+ {
+- enum PixelFormat ffmpeg_id;
++ enum AVPixelFormat ffmpeg_id;
+ int lqt_id;
+ int exact;
+ }
+ colormodels[] =
+ {
+- { PIX_FMT_YUV420P, BC_YUV420P, 1 }, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
++ { AV_PIX_FMT_YUV420P, BC_YUV420P, 1 }, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
+ #if LIBAVUTIL_VERSION_INT < (50<<16)
+- { PIX_FMT_YUV422, BC_YUV422, 1 },
++ { AV_PIX_FMT_YUV422, BC_YUV422, 1 },
+ #else
+- { PIX_FMT_YUYV422, BC_YUV422, 1 },
++ { AV_PIX_FMT_YUYV422, BC_YUV422, 1 },
+ #endif
+- { PIX_FMT_RGB24, BC_RGB888, 1 }, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
+- { PIX_FMT_BGR24, BC_BGR888, 1 }, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
+- { PIX_FMT_YUV422P, BC_YUV422P, 1 }, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
+- { PIX_FMT_YUV444P, BC_YUV444P, 1 }, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
+- { PIX_FMT_YUV411P, BC_YUV411P, 1 }, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
+- { PIX_FMT_YUV422P16, BC_YUV422P16, 1 }, ///< Planar 16 bit YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
+-#ifdef PIX_FMT_YUV422P10
+- { PIX_FMT_YUV422P10, BC_YUV422P10, 1 }, ///< 10 bit samples in uint16_t containers, planar 4:2:2
+-#endif
+- { PIX_FMT_RGB565, BC_RGB565, 1 }, ///< always stored in cpu endianness
+- { PIX_FMT_YUVJ420P, BC_YUVJ420P, 1 }, ///< Planar YUV 4:2:0 full scale (jpeg)
+- { PIX_FMT_YUVJ422P, BC_YUVJ422P, 1 }, ///< Planar YUV 4:2:2 full scale (jpeg)
+- { PIX_FMT_YUVJ444P, BC_YUVJ444P, 1 }, ///< Planar YUV 4:4:4 full scale (jpeg)
++ { AV_PIX_FMT_RGB24, BC_RGB888, 1 }, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
++ { AV_PIX_FMT_BGR24, BC_BGR888, 1 }, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
++ { AV_PIX_FMT_YUV422P, BC_YUV422P, 1 }, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
++ { AV_PIX_FMT_YUV444P, BC_YUV444P, 1 }, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
++ { AV_PIX_FMT_YUV411P, BC_YUV411P, 1 }, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
++ { AV_PIX_FMT_YUV422P16, BC_YUV422P16, 1 }, ///< Planar 16 bit YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
++#ifdef AV_PIX_FMT_YUV422P10
++ { AV_PIX_FMT_YUV422P10, BC_YUV422P10, 1 }, ///< 10 bit samples in uint16_t containers, planar 4:2:2
++#endif
++ { AV_PIX_FMT_RGB565, BC_RGB565, 1 }, ///< always stored in cpu endianness
++ { AV_PIX_FMT_YUVJ420P, BC_YUVJ420P, 1 }, ///< Planar YUV 4:2:0 full scale (jpeg)
++ { AV_PIX_FMT_YUVJ422P, BC_YUVJ422P, 1 }, ///< Planar YUV 4:2:2 full scale (jpeg)
++ { AV_PIX_FMT_YUVJ444P, BC_YUVJ444P, 1 }, ///< Planar YUV 4:4:4 full scale (jpeg)
+ #if LIBAVUTIL_VERSION_INT < (50<<16)
+- { PIX_FMT_RGBA32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
++ { AV_PIX_FMT_RGBA32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
+ #else
+- { PIX_FMT_RGB32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
++ { AV_PIX_FMT_RGB32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
+ #endif
+- { PIX_FMT_RGB555, BC_RGB888, 0 }, ///< always stored in cpu endianness, most significant bit to 1
+- { PIX_FMT_GRAY8, BC_RGB888, 0 },
+- { PIX_FMT_MONOWHITE, BC_RGB888, 0 }, ///< 0 is white
+- { PIX_FMT_MONOBLACK, BC_RGB888, 0 }, ///< 0 is black
+- { PIX_FMT_PAL8, BC_RGB888, 0 }, ///< 8 bit with RGBA palette
+- { PIX_FMT_YUV410P, BC_YUV420P, 0 }, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
++ { AV_PIX_FMT_RGB555, BC_RGB888, 0 }, ///< always stored in cpu endianness, most significant bit to 1
++ { AV_PIX_FMT_GRAY8, BC_RGB888, 0 },
++ { AV_PIX_FMT_MONOWHITE, BC_RGB888, 0 }, ///< 0 is white
++ { AV_PIX_FMT_MONOBLACK, BC_RGB888, 0 }, ///< 0 is black
++ { AV_PIX_FMT_PAL8, BC_RGB888, 0 }, ///< 8 bit with RGBA palette
++ { AV_PIX_FMT_YUV410P, BC_YUV420P, 0 }, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
+ };
+
+ static const struct
+@@ -343,16 +343,16 @@ static int lqt_tenbit_dnxhd_supported(AV
+ if (!codec->pix_fmts)
+ return 0;
+
+- for (i = 0; codec->pix_fmts[i] != PIX_FMT_NONE; ++i)
++ for (i = 0; codec->pix_fmts[i] != AV_PIX_FMT_NONE; ++i)
+ {
+- if (codec->pix_fmts[i] == PIX_FMT_YUV422P10_OR_DUMMY)
++ if (codec->pix_fmts[i] == AV_PIX_FMT_YUV422P10_OR_DUMMY)
+ return 1;
+ }
+
+ return 0;
+ }
+
+-static enum PixelFormat lqt_ffmpeg_get_ffmpeg_colormodel(int id)
++static enum AVPixelFormat lqt_ffmpeg_get_ffmpeg_colormodel(int id)
+ {
+ int i;
+
+@@ -361,10 +361,10 @@ static enum PixelFormat lqt_ffmpeg_get_f
+ if(colormodels[i].lqt_id == id)
+ return colormodels[i].ffmpeg_id;
+ }
+- return PIX_FMT_NB;
++ return AV_PIX_FMT_NB;
+ }
+
+-static int lqt_ffmpeg_get_lqt_colormodel(enum PixelFormat id, int * exact)
++static int lqt_ffmpeg_get_lqt_colormodel(enum AVPixelFormat id, int * exact)
+ {
+ int i;
+
+@@ -402,24 +402,24 @@ static void lqt_ffmpeg_setup_decoding_co
+ /* First we try codec-specific colormodel matching. */
+ if(codec->decoder->id == AV_CODEC_ID_DNXHD)
+ {
+- /* FFMpeg supports PIX_FMT_YUV422P and PIX_FMT_YUV422P10 for DNxHD, which
+- we sometimes interpret as PIX_FMT_YUVJ422P and PIX_FMT_YUVJ422P10. */
+- if (codec->avctx->pix_fmt == PIX_FMT_YUV422P || codec->avctx->pix_fmt == PIX_FMT_YUV422P10_OR_DUMMY)
++ /* FFMpeg supports AV_PIX_FMT_YUV422P and AV_PIX_FMT_YUV422P10 for DNxHD, which
++ we sometimes interpret as AV_PIX_FMT_YUVJ422P and AV_PIX_FMT_YUVJ422P10. */
++ if (codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P || codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P10_OR_DUMMY)
+ {
+- int p10 = (codec->avctx->pix_fmt == PIX_FMT_YUV422P10_OR_DUMMY);
++ int p10 = (codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P10_OR_DUMMY);
+ *exact = 1;
+ if (lqt_ffmpeg_get_avid_yuv_range(vtrack->track) == AVID_FULL_YUV_RANGE)
+ {
+ vtrack->stream_cmodel = p10 ? BC_YUVJ422P10 : BC_YUVJ422P;
+- codec->reinterpret_pix_fmt = p10 ? PIX_FMT_YUV422P10_OR_DUMMY : PIX_FMT_YUVJ422P;
+- // Note: reinterpret_pix_fmt should really be PIX_FMT_YUVJ422P10, except
++ codec->reinterpret_pix_fmt = p10 ? AV_PIX_FMT_YUV422P10_OR_DUMMY : AV_PIX_FMT_YUVJ422P;
++ // Note: reinterpret_pix_fmt should really be AV_PIX_FMT_YUVJ422P10, except
+ // there is no such colormodel in FFMpeg. Fortunately, it's not a problem
+ // in this case, as reinterpret_pix_fmt is only used when *exact == 0.
+ }
+ else
+ {
+ vtrack->stream_cmodel = p10 ? BC_YUV422P10 : BC_YUV422P;
+- codec->reinterpret_pix_fmt = p10 ? PIX_FMT_YUV422P10_OR_DUMMY : PIX_FMT_YUV422P;
++ codec->reinterpret_pix_fmt = p10 ? AV_PIX_FMT_YUV422P10_OR_DUMMY : AV_PIX_FMT_YUV422P;
+ }
+ return;
+ }
+@@ -440,14 +440,14 @@ static void lqt_ffmpeg_setup_encoding_co
+
+ if (codec->encoder->id == AV_CODEC_ID_DNXHD)
+ {
+- /* FFMpeg's DNxHD encoder only supports PIX_FMT_YUV422P and PIX_FMT_YUV422P10
+- and doesn't know anything about PIX_FMT_YUVJ422P and PIX_FMT_YUVJ422P10
++ /* FFMpeg's DNxHD encoder only supports AV_PIX_FMT_YUV422P and AV_PIX_FMT_YUV422P10
++ and doesn't know anything about AV_PIX_FMT_YUVJ422P and AV_PIX_FMT_YUVJ422P10
+ (in fact, the latter doesn't even exist) */
+- codec->avctx->pix_fmt = PIX_FMT_YUV422P;
++ codec->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
+ if (vtrack->stream_cmodel == BC_YUV422P10 || vtrack->stream_cmodel == BC_YUVJ422P10)
+ {
+ if (lqt_tenbit_dnxhd_supported(codec->encoder))
+- codec->avctx->pix_fmt = PIX_FMT_YUV422P10_OR_DUMMY;
++ codec->avctx->pix_fmt = AV_PIX_FMT_YUV422P10_OR_DUMMY;
+ }
+ }
+ }
+@@ -458,7 +458,7 @@ static void lqt_ffmpeg_setup_encoding_co
+ /* From avcodec.h: */
+
+ /*
+- * PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
++ * AV_PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little endian CPU architectures and ARGB on
+@@ -530,7 +530,7 @@ static void convert_rgba_to_argb(uint8_t
+ */
+
+ static void convert_image_decode(quicktime_ffmpeg_video_codec_t *codec,
+- AVFrame * in_frame, enum PixelFormat in_format,
++ AVFrame * in_frame, enum AVPixelFormat in_format,
+ unsigned char ** out_frame, int out_format,
+ int width, int height, int row_span, int row_span_uv)
+ {
+@@ -547,9 +547,9 @@ static void convert_image_decode(quickti
+ * RGBA format like in ffmpeg??
+ */
+ #if LIBAVUTIL_VERSION_INT < (50<<16)
+- if((in_format == PIX_FMT_RGBA32) && (out_format == BC_RGBA8888))
++ if((in_format == AV_PIX_FMT_RGBA32) && (out_format == BC_RGBA8888))
+ #else
+- if((in_format == PIX_FMT_RGB32) && (out_format == BC_RGBA8888))
++ if((in_format == AV_PIX_FMT_RGB32) && (out_format == BC_RGBA8888))
+ #endif
+ {
+ convert_image_decode_rgba(in_frame, out_frame, width, height, codec->y_offset);
+@@ -829,7 +829,7 @@ static int lqt_ffmpeg_decode_video(quick
+ if(avcodec_open2(codec->avctx, codec->decoder, NULL) != 0)
+ return -1;
+ #endif
+- codec->frame = avcodec_alloc_frame();
++ codec->frame = av_frame_alloc();
+ vtrack->stream_cmodel = LQT_COLORMODEL_NONE;
+ codec->initialized = 1;
+ }
+@@ -929,10 +929,10 @@ static int lqt_ffmpeg_decode_video(quick
+ #ifdef HAVE_LIBSWSCALE
+
+ #if LIBAVUTIL_VERSION_INT < (50<<16)
+- if(!((codec->avctx->pix_fmt == PIX_FMT_RGBA32) &&
++ if(!((codec->avctx->pix_fmt == AV_PIX_FMT_RGBA32) &&
+ (vtrack->stream_cmodel == BC_RGBA8888)))
+ #else
+- if(!((codec->avctx->pix_fmt == PIX_FMT_RGB32) &&
++ if(!((codec->avctx->pix_fmt == AV_PIX_FMT_RGB32) &&
+ (vtrack->stream_cmodel == BC_RGBA8888)))
+ #endif
+ {
+@@ -1318,7 +1318,7 @@ static int lqt_ffmpeg_encode_video(quick
+
+ if(!codec->initialized)
+ {
+- codec->frame = avcodec_alloc_frame();
++ codec->frame = av_frame_alloc();
+
+ /* time_base is 1/framerate for constant framerate */
+
+@@ -1396,9 +1396,9 @@ static int lqt_ffmpeg_encode_video(quick
+ if(vtrack->stream_cmodel == BC_RGBA8888)
+ {
+ /* Libquicktime doesn't natively support a color model equivalent
+- to PIX_FMT_ARGB, which is required for QTRLE with alpha channel.
++ to AV_PIX_FMT_ARGB, which is required for QTRLE with alpha channel.
+ So, we use BC_RGBA8888 and do ad hoc conversion below. */
+- codec->avctx->pix_fmt = PIX_FMT_ARGB;
++ codec->avctx->pix_fmt = AV_PIX_FMT_ARGB;
+ vtrack->track->mdia.minf.stbl.stsd.table[0].depth = 32;
+ }
+ }
+@@ -1467,7 +1467,7 @@ static int lqt_ffmpeg_encode_video(quick
+ }
+ // codec->lqt_colormodel = ffmepg_2_lqt(codec->com.ffcodec_enc);
+
+- if(codec->y_offset != 0 || codec->avctx->pix_fmt == PIX_FMT_ARGB)
++ if(codec->y_offset != 0 || codec->avctx->pix_fmt == AV_PIX_FMT_ARGB)
+ {
+ if(!codec->tmp_rows)
+ {
+@@ -1492,7 +1492,7 @@ static int lqt_ffmpeg_encode_video(quick
+ vtrack->stream_cmodel,
+ 0, 0, 0, codec->y_offset);
+ }
+- else if(codec->avctx->pix_fmt == PIX_FMT_ARGB)
++ else if(codec->avctx->pix_fmt == AV_PIX_FMT_ARGB)
+ {
+ convert_rgba_to_argb(row_pointers[0], vtrack->stream_row_span,
+ codec->tmp_rows[0], codec->tmp_row_span,
+Index: libquicktime-1.2.4/plugins/ffmpeg/audio.c
+===================================================================
+--- libquicktime-1.2.4.orig/plugins/ffmpeg/audio.c
++++ libquicktime-1.2.4/plugins/ffmpeg/audio.c
+@@ -1266,7 +1266,7 @@ static int lqt_ffmpeg_encode_audio(quick
+ pkt.data = codec->chunk_buffer;
+ pkt.size = codec->chunk_buffer_alloc;
+
+- avcodec_get_frame_defaults(&f);
++ av_frame_unref(&f);
+ f.nb_samples = codec->avctx->frame_size;
+
+ avcodec_fill_audio_frame(&f, channels, codec->avctx->sample_fmt,
diff --git a/media-libs/libquicktime/libquicktime-1.2.4-r1.ebuild b/media-libs/libquicktime/libquicktime-1.2.4-r1.ebuild
index ebfed851d06f..67ab74cd1e64
--- a/media-libs/libquicktime/libquicktime-1.2.4-r1.ebuild
+++ b/media-libs/libquicktime/libquicktime-1.2.4-r1.ebuild
@@ -53,6 +53,7 @@ DOCS="ChangeLog README TODO"
 src_prepare() {
 	epatch "${FILESDIR}"/${P}+libav-9.patch \
 		"${FILESDIR}"/${P}-ffmpeg2.patch
+	has_version '>=media-video/ffmpeg-2.9' && epatch "${FILESDIR}"/${P}-ffmpeg29.patch
 	for FILE in lqt_ffmpeg.c video.c audio.c ; do
 		sed -i -e "s:CODEC_ID_:AV_&:g" "${S}/plugins/ffmpeg/${FILE}" || die
```
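Besides the rename, the only behavioural substitutions in the patch are the frame helpers: `avcodec_alloc_frame()` becomes `av_frame_alloc()` and `avcodec_get_frame_defaults()` becomes `av_frame_unref()`, both removed from libavcodec in git master. A minimal sketch of how such calls can be version-gated is shown below; the wrapper names and the 55.28.1 cutoff are assumptions, not code taken from libquicktime.

```c
/*
 * Sketch under stated assumptions: one wrapper per frame operation so the
 * plugin code compiles against both old and new libavcodec.  The cutoff
 * (roughly FFmpeg 2.0, where av_frame_alloc() became reachable through
 * avcodec.h) is approximate.
 */
#include <libavcodec/avcodec.h>

static AVFrame *compat_frame_alloc(void)
{
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55, 28, 1)
    return av_frame_alloc();       /* frames are now owned by libavutil */
#else
    return avcodec_alloc_frame();  /* removed in FFmpeg git master */
#endif
}

static void compat_frame_reset(AVFrame *f)
{
#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(55, 28, 1)
    av_frame_unref(f);             /* resets f to its default, unreferenced state */
#else
    avcodec_get_frame_defaults(f);
#endif
}
```

Note that in the ebuild hunk the new patch is applied only when `>=media-video/ffmpeg-2.9` is installed (`has_version '>=media-video/ffmpeg-2.9' && epatch ...`), so builds against older FFmpeg or libav keep using the unpatched sources.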