From 3ddb07f581e29349853158efe7c7101091251cee Mon Sep 17 00:00:00 2001
From: Greg Cotten
Date: Mon, 4 Nov 2019 14:49:58 -0800
Subject: [PATCH 1/6] add all possible pixel formats

---
 Sources/SwiftFFmpeg/VideoUtil.swift | 378 ++++++++++++++++++++++++++--
 1 file changed, 359 insertions(+), 19 deletions(-)

diff --git a/Sources/SwiftFFmpeg/VideoUtil.swift b/Sources/SwiftFFmpeg/VideoUtil.swift
index a51e715..e9b8f1b 100644
--- a/Sources/SwiftFFmpeg/VideoUtil.swift
+++ b/Sources/SwiftFFmpeg/VideoUtil.swift
@@ -65,34 +65,36 @@ extension AVPixelFormat {
   public static let GRAY8 = AV_PIX_FMT_GRAY8
   /// Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
   public static let MONOWHITE = AV_PIX_FMT_MONOWHITE
-  /// Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+  /// Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
   public static let MONOBLACK = AV_PIX_FMT_MONOBLACK
   /// 8 bits with AV_PIX_FMT_RGB32 palette
   public static let PAL8 = AV_PIX_FMT_PAL8
+  /// planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range
+  public static let YUVJ420P = AV_PIX_FMT_YUVJ420P
+  /// planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range
+  public static let YUVJ422P = AV_PIX_FMT_YUVJ422P
+  /// planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range
+  public static let YUVJ444P = AV_PIX_FMT_YUVJ444P
   /// packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
   public static let UYVY422 = AV_PIX_FMT_UYVY422
   /// packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
   public static let UYYVYY411 = AV_PIX_FMT_UYYVYY411
   /// packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
   public static let BGR8 = AV_PIX_FMT_BGR8
-  /// packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte
-  /// is the one composed by the 4 msb bits
+  /// packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
   public static let BGR4 = AV_PIX_FMT_BGR4
   /// packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
   public static let BGR4_BYTE = AV_PIX_FMT_BGR4_BYTE
   /// packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
   public static let RGB8 = AV_PIX_FMT_RGB8
-  /// packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte
-  /// is the one composed by the 4 msb bits
+  /// packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
   public static let RGB4 = AV_PIX_FMT_RGB4
   /// packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
   public static let RGB4_BYTE = AV_PIX_FMT_RGB4_BYTE
-  /// planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved
-  /// (first byte U and the following byte V)
+  /// planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
   public static let NV12 = AV_PIX_FMT_NV12
   /// as above, but U and V bytes are swapped
   public static let NV21 = AV_PIX_FMT_NV21
-
   /// packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
   public static let ARGB = AV_PIX_FMT_ARGB
   /// packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
@@ -101,20 +103,20 @@ extension AVPixelFormat {
   public static let ABGR = AV_PIX_FMT_ABGR
   /// packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
   public static let BGRA = AV_PIX_FMT_BGRA
-
   /// Y , 16bpp, big-endian
   public static let GRAY16BE = AV_PIX_FMT_GRAY16BE
   /// Y , 16bpp, little-endian
   public static let GRAY16LE = AV_PIX_FMT_GRAY16LE
   /// planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
   public static let YUV440P = AV_PIX_FMT_YUV440P
+  /// planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range
+  public static let YUVJ440P = AV_PIX_FMT_YUVJ440P
   /// planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
   public static let YUVA420P = AV_PIX_FMT_YUVA420P
   /// packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
   public static let RGB48BE = AV_PIX_FMT_RGB48BE
   /// packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
   public static let RGB48LE = AV_PIX_FMT_RGB48LE
-
   /// packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
   public static let RGB565BE = AV_PIX_FMT_RGB565BE
   /// packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
@@ -123,7 +125,6 @@ extension AVPixelFormat {
   public static let RGB555BE = AV_PIX_FMT_RGB555BE
   /// packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined
   public static let RGB555LE = AV_PIX_FMT_RGB555LE
-
   /// packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
   public static let BGR565BE = AV_PIX_FMT_BGR565BE
   /// packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
@@ -133,16 +134,32 @@ extension AVPixelFormat {
   /// packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined
   public static let BGR555LE = AV_PIX_FMT_BGR555LE
+  /** @name Deprecated pixel formats */
+  /**@{*/
+  /// HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+  public static let VAAPI_MOCO = AV_PIX_FMT_VAAPI_MOCO
+  /// HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+  public static let VAAPI_IDCT = AV_PIX_FMT_VAAPI_IDCT
+  /// HW decoding through VA API, Picture.data[3] contains a VASurfaceID
+  public static let VAAPI_VLD = AV_PIX_FMT_VAAPI_VLD
+  /**@}*/
+
+  /**
+   * Hardware acceleration through VA-API, data[3] contains a
+   * VASurfaceID.
+   */
+  public static let VAAPI = AV_PIX_FMT_VAAPI
+
   /// planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
-  public static let YUV420P16LE = AV_PIX_FMT_YUV420P16LE ///<
+  public static let YUV420P16LE = AV_PIX_FMT_YUV420P16LE
   /// planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
-  public static let YUV420P16BE = AV_PIX_FMT_YUV420P16BE ///
+  public static let YUV420P16BE = AV_PIX_FMT_YUV420P16BE
   /// planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
-  public static let YUV422P16LE = AV_PIX_FMT_YUV422P16LE ///
+  public static let YUV422P16LE = AV_PIX_FMT_YUV422P16LE
   /// planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
-  public static let YUV422P16BE = AV_PIX_FMT_YUV422P16BE ///
+  public static let YUV422P16BE = AV_PIX_FMT_YUV422P16BE
   /// planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
-  public static let YUV444P16LE = AV_PIX_FMT_YUV444P16LE ///
+  public static let YUV444P16LE = AV_PIX_FMT_YUV444P16LE
   /// planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
   public static let YUV444P16BE = AV_PIX_FMT_YUV444P16BE
   /// HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
@@ -169,9 +186,332 @@ extension AVPixelFormat {
   /// packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
   public static let BGR48LE = AV_PIX_FMT_BGR48LE
-  /// hardware decoding through VideoToolbox
-  public static let videoToolbox = AV_PIX_FMT_VIDEOTOOLBOX
-
+  /**
+   * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
+   * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
+   * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+   */
+  /// planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+  public static let YUV420P9BE = AV_PIX_FMT_YUV420P9BE
+  /// planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+  public static let YUV420P9LE = AV_PIX_FMT_YUV420P9LE
+  /// planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+  public static let YUV420P10BE = AV_PIX_FMT_YUV420P10BE
+  /// planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+  public static let YUV420P10LE = AV_PIX_FMT_YUV420P10LE
+  /// planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+  public static let YUV422P10BE = AV_PIX_FMT_YUV422P10BE
+  /// planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+  public static let YUV422P10LE = AV_PIX_FMT_YUV422P10LE
+  /// planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+  public static let YUV444P9BE = AV_PIX_FMT_YUV444P9BE
+  /// planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+  public static let YUV444P9LE = AV_PIX_FMT_YUV444P9LE
+  /// planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+  public static let YUV444P10BE = AV_PIX_FMT_YUV444P10BE
+  /// planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+  public static let YUV444P10LE = AV_PIX_FMT_YUV444P10LE
+  /// planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+  public static let YUV422P9BE = AV_PIX_FMT_YUV422P9BE
+  /// planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+  public static let YUV422P9LE = AV_PIX_FMT_YUV422P9LE
+  /// planar GBR 4:4:4 24bpp
+  public static let GBRP = AV_PIX_FMT_GBRP
+  public static let GBR24P = AV_PIX_FMT_GBR24P
+  /// planar GBR 4:4:4 27bpp, big-endian
+  public static let GBRP9BE = AV_PIX_FMT_GBRP9BE
+  /// planar GBR 4:4:4 27bpp, little-endian
+  public static let GBRP9LE = AV_PIX_FMT_GBRP9LE
+  /// planar GBR 4:4:4 30bpp, big-endian
+  public static let GBRP10BE = AV_PIX_FMT_GBRP10BE
+  /// planar GBR 4:4:4 30bpp, little-endian
+  public static let GBRP10LE = AV_PIX_FMT_GBRP10LE
+  /// planar GBR 4:4:4 48bpp, big-endian
+  public static let GBRP16BE = AV_PIX_FMT_GBRP16BE
+  /// planar GBR 4:4:4 48bpp, little-endian
+  public static let GBRP16LE = AV_PIX_FMT_GBRP16LE
+  /// planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+  public static let YUVA422P = AV_PIX_FMT_YUVA422P
+  /// planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+  public static let YUVA444P = AV_PIX_FMT_YUVA444P
+  /// planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+  public static let YUVA420P9BE = AV_PIX_FMT_YUVA420P9BE
+  /// planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+  public static let YUVA420P9LE = AV_PIX_FMT_YUVA420P9LE
+  /// planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+  public static let YUVA422P9BE = AV_PIX_FMT_YUVA422P9BE
+  /// planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+  public static let YUVA422P9LE = AV_PIX_FMT_YUVA422P9LE
+  /// planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+  public static let YUVA444P9BE = AV_PIX_FMT_YUVA444P9BE
+  /// planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+  public static let YUVA444P9LE = AV_PIX_FMT_YUVA444P9LE
+  /// planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+  public static let YUVA420P10BE = AV_PIX_FMT_YUVA420P10BE
+  /// planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+  public static let YUVA420P10LE = AV_PIX_FMT_YUVA420P10LE
+  /// planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+  public static let YUVA422P10BE = AV_PIX_FMT_YUVA422P10BE
+  /// planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+  public static let YUVA422P10LE = AV_PIX_FMT_YUVA422P10LE
+  /// planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+  public static let YUVA444P10BE = AV_PIX_FMT_YUVA444P10BE
+  /// planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+  public static let YUVA444P10LE = AV_PIX_FMT_YUVA444P10LE
+  /// planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+  public static let YUVA420P16BE = AV_PIX_FMT_YUVA420P16BE
+  /// planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+  public static let YUVA420P16LE = AV_PIX_FMT_YUVA420P16LE
+  /// planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+  public static let YUVA422P16BE = AV_PIX_FMT_YUVA422P16BE
+  /// planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+  public static let YUVA422P16LE = AV_PIX_FMT_YUVA422P16LE
+  /// planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+  public static let YUVA444P16BE = AV_PIX_FMT_YUVA444P16BE
+  /// planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+  public static let YUVA444P16LE = AV_PIX_FMT_YUVA444P16LE
+
+  /// HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
+  public static let VDPAU = AV_PIX_FMT_VDPAU
+
+  /// packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+  public static let XYZ12LE = AV_PIX_FMT_XYZ12LE
+  /// packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+  public static let XYZ12BE = AV_PIX_FMT_XYZ12BE
+  /// interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+  public static let NV16 = AV_PIX_FMT_NV16
+  /// interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+  public static let NV20LE = AV_PIX_FMT_NV20LE
+  /// interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+  public static let NV20BE = AV_PIX_FMT_NV20BE
+
+  /// packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+  public static let RGBA64BE = AV_PIX_FMT_RGBA64BE
+  /// packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+  public static let RGBA64LE = AV_PIX_FMT_RGBA64LE
+  /// packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+  public static let BGRA64BE = AV_PIX_FMT_BGRA64BE
+  /// packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+  public static let BGRA64LE = AV_PIX_FMT_BGRA64LE
+
+  /// packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+  public static let YVYU422 = AV_PIX_FMT_YVYU422
+
+  /// 16 bits gray, 16 bits alpha (big-endian)
+  public static let YA16BE = AV_PIX_FMT_YA16BE
+  /// 16 bits gray, 16 bits alpha (little-endian)
+  public static let YA16LE = AV_PIX_FMT_YA16LE
+
+  /// planar GBRA 4:4:4:4 32bpp
+  public static let GBRAP = AV_PIX_FMT_GBRAP
+  /// planar GBRA 4:4:4:4 64bpp, big-endian
+  public static let GBRAP16BE = AV_PIX_FMT_GBRAP16BE
+  /// planar GBRA 4:4:4:4 64bpp, little-endian
+  public static let GBRAP16LE = AV_PIX_FMT_GBRAP16LE
+  /**
+   * HW acceleration through QSV, data[3] contains a pointer to the
+   * mfxFrameSurface1 structure.
+   */
+  public static let QSV = AV_PIX_FMT_QSV
+  /**
+   * HW acceleration through MMAL, data[3] contains a pointer to the
+   * MMAL_BUFFER_HEADER_T structure.
+   */
+  public static let MMAL = AV_PIX_FMT_MMAL
+
+  /// HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer
+  public static let D3D11VA_VLD = AV_PIX_FMT_D3D11VA_VLD
+
+  /**
+   * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers
+   * exactly as for system memory frames.
+   */
+  public static let CUDA = AV_PIX_FMT_CUDA
+
+  /// packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined
+  public static let _0RGB = AV_PIX_FMT_0RGB
+  /// packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined
+  public static let RGB0 = AV_PIX_FMT_RGB0
+  /// packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined
+  public static let _0BGR = AV_PIX_FMT_0BGR
+  /// packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined
+  public static let BGR0 = AV_PIX_FMT_BGR0
+
+  /// planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+  public static let YUV420P12BE = AV_PIX_FMT_YUV420P12BE
+  /// planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+  public static let YUV420P12LE = AV_PIX_FMT_YUV420P12LE
+  /// planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+  public static let YUV420P14BE = AV_PIX_FMT_YUV420P14BE
+  /// planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+  public static let YUV420P14LE = AV_PIX_FMT_YUV420P14LE
+  /// planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+  public static let YUV422P12BE = AV_PIX_FMT_YUV422P12BE
+  /// planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+  public static let YUV422P12LE = AV_PIX_FMT_YUV422P12LE
+  /// planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+  public static let YUV422P14BE = AV_PIX_FMT_YUV422P14BE
+  /// planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+  public static let YUV422P14LE = AV_PIX_FMT_YUV422P14LE
+  /// planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+  public static let YUV444P12BE = AV_PIX_FMT_YUV444P12BE
+  /// planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+  public static let YUV444P12LE = AV_PIX_FMT_YUV444P12LE
+  /// planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+  public static let YUV444P14BE = AV_PIX_FMT_YUV444P14BE
+  /// planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+  public static let YUV444P14LE = AV_PIX_FMT_YUV444P14LE
+  /// planar GBR 4:4:4 36bpp, big-endian
+  public static let GBRP12BE = AV_PIX_FMT_GBRP12BE
+  /// planar GBR 4:4:4 36bpp, little-endian
+  public static let GBRP12LE = AV_PIX_FMT_GBRP12LE
+  /// planar GBR 4:4:4 42bpp, big-endian
+  public static let GBRP14BE = AV_PIX_FMT_GBRP14BE
+  /// planar GBR 4:4:4 42bpp, little-endian
+  public static let GBRP14LE = AV_PIX_FMT_GBRP14LE
+  /// planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range
+  public static let YUVJ411P = AV_PIX_FMT_YUVJ411P
+
+  /// bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples
+  public static let BAYER_BGGR8 = AV_PIX_FMT_BAYER_BGGR8
+  /// bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples
+  public static let BAYER_RGGB8 = AV_PIX_FMT_BAYER_RGGB8
+  /// bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples
+  public static let BAYER_GBRG8 = AV_PIX_FMT_BAYER_GBRG8
+  /// bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples
+  public static let BAYER_GRBG8 = AV_PIX_FMT_BAYER_GRBG8
+  /// bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian
+  public static let BAYER_BGGR16LE = AV_PIX_FMT_BAYER_BGGR16LE
+  /// bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian
+  public static let BAYER_BGGR16BE = AV_PIX_FMT_BAYER_BGGR16BE
+  /// bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian
+  public static let BAYER_RGGB16LE = AV_PIX_FMT_BAYER_RGGB16LE
+  /// bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian
+  public static let BAYER_RGGB16BE = AV_PIX_FMT_BAYER_RGGB16BE
+  /// bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian
+  public static let BAYER_GBRG16LE = AV_PIX_FMT_BAYER_GBRG16LE
+  /// bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian
+  public static let BAYER_GBRG16BE = AV_PIX_FMT_BAYER_GBRG16BE
+  /// bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian
+  public static let BAYER_GRBG16LE = AV_PIX_FMT_BAYER_GRBG16LE
+  /// bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian
+  public static let BAYER_GRBG16BE = AV_PIX_FMT_BAYER_GRBG16BE
+
+  /// XVideo Motion Acceleration via common packet passing
+  public static let XVMC = AV_PIX_FMT_XVMC
+
+  /// planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+  public static let YUV440P10LE = AV_PIX_FMT_YUV440P10LE
+  /// planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+  public static let YUV440P10BE = AV_PIX_FMT_YUV440P10BE
+  /// planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian
+  public static let YUV440P12LE = AV_PIX_FMT_YUV440P12LE
+  /// planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian
+  public static let YUV440P12BE = AV_PIX_FMT_YUV440P12BE
+  /// packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+  public static let AYUV64LE = AV_PIX_FMT_AYUV64LE
+  /// packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+  public static let AYUV64BE = AV_PIX_FMT_AYUV64BE
+
+  /// hardware decoding through VideoToolbox
+  public static let VIDEOTOOLBOX = AV_PIX_FMT_VIDEOTOOLBOX
+
+  /// like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian
+  public static let P010LE = AV_PIX_FMT_P010LE
+  /// like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian
+  public static let P010BE = AV_PIX_FMT_P010BE
+
+  /// planar GBR 4:4:4:4 48bpp, big-endian
+  public static let GBRAP12BE = AV_PIX_FMT_GBRAP12BE
+  /// planar GBR 4:4:4:4 48bpp, little-endian
+  public static let GBRAP12LE = AV_PIX_FMT_GBRAP12LE
+
+  /// planar GBR 4:4:4:4 40bpp, big-endian
+  public static let GBRAP10BE = AV_PIX_FMT_GBRAP10BE
+  /// planar GBR 4:4:4:4 40bpp, little-endian
+  public static let GBRAP10LE = AV_PIX_FMT_GBRAP10LE
+
+  /// hardware decoding through MediaCodec
+  public static let MEDIACODEC = AV_PIX_FMT_MEDIACODEC
+
+  /// Y , 12bpp, big-endian
+  public static let GRAY12BE = AV_PIX_FMT_GRAY12BE
+  /// Y , 12bpp, little-endian
+  public static let GRAY12LE = AV_PIX_FMT_GRAY12LE
+  /// Y , 10bpp, big-endian
+  public static let GRAY10BE = AV_PIX_FMT_GRAY10BE
+  /// Y , 10bpp, little-endian
+  public static let GRAY10LE = AV_PIX_FMT_GRAY10LE
+
+  /// like NV12, with 16bpp per component, little-endian
+  public static let P016LE = AV_PIX_FMT_P016LE
+  /// like NV12, with 16bpp per component, big-endian
+  public static let P016BE = AV_PIX_FMT_P016BE
+
+  /**
+   * Hardware surfaces for Direct3D11.
+   *
+   * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11
+   * hwaccel API and filtering support AV_PIX_FMT_D3D11 only.
+   *
+   * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the
+   * texture array index of the frame as intptr_t if the ID3D11Texture2D is
+   * an array texture (or always 0 if it's a normal texture).
+   */
+  public static let D3D11 = AV_PIX_FMT_D3D11
+
+  /// Y , 9bpp, big-endian
+  public static let GRAY9BE = AV_PIX_FMT_GRAY9BE
+  /// Y , 9bpp, little-endian
+  public static let GRAY9LE = AV_PIX_FMT_GRAY9LE
+
+  /// IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian
+  public static let GBRPF32BE = AV_PIX_FMT_GBRPF32BE
+  /// IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian
+  public static let GBRPF32LE = AV_PIX_FMT_GBRPF32LE
+  /// IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian
+  public static let GBRAPF32BE = AV_PIX_FMT_GBRAPF32BE
+  /// IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian
+  public static let GBRAPF32LE = AV_PIX_FMT_GBRAPF32LE
+
+  /**
+   * DRM-managed buffers exposed through PRIME buffer sharing.
+   *
+   * data[0] points to an AVDRMFrameDescriptor.
+   */
+  public static let DRM_PRIME = AV_PIX_FMT_DRM_PRIME
+  /**
+   * Hardware surfaces for OpenCL.
+   *
+   * data[i] contain 2D image objects (typed in C as cl_mem, used
+   * in OpenCL as image2d_t) for each plane of the surface.
+   */
+  public static let OPENCL = AV_PIX_FMT_OPENCL
+
+  /// Y , 14bpp, big-endian
+  public static let GRAY14BE = AV_PIX_FMT_GRAY14BE
+  /// Y , 14bpp, little-endian
+  public static let GRAY14LE = AV_PIX_FMT_GRAY14LE
+
+  /// IEEE-754 single precision Y, 32bpp, big-endian
+  public static let GRAYF32BE = AV_PIX_FMT_GRAYF32BE
+  /// IEEE-754 single precision Y, 32bpp, little-endian
+  public static let GRAYF32LE = AV_PIX_FMT_GRAYF32LE
+
+  /// planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, big-endian
+  public static let YUVA422P12BE = AV_PIX_FMT_YUVA422P12BE
+  /// planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), 12b alpha, little-endian
+  public static let YUVA422P12LE = AV_PIX_FMT_YUVA422P12LE
+  /// planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, big-endian
+  public static let YUVA444P12BE = AV_PIX_FMT_YUVA444P12BE
+  /// planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), 12b alpha, little-endian
+  public static let YUVA444P12LE = AV_PIX_FMT_YUVA444P12LE
+
+  /// planar YUV 4:4:4, 24bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+  public static let NV24 = AV_PIX_FMT_NV24
+  /// as above, but U and V bytes are swapped
+  public static let NV42 = AV_PIX_FMT_NV42
+
   /// number of pixel formats, __DO NOT USE THIS__ if you want to link with shared `libav*`
   /// because the number of formats might differ between versions
   public static let nb = AV_PIX_FMT_NB
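With the constant table complete, call sites can name high-bit-depth and hardware formats directly. A minimal usage sketch — the `name` accessor comes from the `CustomStringConvertible` conformance that VideoUtil.swift already provides, and the printed string assumes FFmpeg's own format naming:

    import SwiftFFmpeg

    // Reference one of the newly exposed 10-bit planar formats.
    let format: AVPixelFormat = .YUV420P10LE
    print(format.name)  // expected: "yuv420p10le"
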
From 08b55269bd4532b80f574877673c6d6135a5e8e6 Mon Sep 17 00:00:00 2001
From: Greg Cotten
Date: Mon, 4 Nov 2019 14:50:18 -0800
Subject: [PATCH 2/6] add probeSize accessor to AVFormatContext

---
 Sources/SwiftFFmpeg/AVFormatContext.swift | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/Sources/SwiftFFmpeg/AVFormatContext.swift b/Sources/SwiftFFmpeg/AVFormatContext.swift
index d908dde..d1b61b1 100644
--- a/Sources/SwiftFFmpeg/AVFormatContext.swift
+++ b/Sources/SwiftFFmpeg/AVFormatContext.swift
@@ -101,6 +101,14 @@ public final class AVFormatContext {
     get { Flag(rawValue: cContext.flags) }
     set { cContextPtr.pointee.flags = newValue.rawValue }
   }
+
+  /// Maximum size of the data read from input for determining the input container format.
+  ///
+  /// Demuxing only, set by the caller before avformat_open_input().
+  public var probeSize: Int64 {
+    get { cContext.probesize }
+    set { cContextPtr.pointee.probesize = newValue }
+  }
 
   /// Metadata that applies to the whole file.
   ///
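A hedged sketch of the new accessor in use. The no-argument `AVFormatContext` initializer and the `openInput`/`findStreamInfo` method names are assumptions based on SwiftFFmpeg's existing wrapper API, and "input.mov" is a placeholder path:

    import SwiftFFmpeg

    let fmtCtx = AVFormatContext()
    // probesize is demuxing-only and must be set before the input is opened.
    fmtCtx.probeSize = 10_000_000
    try fmtCtx.openInput("input.mov")
    try fmtCtx.findStreamInfo()
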
From 55c5b3c20c888c2b4e41520719c293f7b1 Mon Sep 17 00:00:00 2001
From: Greg Cotten
Date: Mon, 4 Nov 2019 14:58:15 -0800
Subject: [PATCH 3/6] clarify AVColorRange in comments

---
 Sources/SwiftFFmpeg/VideoUtil.swift | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Sources/SwiftFFmpeg/VideoUtil.swift b/Sources/SwiftFFmpeg/VideoUtil.swift
index e9b8f1b..72603eb 100644
--- a/Sources/SwiftFFmpeg/VideoUtil.swift
+++ b/Sources/SwiftFFmpeg/VideoUtil.swift
@@ -699,10 +699,10 @@ public typealias AVColorRange = CFFmpeg.AVColorRange
 extension CFFmpeg.AVColorRange {
   public static let UNSPECIFIED = AVCOL_RANGE_UNSPECIFIED
 
-  /// the normal 219*2^(n-8) "MPEG" YUV ranges
+  /// the normal 219*2^(n-8) "MPEG" YUV ranges - also known as "Legal" or "Video" range
   public static let MPEG = AVCOL_RANGE_MPEG
 
-  /// the normal 2^n-1 "JPEG" YUV ranges
+  /// the normal 2^n-1 "JPEG" YUV ranges - also known as "Full" range
   public static let JPEG = AVCOL_RANGE_JPEG
 
   /// Not part of ABI

From 5d13b2ffb8bd4bd79923e9d21db7019aa219d6be Mon Sep 17 00:00:00 2001
From: Greg Cotten
Date: Wed, 22 Jul 2020 11:36:33 -0700
Subject: [PATCH 4/6] Add missing hardware context

---
 Sources/SwiftFFmpeg/AVHWContext.swift | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/Sources/SwiftFFmpeg/AVHWContext.swift b/Sources/SwiftFFmpeg/AVHWContext.swift
index 1bc74fd..e589ed9 100644
--- a/Sources/SwiftFFmpeg/AVHWContext.swift
+++ b/Sources/SwiftFFmpeg/AVHWContext.swift
@@ -16,6 +16,8 @@ public enum AVHWDeviceType: UInt32 {
   case vdpau
   /// Use CUDA (Compute Unified Device Architecture, NVIDIA) hardware acceleration.
   case cuda
+  /// Use VA-API (Video Acceleration API) hardware acceleration.
+  case vaapi
   /// Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
   case dxva2
   /// Use QSV (Intel Quick Sync Video) hardware acceleration.

From 4bd08abe0a6e17d5179851c6365c550d872a38e5 Mon Sep 17 00:00:00 2001
From: Greg Cotten
Date: Tue, 29 Sep 2020 18:20:10 -0700
Subject: [PATCH 5/6] expose AVFrameSideData in AVFrame

---
 Sources/CFFmpeg/avutil_shim.h             |   1 +
 Sources/SwiftFFmpeg/AVFrame.swift         |  13 ++
 Sources/SwiftFFmpeg/AVFrameSideData.swift | 168 ++++++++++++++++++++++
 Sources/SwiftFFmpeg/HDRMetadata.swift     |  91 ++++++++++++
 4 files changed, 273 insertions(+)
 create mode 100644 Sources/SwiftFFmpeg/AVFrameSideData.swift
 create mode 100644 Sources/SwiftFFmpeg/HDRMetadata.swift

diff --git a/Sources/CFFmpeg/avutil_shim.h b/Sources/CFFmpeg/avutil_shim.h
index faa1151..00f199e 100644
--- a/Sources/CFFmpeg/avutil_shim.h
+++ b/Sources/CFFmpeg/avutil_shim.h
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <libavutil/mastering_display_metadata.h>
 
 static const int64_t swift_AV_NOPTS_VALUE = AV_NOPTS_VALUE;

diff --git a/Sources/SwiftFFmpeg/AVFrame.swift b/Sources/SwiftFFmpeg/AVFrame.swift
index a91ffad..def82f4 100644
--- a/Sources/SwiftFFmpeg/AVFrame.swift
+++ b/Sources/SwiftFFmpeg/AVFrame.swift
@@ -162,6 +162,19 @@ public final class AVFrame {
   public var extendedBufferCount: Int {
     Int(native.pointee.nb_extended_buf)
   }
+
+  /// The side data attached to the frame.
+  public var sideData: [AVFrameSideData] {
+    var list = [AVFrameSideData]()
+    for i in 0 ..< sideDataCount {
+      list.append(AVFrameSideData(native: native.pointee.side_data[i]!))
+    }
+    return list
+  }
+
+  /// The number of elements in `sideData`.
+  public var sideDataCount: Int {
+    Int(native.pointee.nb_side_data)
+  }
 
   /// The frame timestamp estimated using various heuristics, in stream timebase.
   ///
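A quick sketch of the new accessors on a decoded frame. `frame` is assumed to be an `AVFrame` that came out of a decoder; `type.name` and `metadata` are provided by the new AVFrameSideData.swift below:

    // Enumerate whatever side data the decoder attached to this frame.
    for entry in frame.sideData {
      print("\(entry.type.name): \(entry.size) bytes, metadata: \(entry.metadata)")
    }
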
diff --git a/Sources/SwiftFFmpeg/AVFrameSideData.swift b/Sources/SwiftFFmpeg/AVFrameSideData.swift
new file mode 100644
index 0000000..a8d5e7c
--- /dev/null
+++ b/Sources/SwiftFFmpeg/AVFrameSideData.swift
@@ -0,0 +1,168 @@
+//
+//  AVFrameSideData.swift
+//  SwiftFFmpeg
+//
+//  Created by Greg Cotten on 3/31/20.
+//
+
+import CFFmpeg
+
+public typealias AVFrameSideDataType = CFFmpeg.AVFrameSideDataType
+
+extension AVFrameSideDataType {
+  /// The data is the AVPanScan struct defined in libavcodec.
+  public static let panScan = AV_FRAME_DATA_PANSCAN
+
+  /// ATSC A53 Part 4 Closed Captions.
+  /// A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+  /// The number of bytes of CC data is AVFrameSideData.size.
+  public static let a53CC = AV_FRAME_DATA_A53_CC
+
+  /// Stereoscopic 3D metadata.
+  /// The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+  public static let stereo3D = AV_FRAME_DATA_STEREO3D
+
+  /// The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
+  public static let matrixEncoding = AV_FRAME_DATA_MATRIXENCODING
+
+  /// Metadata relevant to a downmix procedure.
+  /// The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+  public static let downMixInfo = AV_FRAME_DATA_DOWNMIX_INFO
+
+  /// ReplayGain information in the form of the AVReplayGain struct.
+  public static let replayGain = AV_FRAME_DATA_REPLAYGAIN
+
+  /// This side data contains a 3x3 transformation matrix describing an affine
+  /// transformation that needs to be applied to the frame for correct
+  /// presentation.
+  /// See libavutil/display.h for a detailed description of the data.
+  public static let displayMatrix = AV_FRAME_DATA_DISPLAYMATRIX
+
+  /// Active Format Description data consisting of a single byte as specified
+  /// in ETSI TS 101 154 using AVActiveFormatDescription enum.
+  public static let afd = AV_FRAME_DATA_AFD
+
+  /// Motion vectors exported by some codecs (on demand through the export_mvs
+  /// flag set in the libavcodec AVCodecContext flags2 option).
+  /// The data is the AVMotionVector struct defined in
+  /// libavutil/motion_vector.h.
+  public static let motionVectors = AV_FRAME_DATA_MOTION_VECTORS
+
+  /// Recommends skipping the specified number of samples. This is exported
+  /// only if the "skip_manual" AVOption is set in libavcodec.
+  /// This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
+  /// @code
+  /// u32le number of samples to skip from start of this packet
+  /// u32le number of samples to skip from end of this packet
+  /// u8    reason for start skip
+  /// u8    reason for end skip (0=padding silence, 1=convergence)
+  /// @endcode
+  public static let skipSamples = AV_FRAME_DATA_SKIP_SAMPLES
+
+  /// This side data must be associated with an audio frame and corresponds to
+  /// enum AVAudioServiceType defined in avcodec.h.
+  public static let audioServiceType = AV_FRAME_DATA_AUDIO_SERVICE_TYPE
+
+  /// Mastering display metadata associated with a video frame. The payload is
+  /// an AVMasteringDisplayMetadata type and contains information about the
+  /// mastering display color volume.
+  public static let masteringDisplayMetadata = AV_FRAME_DATA_MASTERING_DISPLAY_METADATA
+
+  /// The GOP timecode in 25 bit timecode format. Data format is 64-bit integer.
+  /// This is set on the first frame of a GOP that has a temporal reference of 0.
+  public static let gopTimecode = AV_FRAME_DATA_GOP_TIMECODE
+
+  /// The data represents the AVSphericalMapping structure defined in
+  /// libavutil/spherical.h.
+  public static let spherical = AV_FRAME_DATA_SPHERICAL
+
+  /// Content light level (based on CTA-861.3). This payload contains data in
+  /// the form of the AVContentLightMetadata struct.
+  public static let contentLightLevel = AV_FRAME_DATA_CONTENT_LIGHT_LEVEL
+
+  /// The data contains an ICC profile as an opaque octet buffer following the
+  /// format described by ISO 15076-1 with an optional name defined in the
+  /// metadata key entry "name".
+  public static let iccProfile = AV_FRAME_DATA_ICC_PROFILE
+
+  #if FF_API_FRAME_QP
+  /// Implementation-specific description of the format of AV_FRAME_QP_TABLE_DATA.
+  /// The contents of this side data are undocumented and internal; use
+  /// av_frame_set_qp_table() and av_frame_get_qp_table() to access this in a
+  /// meaningful way instead.
+  public static let qpTableProperties = AV_FRAME_DATA_QP_TABLE_PROPERTIES
+
+  /// Raw QP table data. Its format is described by
+  /// AV_FRAME_DATA_QP_TABLE_PROPERTIES. Use av_frame_set_qp_table() and
+  /// av_frame_get_qp_table() to access this instead.
+  public static let qpTableData = AV_FRAME_DATA_QP_TABLE_DATA
+  #endif
+
+  /// Timecode which conforms to SMPTE ST 12-1. The data is an array of 4 uint32_t
+  /// where the first uint32_t describes how many (1-3) of the other timecodes are used.
+  /// The timecode format is described in the av_timecode_get_smpte_from_framenum()
+  /// function in libavutil/timecode.c.
+  public static let S12MTimecode = AV_FRAME_DATA_S12M_TIMECODE
+
+  /// HDR dynamic metadata associated with a video frame. The payload is
+  /// an AVDynamicHDRPlus type and contains information for color
+  /// volume transform - application 4 of SMPTE 2094-40:2016 standard.
+  public static let dynamicHDRPlus = AV_FRAME_DATA_DYNAMIC_HDR_PLUS
+
+  /// Regions Of Interest, the data is an array of AVRegionOfInterest type, the number of
+  /// array elements is implied by AVFrameSideData.size / AVRegionOfInterest.self_size.
+  public static let regionsOfInterest = AV_FRAME_DATA_REGIONS_OF_INTEREST
+
+  /// The name of the type.
+  public var name: String {
+    String(cString: av_frame_side_data_name(self))
+  }
+}
+
+typealias CAVFrameSideData = CFFmpeg.AVFrameSideData
+
+/// Structure to hold side data for an AVFrame.
+///
+/// sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added
+/// to the end with a minor bump.
+public final class AVFrameSideData {
+  let native: UnsafeMutablePointer<CAVFrameSideData>
+
+  init(native: UnsafeMutablePointer<CAVFrameSideData>) {
+    self.native = native
+  }
+
+  public var type: AVFrameSideDataType {
+    native.pointee.type
+  }
+
+  public var data: UnsafeMutablePointer<UInt8> {
+    native.pointee.data
+  }
+
+  public var size: Int {
+    Int(native.pointee.size)
+  }
+
+  public var metadata: [String: String] {
+    var dict = [String: String]()
+    var tag: UnsafeMutablePointer<AVDictionaryEntry>?
+    while let next = av_dict_get(native.pointee.metadata, "", tag, AV_DICT_IGNORE_SUFFIX) {
+      dict[String(cString: next.pointee.key)] = String(cString: next.pointee.value)
+      tag = next
+    }
+    return dict
+  }
+
+  public var buffer: AVBuffer {
+    AVBuffer(native: native.pointee.buf)
+  }
+}

diff --git a/Sources/SwiftFFmpeg/HDRMetadata.swift b/Sources/SwiftFFmpeg/HDRMetadata.swift
new file mode 100644
index 0000000..c27ebce
--- /dev/null
+++ b/Sources/SwiftFFmpeg/HDRMetadata.swift
@@ -0,0 +1,91 @@
+//
+//  HDRMetadata.swift
+//  SwiftFFmpeg
+//
+//  Created by Greg Cotten on 4/1/20.
+//
+
+import CFFmpeg
+
+public typealias CAVMasteringDisplayMetadata = CFFmpeg.AVMasteringDisplayMetadata
+
+public struct AVMasteringDisplayMetadata {
+  /// CIE 1931 xy chromaticity coords of color primaries (r, g, b order).
+  public var display_primaries: [[AVRational]]
+
+  /// CIE 1931 xy chromaticity coords of white point.
+  public var white_point: [AVRational]
+
+  /// Min luminance of mastering display (cd/m^2).
+  public var min_luminance: AVRational
+
+  /// Max luminance of mastering display (cd/m^2).
+  public var max_luminance: AVRational
+
+  /// Flag indicating whether the display primaries (and white point) are set.
+  public var has_primaries: Bool
+
+  /// Flag indicating whether the luminance (min_ and max_) have been set.
+  public var has_luminance: Bool
+
+  init(cMasteringDisplayMetadata cData: CAVMasteringDisplayMetadata) {
+    display_primaries = [
+      [cData.display_primaries.0.0, cData.display_primaries.0.1],
+      [cData.display_primaries.1.0, cData.display_primaries.1.1],
+      [cData.display_primaries.2.0, cData.display_primaries.2.1],
+    ]
+    white_point = [cData.white_point.0, cData.white_point.1]
+    min_luminance = cData.min_luminance
+    max_luminance = cData.max_luminance
+    has_primaries = cData.has_primaries == 1
+    has_luminance = cData.has_luminance == 1
+  }
+}
+
+public extension AVFrameSideData {
+  var masteringDisplayMetadata: AVMasteringDisplayMetadata? {
+    guard type == .masteringDisplayMetadata else { return nil }
+
+    return data.withMemoryRebound(to: CAVMasteringDisplayMetadata.self, capacity: 1) { (cMasteringDisplayMetadata) -> AVMasteringDisplayMetadata in
+      .init(cMasteringDisplayMetadata: cMasteringDisplayMetadata.pointee)
+    }
+  }
+}
+
+public typealias CAVContentLightMetadata = CFFmpeg.AVContentLightMetadata
+
+/// Content light level needed to transmit HDR over HDMI (CTA-861.3).
+/// To be used as payload of a AVFrameSideData or AVPacketSideData with the
+/// appropriate type.
+/// @note The struct should be allocated with av_content_light_metadata_alloc()
+/// and its size is not a part of the public ABI.
+public struct AVContentLightMetadata {
+  /// Max content light level (cd/m^2).
+  public var maxCLL: UInt32
+
+  /// Max average light level per frame (cd/m^2).
+  public var maxFALL: UInt32
+
+  init(cContentLightMetadata cData: CAVContentLightMetadata) {
+    maxCLL = cData.MaxCLL
+    maxFALL = cData.MaxFALL
+  }
+}
+
+public extension AVFrameSideData {
+  var contentLightMetadata: AVContentLightMetadata? {
+    guard type == .contentLightLevel else { return nil }
+
+    return data.withMemoryRebound(to: CAVContentLightMetadata.self, capacity: 1) { (cContentLightMetadata) -> AVContentLightMetadata in
+      .init(cContentLightMetadata: cContentLightMetadata.pointee)
+    }
+  }
+}
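Putting the typed accessors together, a hedged sketch of reading HDR metadata from a decoded frame; `frame` is assumed to be an `AVFrame` decoded from an HDR10 stream:

    for entry in frame.sideData {
      // Each accessor returns nil unless the entry's type matches, so both checks are safe.
      if let mastering = entry.masteringDisplayMetadata, mastering.has_luminance {
        print("mastering display max luminance:", mastering.max_luminance)
      }
      if let light = entry.contentLightMetadata {
        print("MaxCLL: \(light.maxCLL), MaxFALL: \(light.maxFALL)")
      }
    }
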
From 96dc22d0db1da3a2590429ae5f748caaa7dbef84 Mon Sep 17 00:00:00 2001
From: Greg Cotten
Date: Wed, 30 Sep 2020 14:02:53 -0700
Subject: [PATCH 6/6] expose AVFieldOrder from codec parameters

---
 Sources/SwiftFFmpeg/AVCodecParameters.swift |  6 ++++++
 Sources/SwiftFFmpeg/VideoUtil.swift         | 20 ++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/Sources/SwiftFFmpeg/AVCodecParameters.swift b/Sources/SwiftFFmpeg/AVCodecParameters.swift
index 8b77f9f..97b0cdf 100644
--- a/Sources/SwiftFFmpeg/AVCodecParameters.swift
+++ b/Sources/SwiftFFmpeg/AVCodecParameters.swift
@@ -123,6 +123,12 @@ extension AVCodecParameters {
     get { Int(native.pointee.video_delay) }
     set { native.pointee.video_delay = Int32(newValue) }
   }
+
+  /// The field order of the video frame.
+  public var fieldOrder: AVFieldOrder {
+    get { native.pointee.field_order }
+    set { native.pointee.field_order = newValue }
+  }
 
   /// The color range of the video frame.
   public var colorRange: AVColorRange {

diff --git a/Sources/SwiftFFmpeg/VideoUtil.swift b/Sources/SwiftFFmpeg/VideoUtil.swift
index e11485d..8a00b08 100644
--- a/Sources/SwiftFFmpeg/VideoUtil.swift
+++ b/Sources/SwiftFFmpeg/VideoUtil.swift
@@ -192,3 +192,23 @@ extension AVPixelFormat: CustomStringConvertible {
     name
   }
 }
+
+public typealias AVFieldOrder = CFFmpeg.AVFieldOrder
+
+extension CFFmpeg.AVFieldOrder {
+  public static let UNKNOWN = AV_FIELD_UNKNOWN
+
+  public static let PROGRESSIVE = AV_FIELD_PROGRESSIVE
+
+  /// Top coded first, top displayed first
+  public static let TT = AV_FIELD_TT
+
+  /// Bottom coded first, bottom displayed first
+  public static let BB = AV_FIELD_BB
+
+  /// Top coded first, bottom displayed first
+  public static let TB = AV_FIELD_TB
+
+  /// Bottom coded first, top displayed first
+  public static let BT = AV_FIELD_BT
+}
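A small sketch of the new property in use. `stream` is assumed to be an `AVStream` obtained from an opened `AVFormatContext`, and `codecParameters` follows the accessor naming SwiftFFmpeg uses elsewhere:

    switch stream.codecParameters.fieldOrder {
    case .PROGRESSIVE:
      print("progressive")
    case .TT, .BB, .TB, .BT:
      print("interlaced")
    default:
      print("field order unknown or unspecified")
    }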