Mirror of https://github.com/libretro/Lakka-LibreELEC.git (synced 2024-11-28 01:34:40 +00:00), commit 5c23848068
Patch created using revisions ea3d24b..fe61bbb from branch v4l2-request-n6.0 of https://github.com/jernejsk/FFmpeg
From 61a3cdcb354186f574bf3220de0472370fa53ccd Mon Sep 17 00:00:00 2001
From: Jonas Karlman <jonas@kwiboo.se>
Date: Mon, 3 Dec 2018 23:48:04 +0100
Subject: [PATCH 01/13] avutil: add av_buffer_pool_flush()

Used by V4L2 request API hwaccel

Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
---
libavutil/buffer.c | 13 +++++++++++++
libavutil/buffer.h | 5 +++++
2 files changed, 18 insertions(+)

diff --git a/libavutil/buffer.c b/libavutil/buffer.c
index e4562a79b1..09da632c00 100644
--- a/libavutil/buffer.c
+++ b/libavutil/buffer.c
@@ -319,6 +319,19 @@ static void buffer_pool_free(AVBufferPool *pool)
av_freep(&pool);
}

+void av_buffer_pool_flush(AVBufferPool *pool)
+{
+    ff_mutex_lock(&pool->mutex);
+    while (pool->pool) {
+        BufferPoolEntry *buf = pool->pool;
+        pool->pool = buf->next;
+
+        buf->free(buf->opaque, buf->data);
+        av_freep(&buf);
+    }
+    ff_mutex_unlock(&pool->mutex);
+}
+
void av_buffer_pool_uninit(AVBufferPool **ppool)
{
AVBufferPool *pool;
diff --git a/libavutil/buffer.h b/libavutil/buffer.h
index e1ef5b7f07..fde9bae4f6 100644
--- a/libavutil/buffer.h
+++ b/libavutil/buffer.h
@@ -284,6 +284,11 @@ AVBufferPool *av_buffer_pool_init2(size_t size, void *opaque,
AVBufferRef* (*alloc)(void *opaque, size_t size),
void (*pool_free)(void *opaque));

+/**
+ * Free all available buffers in a buffer pool.
+ */
+ void av_buffer_pool_flush(AVBufferPool *pool);
+
/**
* Mark the pool as being available for freeing. It will actually be freed only
* once all the allocated buffers associated with the pool are released. Thus it

From 3fe3baf21ef5c934699bfc0aefb4b2b4180c2c72 Mon Sep 17 00:00:00 2001
From: Jonas Karlman <jonas@kwiboo.se>
Date: Sat, 15 Dec 2018 22:32:16 +0100
Subject: [PATCH 02/13] Add common V4L2 request API code

Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
Signed-off-by: Alex Bee <knaerzche@gmail.com>
---
configure | 12 +
libavcodec/Makefile | 1 +
libavcodec/hwconfig.h | 2 +
libavcodec/v4l2_request.c | 1023 +++++++++++++++++++++++++++++++++++++
libavcodec/v4l2_request.h | 77 +++
5 files changed, 1115 insertions(+)
create mode 100644 libavcodec/v4l2_request.c
create mode 100644 libavcodec/v4l2_request.h

diff --git a/configure b/configure
|
|
index b6616f00b6..6167b122e0 100755
|
|
--- a/configure
|
|
+++ b/configure
|
|
@@ -281,6 +281,7 @@ External library support:
|
|
if openssl, gnutls or mbedtls is not used [no]
|
|
--enable-libtwolame enable MP2 encoding via libtwolame [no]
|
|
--enable-libuavs3d enable AVS3 decoding via libuavs3d [no]
|
|
+ --enable-libudev enable libudev [no]
|
|
--enable-libv4l2 enable libv4l2/v4l-utils [no]
|
|
--enable-libvidstab enable video stabilization using vid.stab [no]
|
|
--enable-libvmaf enable vmaf filter via libvmaf [no]
|
|
@@ -350,6 +351,7 @@ External library support:
|
|
--enable-omx-rpi enable OpenMAX IL code for Raspberry Pi [no]
|
|
--enable-rkmpp enable Rockchip Media Process Platform code [no]
|
|
--disable-v4l2-m2m disable V4L2 mem2mem code [autodetect]
|
|
+ --enable-v4l2-request enable V4L2 request API code [no]
|
|
--disable-vaapi disable Video Acceleration API (mainly Unix/Intel) code [autodetect]
|
|
--disable-vdpau disable Nvidia Video Decode and Presentation API for Unix code [autodetect]
|
|
--disable-videotoolbox disable VideoToolbox code [autodetect]
|
|
@@ -1857,6 +1859,7 @@ EXTERNAL_LIBRARY_LIST="
|
|
libtheora
|
|
libtwolame
|
|
libuavs3d
|
|
+ libudev
|
|
libv4l2
|
|
libvmaf
|
|
libvorbis
|
|
@@ -1913,6 +1916,7 @@ HWACCEL_LIBRARY_LIST="
|
|
mmal
|
|
omx
|
|
opencl
|
|
+ v4l2_request
|
|
"
|
|
|
|
DOCUMENT_LIST="
|
|
@@ -2999,6 +3003,7 @@ d3d11va_deps="dxva_h ID3D11VideoDecoder ID3D11VideoContext"
|
|
dxva2_deps="dxva2api_h DXVA2_ConfigPictureDecode ole32 user32"
|
|
ffnvcodec_deps_any="libdl LoadLibrary"
|
|
nvdec_deps="ffnvcodec"
|
|
+v4l2_request_deps="linux_videodev2_h linux_media_h v4l2_timeval_to_ns libdrm libudev"
|
|
vaapi_x11_deps="xlib_x11"
|
|
videotoolbox_hwaccel_deps="videotoolbox pthreads"
|
|
videotoolbox_hwaccel_extralibs="-framework QuartzCore"
|
|
@@ -6692,6 +6697,7 @@ enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame
|
|
{ check_lib libtwolame twolame.h twolame_encode_buffer_float32_interleaved -ltwolame ||
|
|
die "ERROR: libtwolame must be installed and version must be >= 0.3.10"; }
|
|
enabled libuavs3d && require_pkg_config libuavs3d "uavs3d >= 1.1.41" uavs3d.h uavs3d_decode
|
|
+enabled libudev && require_pkg_config libudev libudev libudev.h udev_new
|
|
enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl
|
|
enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit
|
|
enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 2.0.0" libvmaf.h vmaf_init
|
|
@@ -6794,6 +6800,10 @@ enabled rkmpp && { require_pkg_config rkmpp rockchip_mpp rockchip/r
|
|
{ enabled libdrm ||
|
|
die "ERROR: rkmpp requires --enable-libdrm"; }
|
|
}
|
|
+enabled v4l2_request && { enabled libdrm ||
|
|
+ die "ERROR: v4l2-request requires --enable-libdrm"; } &&
|
|
+ { enabled libudev ||
|
|
+ die "ERROR: v4l2-request requires --enable-libudev"; }
|
|
enabled vapoursynth && require_pkg_config vapoursynth "vapoursynth-script >= 42" VSScript.h vsscript_init
|
|
|
|
|
|
@@ -6876,6 +6886,8 @@ if enabled v4l2_m2m; then
|
|
check_cc vp9_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;"
|
|
fi
|
|
|
|
+check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns
|
|
+
|
|
check_headers sys/videoio.h
|
|
test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
|
|
|
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
|
|
index 389253f5d0..0148242ed0 100644
|
|
--- a/libavcodec/Makefile
|
|
+++ b/libavcodec/Makefile
|
|
@@ -170,6 +170,7 @@ OBJS-$(CONFIG_VP3DSP) += vp3dsp.o
|
|
OBJS-$(CONFIG_VP56DSP) += vp56dsp.o
|
|
OBJS-$(CONFIG_VP8DSP) += vp8dsp.o
|
|
OBJS-$(CONFIG_V4L2_M2M) += v4l2_m2m.o v4l2_context.o v4l2_buffers.o v4l2_fmt.o
|
|
+OBJS-$(CONFIG_V4L2_REQUEST) += v4l2_request.o
|
|
OBJS-$(CONFIG_WMA_FREQS) += wma_freqs.o
|
|
OBJS-$(CONFIG_WMV2DSP) += wmv2dsp.o
|
|
|
|
diff --git a/libavcodec/hwconfig.h b/libavcodec/hwconfig.h
|
|
index 721424912c..00864efc27 100644
|
|
--- a/libavcodec/hwconfig.h
|
|
+++ b/libavcodec/hwconfig.h
|
|
@@ -78,6 +78,8 @@ typedef struct AVCodecHWConfigInternal {
|
|
HW_CONFIG_HWACCEL(1, 1, 1, VIDEOTOOLBOX, VIDEOTOOLBOX, ff_ ## codec ## _videotoolbox_hwaccel)
|
|
#define HWACCEL_D3D11VA(codec) \
|
|
HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel)
|
|
+#define HWACCEL_V4L2REQUEST(codec) \
|
|
+ HW_CONFIG_HWACCEL(1, 0, 0, DRM_PRIME, DRM, ff_ ## codec ## _v4l2request_hwaccel)
|
|
|
|
#define HW_CONFIG_ENCODER(device, frames, ad_hoc, format, device_type_) \
|
|
&(const AVCodecHWConfigInternal) { \
|
|
diff --git a/libavcodec/v4l2_request.c b/libavcodec/v4l2_request.c
|
|
new file mode 100644
|
|
index 0000000000..e7faf100f0
|
|
--- /dev/null
|
|
+++ b/libavcodec/v4l2_request.c
|
|
@@ -0,0 +1,1023 @@
|
|
+/*
|
|
+ * This file is part of FFmpeg.
|
|
+ *
|
|
+ * FFmpeg is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * FFmpeg is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with FFmpeg; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
+ */
|
|
+
|
|
+#include <drm_fourcc.h>
|
|
+#include <linux/media.h>
|
|
+#include <sys/mman.h>
|
|
+#include <sys/types.h>
|
|
+#include <sys/stat.h>
|
|
+#include <fcntl.h>
|
|
+
|
|
+#include <sys/sysmacros.h>
|
|
+#include <libudev.h>
|
|
+
|
|
+#include "decode.h"
|
|
+#include "internal.h"
|
|
+#include "v4l2_request.h"
|
|
+
|
|
+uint64_t ff_v4l2_request_get_capture_timestamp(AVFrame *frame)
|
|
+{
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0];
|
|
+ return req ? v4l2_timeval_to_ns(&req->capture.buffer.timestamp) : 0;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_reset_frame(AVCodecContext *avctx, AVFrame *frame)
|
|
+{
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0];
|
|
+ memset(&req->drm, 0, sizeof(AVDRMFrameDescriptor));
|
|
+ req->output.used = 0;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_append_output_buffer(AVCodecContext *avctx, AVFrame *frame, const uint8_t *data, uint32_t size)
|
|
+{
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0];
|
|
+ if (req->output.used + size + (AV_INPUT_BUFFER_PADDING_SIZE * 4) <= req->output.size) {
|
|
+ memcpy(req->output.addr + req->output.used, data, size);
|
|
+ req->output.used += size;
|
|
+ } else {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: output.used=%u output.size=%u size=%u\n", __func__, req->output.used, req->output.size, size);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int v4l2_request_controls(V4L2RequestContext *ctx, int request_fd, unsigned long type, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ struct v4l2_ext_controls controls = {
|
|
+ .controls = control,
|
|
+ .count = count,
|
|
+ .request_fd = request_fd,
|
|
+ .which = (request_fd >= 0) ? V4L2_CTRL_WHICH_REQUEST_VAL : 0,
|
|
+ };
|
|
+
|
|
+ if (!control || !count)
|
|
+ return 0;
|
|
+
|
|
+ return ioctl(ctx->video_fd, type, &controls);
|
|
+}
|
|
+
|
|
+static int v4l2_request_set_controls(V4L2RequestContext *ctx, int request_fd, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ return v4l2_request_controls(ctx, request_fd, VIDIOC_S_EXT_CTRLS, control, count);
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_set_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret;
|
|
+
|
|
+ ret = v4l2_request_controls(ctx, -1, VIDIOC_S_EXT_CTRLS, control, count);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_get_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret;
|
|
+
|
|
+ ret = v4l2_request_controls(ctx, -1, VIDIOC_G_EXT_CTRLS, control, count);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get controls failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_query_control(AVCodecContext *avctx, struct v4l2_query_ext_ctrl *control)
|
|
+{
|
|
+ int ret;
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_QUERY_EXT_CTRL, control);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: query control failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_query_control_default_value(AVCodecContext *avctx, uint32_t id)
|
|
+{
|
|
+ int ret;
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ struct v4l2_queryctrl control = {
|
|
+ .id = id,
|
|
+ };
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_QUERYCTRL, &control);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: query control failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ return control.default_value;
|
|
+}
|
|
+
|
|
+static int v4l2_request_queue_buffer(V4L2RequestContext *ctx, int request_fd, V4L2RequestBuffer *buf, uint32_t flags)
|
|
+{
|
|
+ struct v4l2_plane planes[1] = {};
|
|
+ struct v4l2_buffer buffer = {
|
|
+ .type = buf->buffer.type,
|
|
+ .memory = buf->buffer.memory,
|
|
+ .index = buf->index,
|
|
+ .timestamp.tv_usec = buf->index + 1,
|
|
+ .bytesused = buf->used,
|
|
+ .request_fd = request_fd,
|
|
+ .flags = ((request_fd >= 0) ? V4L2_BUF_FLAG_REQUEST_FD : 0) | flags,
|
|
+ };
|
|
+
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(buf->buffer.type)) {
|
|
+ planes[0].bytesused = buf->used;
|
|
+ buffer.bytesused = 0;
|
|
+ buffer.length = 1;
|
|
+ buffer.m.planes = planes;
|
|
+ }
|
|
+
|
|
+ return ioctl(ctx->video_fd, VIDIOC_QBUF, &buffer);
|
|
+}
|
|
+
|
|
+static int v4l2_request_dequeue_buffer(V4L2RequestContext *ctx, V4L2RequestBuffer *buf)
|
|
+{
|
|
+ int ret;
|
|
+ struct v4l2_plane planes[1] = {};
|
|
+ struct v4l2_buffer buffer = {
|
|
+ .type = buf->buffer.type,
|
|
+ .memory = buf->buffer.memory,
|
|
+ .index = buf->index,
|
|
+ };
|
|
+
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(buf->buffer.type)) {
|
|
+ buffer.length = 1;
|
|
+ buffer.m.planes = planes;
|
|
+ }
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_DQBUF, &buffer);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ buf->buffer.timestamp = buffer.timestamp;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const uint32_t v4l2_request_capture_pixelformats[] = {
|
|
+ V4L2_PIX_FMT_NV12,
|
|
+#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED
|
|
+ V4L2_PIX_FMT_SUNXI_TILED_NV12,
|
|
+#endif
|
|
+};
|
|
+
|
|
+static int v4l2_request_set_drm_descriptor(V4L2RequestDescriptor *req, struct v4l2_format *format)
|
|
+{
|
|
+ AVDRMFrameDescriptor *desc = &req->drm;
|
|
+ AVDRMLayerDescriptor *layer = &desc->layers[0];
|
|
+ uint32_t pixelformat = V4L2_TYPE_IS_MULTIPLANAR(format->type) ? format->fmt.pix_mp.pixelformat : format->fmt.pix.pixelformat;
|
|
+
|
|
+ switch (pixelformat) {
|
|
+ case V4L2_PIX_FMT_NV12:
|
|
+ layer->format = DRM_FORMAT_NV12;
|
|
+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR;
|
|
+ break;
|
|
+#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED
|
|
+ case V4L2_PIX_FMT_SUNXI_TILED_NV12:
|
|
+ layer->format = DRM_FORMAT_NV12;
|
|
+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_ALLWINNER_TILED;
|
|
+ break;
|
|
+#endif
|
|
+ default:
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ desc->nb_objects = 1;
|
|
+ desc->objects[0].fd = req->capture.fd;
|
|
+ desc->objects[0].size = req->capture.size;
|
|
+
|
|
+ desc->nb_layers = 1;
|
|
+ layer->nb_planes = 2;
|
|
+
|
|
+ layer->planes[0].object_index = 0;
|
|
+ layer->planes[0].offset = 0;
|
|
+ layer->planes[0].pitch = V4L2_TYPE_IS_MULTIPLANAR(format->type) ? format->fmt.pix_mp.plane_fmt[0].bytesperline : format->fmt.pix.bytesperline;
|
|
+
|
|
+ layer->planes[1].object_index = 0;
|
|
+ layer->planes[1].offset = layer->planes[0].pitch * (V4L2_TYPE_IS_MULTIPLANAR(format->type) ? format->fmt.pix_mp.height : format->fmt.pix.height);
|
|
+ layer->planes[1].pitch = layer->planes[0].pitch;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int v4l2_request_queue_decode(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0];
|
|
+ struct timeval tv = { 2, 0 };
|
|
+ fd_set except_fds;
|
|
+ int ret;
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p used=%u controls=%d index=%d fd=%d request_fd=%d first_slice=%d last_slice=%d\n", __func__, avctx, req->output.used, count, req->capture.index, req->capture.fd, req->request_fd, first_slice, last_slice);
|
|
+
|
|
+ ret = v4l2_request_set_controls(ctx, req->request_fd, control, count);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed for request %d, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ memset(req->output.addr + req->output.used, 0, AV_INPUT_BUFFER_PADDING_SIZE * 4);
|
|
+
|
|
+ ret = v4l2_request_queue_buffer(ctx, req->request_fd, &req->output, last_slice ? 0 : V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: queue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if (first_slice) {
|
|
+ ret = v4l2_request_queue_buffer(ctx, -1, &req->capture, 0);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: queue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // NOTE: do we need to dequeue when request fails/timeout?
|
|
+
|
|
+ // 4. queue request and wait
|
|
+ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_QUEUE, NULL);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: queue request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ FD_ZERO(&except_fds);
|
|
+ FD_SET(req->request_fd, &except_fds);
|
|
+
|
|
+ ret = select(req->request_fd + 1, NULL, NULL, &except_fds, &tv);
|
|
+ if (ret == 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: request %d timeout\n", __func__, req->request_fd);
|
|
+ goto fail;
|
|
+ } else if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: select request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_dequeue_buffer(ctx, &req->output);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if (last_slice) {
|
|
+ ret = v4l2_request_dequeue_buffer(ctx, &req->capture);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ // TODO: check errors
|
|
+ // buffer.flags & V4L2_BUF_FLAG_ERROR
|
|
+
|
|
+ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_REINIT, NULL);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: reinit request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if (last_slice)
|
|
+ return v4l2_request_set_drm_descriptor(req, &ctx->format);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
|
|
+ ret = v4l2_request_dequeue_buffer(ctx, &req->output);
|
|
+ if (ret < 0)
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno);
|
|
+
|
|
+ ret = v4l2_request_dequeue_buffer(ctx, &req->capture);
|
|
+ if (ret < 0)
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno);
|
|
+
|
|
+ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_REINIT, NULL);
|
|
+ if (ret < 0)
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: reinit request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno);
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_decode_slice(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice)
|
|
+{
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0];
|
|
+
|
|
+ // fall back to queue each slice as a full frame
|
|
+ if ((req->output.capabilities & V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF) != V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
|
|
+ return v4l2_request_queue_decode(avctx, frame, control, count, 1, 1);
|
|
+
|
|
+ return v4l2_request_queue_decode(avctx, frame, control, count, first_slice, last_slice);
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_decode_frame(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ return v4l2_request_queue_decode(avctx, frame, control, count, 1, 1);
|
|
+}
|
|
+
|
|
+static int v4l2_request_try_framesize(AVCodecContext *avctx, uint32_t pixelformat)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ struct v4l2_frmsizeenum frmsize = {
|
|
+ .index = 0,
|
|
+ .pixel_format = pixelformat,
|
|
+ };
|
|
+
|
|
+ if (ioctl(ctx->video_fd, VIDIOC_ENUM_FRAMESIZES, &frmsize) < 0)
|
|
+ return 0;
|
|
+
|
|
+ /*
|
|
+ * We only validate min/max framesize for V4L2_FRMSIZE_TYPE_STEPWISE here, since the alignment
|
|
+ * which is eventually needed will be done driver-side later in VIDIOC_S_FMT and there is no need
|
|
+     * to validate step_width/step_height here
|
|
+ */
|
|
+
|
|
+ do {
|
|
+
|
|
+ if (frmsize.type == V4L2_FRMSIZE_TYPE_DISCRETE && frmsize.discrete.width == avctx->coded_width &&
|
|
+ frmsize.discrete.height == avctx->coded_height)
|
|
+ return 0;
|
|
+ else if ((frmsize.type == V4L2_FRMSIZE_TYPE_STEPWISE || frmsize.type == V4L2_FRMSIZE_TYPE_CONTINUOUS) &&
|
|
+ avctx->coded_width >= frmsize.stepwise.min_width && avctx->coded_height >= frmsize.stepwise.min_height &&
|
|
+ avctx->coded_width <= frmsize.stepwise.max_width && avctx->coded_height <= frmsize.stepwise.max_height)
|
|
+ return 0;
|
|
+
|
|
+ frmsize.index++;
|
|
+
|
|
+ } while (ioctl(ctx->video_fd, VIDIOC_ENUM_FRAMESIZES, &frmsize) >= 0);
|
|
+
|
|
+ av_log(avctx, AV_LOG_INFO, "%s: pixelformat %u not supported for width %u height %u\n", __func__, pixelformat, avctx->coded_width, avctx->coded_height);
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int v4l2_request_try_format(AVCodecContext *avctx, enum v4l2_buf_type type, uint32_t pixelformat)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ struct v4l2_fmtdesc fmtdesc = {
|
|
+ .index = 0,
|
|
+ .type = type,
|
|
+ };
|
|
+
|
|
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
|
|
+ struct v4l2_create_buffers buffers = {
|
|
+ .count = 0,
|
|
+ .memory = V4L2_MEMORY_MMAP,
|
|
+ .format.type = type,
|
|
+ };
|
|
+
|
|
+ if (ioctl(ctx->video_fd, VIDIOC_CREATE_BUFS, &buffers) < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: create buffers failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ if ((buffers.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS) != V4L2_BUF_CAP_SUPPORTS_REQUESTS) {
|
|
+        av_log(avctx, AV_LOG_INFO, "%s: output buffer type does not support requests, capabilities %u\n", __func__, buffers.capabilities);
|
|
+ return -1;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ while (ioctl(ctx->video_fd, VIDIOC_ENUM_FMT, &fmtdesc) >= 0) {
|
|
+ if (fmtdesc.pixelformat == pixelformat)
|
|
+ return 0;
|
|
+
|
|
+ fmtdesc.index++;
|
|
+ }
|
|
+
|
|
+ av_log(avctx, AV_LOG_INFO, "%s: pixelformat %u not supported for type %u\n", __func__, pixelformat, type);
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int v4l2_request_set_format(AVCodecContext *avctx, enum v4l2_buf_type type, uint32_t pixelformat, uint32_t buffersize)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ struct v4l2_format format = {
|
|
+ .type = type,
|
|
+ };
|
|
+
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
|
|
+ format.fmt.pix_mp.width = avctx->coded_width;
|
|
+ format.fmt.pix_mp.height = avctx->coded_height;
|
|
+ format.fmt.pix_mp.pixelformat = pixelformat;
|
|
+ format.fmt.pix_mp.plane_fmt[0].sizeimage = buffersize;
|
|
+ format.fmt.pix_mp.num_planes = 1;
|
|
+ } else {
|
|
+ format.fmt.pix.width = avctx->coded_width;
|
|
+ format.fmt.pix.height = avctx->coded_height;
|
|
+ format.fmt.pix.pixelformat = pixelformat;
|
|
+ format.fmt.pix.sizeimage = buffersize;
|
|
+ }
|
|
+
|
|
+ return ioctl(ctx->video_fd, VIDIOC_S_FMT, &format);
|
|
+}
|
|
+
|
|
+static int v4l2_request_select_capture_format(AVCodecContext *avctx)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ enum v4l2_buf_type type = ctx->format.type;
|
|
+
|
|
+#if 0
|
|
+ struct v4l2_format format = {
|
|
+ .type = type,
|
|
+ };
|
|
+ struct v4l2_fmtdesc fmtdesc = {
|
|
+ .index = 0,
|
|
+ .type = type,
|
|
+ };
|
|
+ uint32_t pixelformat;
|
|
+ int i;
|
|
+
|
|
+ if (ioctl(ctx->video_fd, VIDIOC_G_FMT, &format) < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get capture format failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ pixelformat = V4L2_TYPE_IS_MULTIPLANAR(type) ? format.fmt.pix_mp.pixelformat : format.fmt.pix.pixelformat;
|
|
+
|
|
+ for (i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) {
|
|
+ if (pixelformat == v4l2_request_capture_pixelformats[i])
|
|
+ return v4l2_request_set_format(avctx, type, pixelformat, 0);
|
|
+ }
|
|
+
|
|
+ while (ioctl(ctx->video_fd, VIDIOC_ENUM_FMT, &fmtdesc) >= 0) {
|
|
+ for (i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) {
|
|
+ if (fmtdesc.pixelformat == v4l2_request_capture_pixelformats[i])
|
|
+ return v4l2_request_set_format(avctx, type, fmtdesc.pixelformat, 0);
|
|
+ }
|
|
+
|
|
+ fmtdesc.index++;
|
|
+ }
|
|
+#else
|
|
+ for (int i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) {
|
|
+ uint32_t pixelformat = v4l2_request_capture_pixelformats[i];
|
|
+ if (!v4l2_request_try_format(avctx, type, pixelformat))
|
|
+ return v4l2_request_set_format(avctx, type, pixelformat, 0);
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+static int v4l2_request_probe_video_device(struct udev_device *device, AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret = AVERROR(EINVAL);
|
|
+ struct v4l2_capability capability = {0};
|
|
+ unsigned int capabilities = 0;
|
|
+
|
|
+ const char *path = udev_device_get_devnode(device);
|
|
+ if (!path) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get video device devnode failed\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ctx->video_fd = open(path, O_RDWR | O_NONBLOCK, 0);
|
|
+ if (ctx->video_fd < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: opening %s failed, %s (%d)\n", __func__, path, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_QUERYCAP, &capability);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get video capability failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ if (capability.capabilities & V4L2_CAP_DEVICE_CAPS)
|
|
+ capabilities = capability.device_caps;
|
|
+ else
|
|
+ capabilities = capability.capabilities;
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p path=%s capabilities=%u\n", __func__, avctx, ctx, path, capabilities);
|
|
+
|
|
+ if ((capabilities & V4L2_CAP_STREAMING) != V4L2_CAP_STREAMING) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: missing required streaming capability\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ if ((capabilities & V4L2_CAP_VIDEO_M2M_MPLANE) == V4L2_CAP_VIDEO_M2M_MPLANE) {
|
|
+ ctx->output_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
|
|
+ ctx->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
|
|
+ } else if ((capabilities & V4L2_CAP_VIDEO_M2M) == V4L2_CAP_VIDEO_M2M) {
|
|
+ ctx->output_type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
|
|
+ ctx->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
|
|
+ } else {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: missing required mem2mem capability\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_try_format(avctx, ctx->output_type, pixelformat);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_WARNING, "%s: try output format failed\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_try_framesize(avctx, pixelformat);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_WARNING, "%s: try framesize failed\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_set_format(avctx, ctx->output_type, pixelformat, buffersize);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: set output format failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_set_controls(ctx, -1, control, count);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_select_capture_format(avctx);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_WARNING, "%s: select capture format failed\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
|
|
+ if (ctx->video_fd >= 0) {
|
|
+ close(ctx->video_fd);
|
|
+ ctx->video_fd = -1;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int v4l2_request_init_context(AVCodecContext *avctx)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret;
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_G_FMT, &ctx->format);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get capture format failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->format.type)) {
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u num_planes=%u\n", __func__, ctx->format.fmt.pix_mp.pixelformat, ctx->format.fmt.pix_mp.width, ctx->format.fmt.pix_mp.height, ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline, ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage, ctx->format.fmt.pix_mp.num_planes);
|
|
+ } else {
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u\n", __func__, ctx->format.fmt.pix.pixelformat, ctx->format.fmt.pix.width, ctx->format.fmt.pix.height, ctx->format.fmt.pix.bytesperline, ctx->format.fmt.pix.sizeimage);
|
|
+ }
|
|
+
|
|
+ ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_DRM);
|
|
+ if (ret < 0)
|
|
+ goto fail;
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMON, &ctx->output_type);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: output stream on failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMON, &ctx->format.type);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: capture stream on failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
|
|
+ ff_v4l2_request_uninit(avctx);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int v4l2_request_probe_media_device(struct udev_device *device, AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret;
|
|
+ struct media_device_info device_info = {0};
|
|
+ struct media_v2_topology topology = {0};
|
|
+ struct media_v2_interface *interfaces = NULL;
|
|
+ struct udev *udev = udev_device_get_udev(device);
|
|
+ struct udev_device *video_device;
|
|
+ dev_t devnum;
|
|
+
|
|
+ const char *path = udev_device_get_devnode(device);
|
|
+ if (!path) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get media device devnode failed\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ctx->media_fd = open(path, O_RDWR, 0);
|
|
+ if (ctx->media_fd < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: opening %s failed, %s (%d)\n", __func__, path, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = ioctl(ctx->media_fd, MEDIA_IOC_DEVICE_INFO, &device_info);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get media device info failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p path=%s driver=%s\n", __func__, avctx, ctx, path, device_info.driver);
|
|
+
|
|
+ ret = ioctl(ctx->media_fd, MEDIA_IOC_G_TOPOLOGY, &topology);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get media topology failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ if (topology.num_interfaces <= 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: media device has no interfaces\n", __func__);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ interfaces = av_mallocz(topology.num_interfaces * sizeof(struct media_v2_interface));
|
|
+ if (!interfaces) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: allocating media interface struct failed\n", __func__);
|
|
+ ret = AVERROR(ENOMEM);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ topology.ptr_interfaces = (__u64)(uintptr_t)interfaces;
|
|
+ ret = ioctl(ctx->media_fd, MEDIA_IOC_G_TOPOLOGY, &topology);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get media topology failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ ret = AVERROR(EINVAL);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ ret = AVERROR(EINVAL);
|
|
+ for (int i = 0; i < topology.num_interfaces; i++) {
|
|
+ if (interfaces[i].intf_type != MEDIA_INTF_T_V4L_VIDEO)
|
|
+ continue;
|
|
+
|
|
+ devnum = makedev(interfaces[i].devnode.major, interfaces[i].devnode.minor);
|
|
+ video_device = udev_device_new_from_devnum(udev, 'c', devnum);
|
|
+ if (!video_device) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: video_device=%p\n", __func__, video_device);
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_probe_video_device(video_device, avctx, pixelformat, buffersize, control, count);
|
|
+ udev_device_unref(video_device);
|
|
+
|
|
+ if (!ret)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ av_freep(&interfaces);
|
|
+ return ret;
|
|
+
|
|
+fail:
|
|
+ av_freep(&interfaces);
|
|
+ if (ctx->media_fd >= 0) {
|
|
+ close(ctx->media_fd);
|
|
+ ctx->media_fd = -1;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_init(AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret = AVERROR(EINVAL);
|
|
+ struct udev *udev;
|
|
+ struct udev_enumerate *enumerate;
|
|
+ struct udev_list_entry *devices;
|
|
+ struct udev_list_entry *entry;
|
|
+ struct udev_device *device;
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p hw_device_ctx=%p hw_frames_ctx=%p\n", __func__, avctx, avctx->hw_device_ctx, avctx->hw_frames_ctx);
|
|
+
|
|
+ ctx->media_fd = -1;
|
|
+ ctx->video_fd = -1;
|
|
+
|
|
+ udev = udev_new();
|
|
+ if (!udev) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: allocating udev context failed\n", __func__);
|
|
+ ret = AVERROR(ENOMEM);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ enumerate = udev_enumerate_new(udev);
|
|
+ if (!enumerate) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: allocating udev enumerator failed\n", __func__);
|
|
+ ret = AVERROR(ENOMEM);
|
|
+ goto fail;
|
|
+ }
|
|
+
|
|
+ udev_enumerate_add_match_subsystem(enumerate, "media");
|
|
+ udev_enumerate_scan_devices(enumerate);
|
|
+
|
|
+ devices = udev_enumerate_get_list_entry(enumerate);
|
|
+ udev_list_entry_foreach(entry, devices) {
|
|
+ const char *path = udev_list_entry_get_name(entry);
|
|
+ if (!path)
|
|
+ continue;
|
|
+
|
|
+ device = udev_device_new_from_syspath(udev, path);
|
|
+ if (!device)
|
|
+ continue;
|
|
+
|
|
+ ret = v4l2_request_probe_media_device(device, avctx, pixelformat, buffersize, control, count);
|
|
+ udev_device_unref(device);
|
|
+
|
|
+ if (!ret)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ udev_enumerate_unref(enumerate);
|
|
+
|
|
+ if (!ret)
|
|
+ ret = v4l2_request_init_context(avctx);
|
|
+
|
|
+fail:
|
|
+ udev_unref(udev);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_uninit(AVCodecContext *avctx)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret;
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p\n", __func__, avctx, ctx);
|
|
+
|
|
+ if (ctx->video_fd >= 0) {
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMOFF, &ctx->output_type);
|
|
+ if (ret < 0)
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: output stream off failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMOFF, &ctx->format.type);
|
|
+ if (ret < 0)
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: capture stream off failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ }
|
|
+
|
|
+ if (avctx->hw_frames_ctx) {
|
|
+ AVHWFramesContext *hwfc = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
|
|
+ av_buffer_pool_flush(hwfc->pool);
|
|
+ }
|
|
+
|
|
+ if (ctx->video_fd >= 0)
|
|
+ close(ctx->video_fd);
|
|
+
|
|
+ if (ctx->media_fd >= 0)
|
|
+ close(ctx->media_fd);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int v4l2_request_buffer_alloc(AVCodecContext *avctx, V4L2RequestBuffer *buf, enum v4l2_buf_type type)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret;
|
|
+ struct v4l2_plane planes[1] = {};
|
|
+ struct v4l2_create_buffers buffers = {
|
|
+ .count = 1,
|
|
+ .memory = V4L2_MEMORY_MMAP,
|
|
+ .format.type = type,
|
|
+ };
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p buf=%p type=%u\n", __func__, avctx, buf, type);
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_G_FMT, &buffers.format);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: get format failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(buffers.format.type)) {
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u num_planes=%u\n", __func__, buffers.format.fmt.pix_mp.pixelformat, buffers.format.fmt.pix_mp.width, buffers.format.fmt.pix_mp.height, buffers.format.fmt.pix_mp.plane_fmt[0].bytesperline, buffers.format.fmt.pix_mp.plane_fmt[0].sizeimage, buffers.format.fmt.pix_mp.num_planes);
|
|
+ } else {
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u\n", __func__, buffers.format.fmt.pix.pixelformat, buffers.format.fmt.pix.width, buffers.format.fmt.pix.height, buffers.format.fmt.pix.bytesperline, buffers.format.fmt.pix.sizeimage);
|
|
+ }
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_CREATE_BUFS, &buffers);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: create buffers failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
|
|
+ buf->width = buffers.format.fmt.pix_mp.width;
|
|
+ buf->height = buffers.format.fmt.pix_mp.height;
|
|
+ buf->size = buffers.format.fmt.pix_mp.plane_fmt[0].sizeimage;
|
|
+ buf->buffer.length = 1;
|
|
+ buf->buffer.m.planes = planes;
|
|
+ } else {
|
|
+ buf->width = buffers.format.fmt.pix.width;
|
|
+ buf->height = buffers.format.fmt.pix.height;
|
|
+ buf->size = buffers.format.fmt.pix.sizeimage;
|
|
+ }
|
|
+
|
|
+ buf->index = buffers.index;
|
|
+ buf->capabilities = buffers.capabilities;
|
|
+ buf->used = 0;
|
|
+
|
|
+ buf->buffer.type = type;
|
|
+ buf->buffer.memory = V4L2_MEMORY_MMAP;
|
|
+ buf->buffer.index = buf->index;
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_QUERYBUF, &buf->buffer);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: query buffer %d failed, %s (%d)\n", __func__, buf->index, strerror(errno), errno);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ buf->buffer.timestamp.tv_usec = buf->index + 1;
|
|
+
|
|
+ if (V4L2_TYPE_IS_OUTPUT(type)) {
|
|
+ void *addr = mmap(NULL, buf->size, PROT_READ | PROT_WRITE, MAP_SHARED, ctx->video_fd, V4L2_TYPE_IS_MULTIPLANAR(type) ? buf->buffer.m.planes[0].m.mem_offset : buf->buffer.m.offset);
|
|
+ if (addr == MAP_FAILED) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: mmap failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ return -1;
|
|
+ }
|
|
+
|
|
+ buf->addr = (uint8_t*)addr;
|
|
+ } else {
|
|
+ struct v4l2_exportbuffer exportbuffer = {
|
|
+ .type = type,
|
|
+ .index = buf->index,
|
|
+ .flags = O_RDONLY,
|
|
+ };
|
|
+
|
|
+ ret = ioctl(ctx->video_fd, VIDIOC_EXPBUF, &exportbuffer);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: export buffer %d failed, %s (%d)\n", __func__, buf->index, strerror(errno), errno);
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ buf->fd = exportbuffer.fd;
|
|
+ }
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: buf=%p index=%d fd=%d addr=%p width=%u height=%u size=%u\n", __func__, buf, buf->index, buf->fd, buf->addr, buf->width, buf->height, buf->size);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void v4l2_request_buffer_free(V4L2RequestBuffer *buf)
|
|
+{
|
|
+ av_log(NULL, AV_LOG_DEBUG, "%s: buf=%p index=%d fd=%d addr=%p width=%u height=%u size=%u\n", __func__, buf, buf->index, buf->fd, buf->addr, buf->width, buf->height, buf->size);
|
|
+
|
|
+ if (buf->addr)
|
|
+ munmap(buf->addr, buf->size);
|
|
+
|
|
+ if (buf->fd >= 0)
|
|
+ close(buf->fd);
|
|
+}
|
|
+
|
|
+static void v4l2_request_frame_free(void *opaque, uint8_t *data)
|
|
+{
|
|
+ AVCodecContext *avctx = opaque;
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)data;
|
|
+
|
|
+ av_log(NULL, AV_LOG_DEBUG, "%s: avctx=%p data=%p request_fd=%d\n", __func__, avctx, data, req->request_fd);
|
|
+
|
|
+ if (req->request_fd >= 0)
|
|
+ close(req->request_fd);
|
|
+
|
|
+ v4l2_request_buffer_free(&req->capture);
|
|
+ v4l2_request_buffer_free(&req->output);
|
|
+
|
|
+ av_free(data);
|
|
+}
|
|
+
|
|
+static AVBufferRef *v4l2_request_frame_alloc(void *opaque, size_t size)
|
|
+{
|
|
+ AVCodecContext *avctx = opaque;
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ V4L2RequestDescriptor *req;
|
|
+ AVBufferRef *ref;
|
|
+ uint8_t *data;
|
|
+ int ret;
|
|
+
|
|
+ data = av_mallocz(size);
|
|
+ if (!data)
|
|
+ return NULL;
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p size=%d data=%p\n", __func__, avctx, size, data);
|
|
+
|
|
+ ref = av_buffer_create(data, size, v4l2_request_frame_free, avctx, 0);
|
|
+ if (!ref) {
|
|
+ av_freep(&data);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ req = (V4L2RequestDescriptor*)data;
|
|
+ req->request_fd = -1;
|
|
+ req->output.fd = -1;
|
|
+ req->capture.fd = -1;
|
|
+
|
|
+ ret = v4l2_request_buffer_alloc(avctx, &req->output, ctx->output_type);
|
|
+ if (ret < 0) {
|
|
+ av_buffer_unref(&ref);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_buffer_alloc(avctx, &req->capture, ctx->format.type);
|
|
+ if (ret < 0) {
|
|
+ av_buffer_unref(&ref);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ ret = ioctl(ctx->media_fd, MEDIA_IOC_REQUEST_ALLOC, &req->request_fd);
|
|
+ if (ret < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: request alloc failed, %s (%d)\n", __func__, strerror(errno), errno);
|
|
+ av_buffer_unref(&ref);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p size=%d data=%p request_fd=%d\n", __func__, avctx, size, data, req->request_fd);
|
|
+ return ref;
|
|
+}
|
|
+
|
|
+static void v4l2_request_pool_free(void *opaque)
|
|
+{
|
|
+ av_log(NULL, AV_LOG_DEBUG, "%s: opaque=%p\n", __func__, opaque);
|
|
+}
|
|
+
|
|
+static void v4l2_request_hwframe_ctx_free(AVHWFramesContext *hwfc)
|
|
+{
|
|
+ av_log(NULL, AV_LOG_DEBUG, "%s: hwfc=%p pool=%p\n", __func__, hwfc, hwfc->pool);
|
|
+
|
|
+ av_buffer_pool_flush(hwfc->pool);
|
|
+ av_buffer_pool_uninit(&hwfc->pool);
|
|
+}
|
|
+
|
|
+int ff_v4l2_request_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
|
|
+{
|
|
+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ AVHWFramesContext *hwfc = (AVHWFramesContext*)hw_frames_ctx->data;
|
|
+
|
|
+ hwfc->format = AV_PIX_FMT_DRM_PRIME;
|
|
+ hwfc->sw_format = AV_PIX_FMT_NV12;
|
|
+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->format.type)) {
|
|
+ hwfc->width = ctx->format.fmt.pix_mp.width;
|
|
+ hwfc->height = ctx->format.fmt.pix_mp.height;
|
|
+ } else {
|
|
+ hwfc->width = ctx->format.fmt.pix.width;
|
|
+ hwfc->height = ctx->format.fmt.pix.height;
|
|
+ }
|
|
+
|
|
+ hwfc->pool = av_buffer_pool_init2(sizeof(V4L2RequestDescriptor), avctx, v4l2_request_frame_alloc, v4l2_request_pool_free);
|
|
+ if (!hwfc->pool)
|
|
+ return AVERROR(ENOMEM);
|
|
+
|
|
+ hwfc->free = v4l2_request_hwframe_ctx_free;
|
|
+
|
|
+ hwfc->initial_pool_size = 1;
|
|
+
|
|
+ switch (avctx->codec_id) {
|
|
+ case AV_CODEC_ID_VP9:
|
|
+ hwfc->initial_pool_size += 8;
|
|
+ break;
|
|
+ case AV_CODEC_ID_VP8:
|
|
+ hwfc->initial_pool_size += 3;
|
|
+ break;
|
|
+ default:
|
|
+ hwfc->initial_pool_size += 2;
|
|
+ }
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p hw_frames_ctx=%p hwfc=%p pool=%p width=%d height=%d initial_pool_size=%d\n", __func__, avctx, ctx, hw_frames_ctx, hwfc, hwfc->pool, hwfc->width, hwfc->height, hwfc->initial_pool_size);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/libavcodec/v4l2_request.h b/libavcodec/v4l2_request.h
|
|
new file mode 100644
|
|
index 0000000000..58d2aa70af
|
|
--- /dev/null
|
|
+++ b/libavcodec/v4l2_request.h
|
|
@@ -0,0 +1,77 @@
|
|
+/*
|
|
+ * This file is part of FFmpeg.
|
|
+ *
|
|
+ * FFmpeg is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * FFmpeg is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with FFmpeg; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
+ */
|
|
+
|
|
+#ifndef AVCODEC_V4L2_REQUEST_H
|
|
+#define AVCODEC_V4L2_REQUEST_H
|
|
+
|
|
+#include <linux/videodev2.h>
|
|
+
|
|
+#include "libavutil/hwcontext_drm.h"
|
|
+
|
|
+typedef struct V4L2RequestContext {
|
|
+ int video_fd;
|
|
+ int media_fd;
|
|
+ enum v4l2_buf_type output_type;
|
|
+ struct v4l2_format format;
|
|
+ int timestamp;
|
|
+} V4L2RequestContext;
|
|
+
|
|
+typedef struct V4L2RequestBuffer {
|
|
+ int index;
|
|
+ int fd;
|
|
+ uint8_t *addr;
|
|
+ uint32_t width;
|
|
+ uint32_t height;
|
|
+ uint32_t size;
|
|
+ uint32_t used;
|
|
+ uint32_t capabilities;
|
|
+ struct v4l2_buffer buffer;
|
|
+} V4L2RequestBuffer;
|
|
+
|
|
+typedef struct V4L2RequestDescriptor {
|
|
+ AVDRMFrameDescriptor drm;
|
|
+ int request_fd;
|
|
+ V4L2RequestBuffer output;
|
|
+ V4L2RequestBuffer capture;
|
|
+} V4L2RequestDescriptor;
|
|
+
|
|
+uint64_t ff_v4l2_request_get_capture_timestamp(AVFrame *frame);
|
|
+
|
|
+int ff_v4l2_request_reset_frame(AVCodecContext *avctx, AVFrame *frame);
|
|
+
|
|
+int ff_v4l2_request_append_output_buffer(AVCodecContext *avctx, AVFrame *frame, const uint8_t *data, uint32_t size);
|
|
+
|
|
+int ff_v4l2_request_set_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count);
|
|
+
|
|
+int ff_v4l2_request_get_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count);
|
|
+
|
|
+int ff_v4l2_request_query_control(AVCodecContext *avctx, struct v4l2_query_ext_ctrl *control);
|
|
+
|
|
+int ff_v4l2_request_query_control_default_value(AVCodecContext *avctx, uint32_t id);
|
|
+
|
|
+int ff_v4l2_request_decode_slice(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice);
|
|
+
|
|
+int ff_v4l2_request_decode_frame(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count);
|
|
+
|
|
+int ff_v4l2_request_init(AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count);
|
|
+
|
|
+int ff_v4l2_request_uninit(AVCodecContext *avctx);
|
|
+
|
|
+int ff_v4l2_request_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx);
|
|
+
|
|
+#endif /* AVCODEC_V4L2_REQUEST_H */
|
|
|
|
From 04ba66bab9951753498cb21080b53826dcec7a26 Mon Sep 17 00:00:00 2001
From: Boris Brezillon <boris.brezillon@collabora.com>
Date: Wed, 22 May 2019 14:44:22 +0200
Subject: [PATCH 03/13] h264dec: add ref_pic_marking and pic_order_cnt bit_size
 to slice context

Used by V4L2 request API h264 hwaccel

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
---
libavcodec/h264_slice.c | 6 +++++-
libavcodec/h264dec.h | 2 ++
2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
|
|
index 7767e16cf1..e782a69200 100644
|
|
--- a/libavcodec/h264_slice.c
|
|
+++ b/libavcodec/h264_slice.c
|
|
@@ -1670,7 +1670,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
|
|
unsigned int slice_type, tmp, i;
|
|
int field_pic_flag, bottom_field_flag;
|
|
int first_slice = sl == h->slice_ctx && !h->current_slice;
|
|
- int picture_structure;
|
|
+ int picture_structure, pos;
|
|
|
|
if (first_slice)
|
|
av_assert0(!h->setup_finished);
|
|
@@ -1761,6 +1761,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
|
|
|
|
sl->poc_lsb = 0;
|
|
sl->delta_poc_bottom = 0;
|
|
+ pos = sl->gb.index;
|
|
if (sps->poc_type == 0) {
|
|
sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb);
|
|
|
|
@@ -1775,6 +1776,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
|
|
if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME)
|
|
sl->delta_poc[1] = get_se_golomb(&sl->gb);
|
|
}
|
|
+ sl->pic_order_cnt_bit_size = sl->gb.index - pos;
|
|
|
|
sl->redundant_pic_count = 0;
|
|
if (pps->redundant_pic_cnt_present)
|
|
@@ -1814,9 +1816,11 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl,
|
|
|
|
sl->explicit_ref_marking = 0;
|
|
if (nal->ref_idc) {
|
|
+ pos = sl->gb.index;
|
|
ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx);
|
|
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
|
|
return AVERROR_INVALIDDATA;
|
|
+ sl->ref_pic_marking_bit_size = sl->gb.index - pos;
|
|
}
|
|
|
|
if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) {
|
|
diff --git a/libavcodec/h264dec.h b/libavcodec/h264dec.h
|
|
index 9a1ec1bace..a87415f822 100644
|
|
--- a/libavcodec/h264dec.h
|
|
+++ b/libavcodec/h264dec.h
|
|
@@ -314,6 +314,7 @@ typedef struct H264SliceContext {
|
|
MMCO mmco[H264_MAX_MMCO_COUNT];
|
|
int nb_mmco;
|
|
int explicit_ref_marking;
|
|
+ int ref_pic_marking_bit_size;
|
|
|
|
int frame_num;
|
|
int idr_pic_id;
|
|
@@ -322,6 +323,7 @@ typedef struct H264SliceContext {
|
|
int delta_poc[2];
|
|
int curr_pic_num;
|
|
int max_pic_num;
|
|
+ int pic_order_cnt_bit_size;
|
|
} H264SliceContext;
|
|
|
|
/**
|
|
|
|
From 08b9d91572ac849db0680eb5f172920f6a0bc961 Mon Sep 17 00:00:00 2001
From: Jernej Skrabec <jernej.skrabec@siol.net>
Date: Sat, 15 Dec 2018 22:32:16 +0100
Subject: [PATCH 04/13] Add V4L2 request API h264 hwaccel

Signed-off-by: Jernej Skrabec <jernej.skrabec@siol.net>
Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
---
configure | 3 +
libavcodec/Makefile | 1 +
libavcodec/h264_slice.c | 4 +
libavcodec/h264dec.c | 3 +
libavcodec/hwaccels.h | 1 +
libavcodec/v4l2_request_h264.c | 456 +++++++++++++++++++++++++++++++++
6 files changed, 468 insertions(+)
create mode 100644 libavcodec/v4l2_request_h264.c

diff --git a/configure b/configure
|
|
index 6167b122e0..b3ca83bf2b 100755
|
|
--- a/configure
|
|
+++ b/configure
|
|
@@ -3033,6 +3033,8 @@ h264_dxva2_hwaccel_deps="dxva2"
|
|
h264_dxva2_hwaccel_select="h264_decoder"
|
|
h264_nvdec_hwaccel_deps="nvdec"
|
|
h264_nvdec_hwaccel_select="h264_decoder"
|
|
+h264_v4l2request_hwaccel_deps="v4l2_request h264_v4l2_request"
|
|
+h264_v4l2request_hwaccel_select="h264_decoder"
|
|
h264_vaapi_hwaccel_deps="vaapi"
|
|
h264_vaapi_hwaccel_select="h264_decoder"
|
|
h264_vdpau_hwaccel_deps="vdpau"
|
|
@@ -6887,6 +6889,7 @@ if enabled v4l2_m2m; then
|
|
fi
|
|
|
|
check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns
|
|
+check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;"
|
|
|
|
check_headers sys/videoio.h
|
|
test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
|
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
|
|
index 0148242ed0..26004171b3 100644
|
|
--- a/libavcodec/Makefile
|
|
+++ b/libavcodec/Makefile
|
|
@@ -990,6 +990,7 @@ OBJS-$(CONFIG_H264_D3D11VA_HWACCEL) += dxva2_h264.o
|
|
OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o
|
|
OBJS-$(CONFIG_H264_NVDEC_HWACCEL) += nvdec_h264.o
|
|
OBJS-$(CONFIG_H264_QSV_HWACCEL) += qsvdec.o
|
|
+OBJS-$(CONFIG_H264_V4L2REQUEST_HWACCEL) += v4l2_request_h264.o
|
|
OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o
|
|
OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o
|
|
OBJS-$(CONFIG_H264_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
|
|
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
|
|
index e782a69200..3d0d45b2a3 100644
|
|
--- a/libavcodec/h264_slice.c
|
|
+++ b/libavcodec/h264_slice.c
|
|
@@ -778,6 +778,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
|
#define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \
|
|
(CONFIG_H264_D3D11VA_HWACCEL * 2) + \
|
|
CONFIG_H264_NVDEC_HWACCEL + \
|
|
+ CONFIG_H264_V4L2REQUEST_HWACCEL + \
|
|
CONFIG_H264_VAAPI_HWACCEL + \
|
|
CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \
|
|
CONFIG_H264_VDPAU_HWACCEL)
|
|
@@ -867,6 +868,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
|
#endif
|
|
#if CONFIG_H264_VAAPI_HWACCEL
|
|
*fmt++ = AV_PIX_FMT_VAAPI;
|
|
+#endif
|
|
+#if CONFIG_H264_V4L2REQUEST_HWACCEL
|
|
+ *fmt++ = AV_PIX_FMT_DRM_PRIME;
|
|
#endif
|
|
if (h->avctx->codec->pix_fmts)
|
|
choices = h->avctx->codec->pix_fmts;
|
|
diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c
|
|
index 2d691731c5..33cc25e1d3 100644
|
|
--- a/libavcodec/h264dec.c
|
|
+++ b/libavcodec/h264dec.c
|
|
@@ -1093,6 +1093,9 @@ const FFCodec ff_h264_decoder = {
|
|
#endif
|
|
#if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
|
|
HWACCEL_VIDEOTOOLBOX(h264),
|
|
+#endif
|
|
+#if CONFIG_H264_V4L2REQUEST_HWACCEL
|
|
+ HWACCEL_V4L2REQUEST(h264),
|
|
#endif
|
|
NULL
|
|
},
|
|
diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h
|
|
index aca55831f3..014b95f0c0 100644
|
|
--- a/libavcodec/hwaccels.h
|
|
+++ b/libavcodec/hwaccels.h
|
|
@@ -33,6 +33,7 @@ extern const AVHWAccel ff_h264_d3d11va_hwaccel;
|
|
extern const AVHWAccel ff_h264_d3d11va2_hwaccel;
|
|
extern const AVHWAccel ff_h264_dxva2_hwaccel;
|
|
extern const AVHWAccel ff_h264_nvdec_hwaccel;
|
|
+extern const AVHWAccel ff_h264_v4l2request_hwaccel;
|
|
extern const AVHWAccel ff_h264_vaapi_hwaccel;
|
|
extern const AVHWAccel ff_h264_vdpau_hwaccel;
|
|
extern const AVHWAccel ff_h264_videotoolbox_hwaccel;
|
|
diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c
|
|
new file mode 100644
|
|
index 0000000000..c960c9c887
|
|
--- /dev/null
|
|
+++ b/libavcodec/v4l2_request_h264.c
|
|
@@ -0,0 +1,456 @@
|
|
+/*
|
|
+ * This file is part of FFmpeg.
|
|
+ *
|
|
+ * FFmpeg is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * FFmpeg is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with FFmpeg; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
+ */
|
|
+
|
|
+#include "h264dec.h"
|
|
+#include "hwconfig.h"
|
|
+#include "internal.h"
|
|
+#include "v4l2_request.h"
|
|
+
|
|
+typedef struct V4L2RequestControlsH264 {
|
|
+ struct v4l2_ctrl_h264_sps sps;
|
|
+ struct v4l2_ctrl_h264_pps pps;
|
|
+ struct v4l2_ctrl_h264_scaling_matrix scaling_matrix;
|
|
+ struct v4l2_ctrl_h264_decode_params decode_params;
|
|
+ struct v4l2_ctrl_h264_slice_params slice_params;
|
|
+ struct v4l2_ctrl_h264_pred_weights pred_weights;
|
|
+ int pred_weights_required;
|
|
+ int first_slice;
|
|
+ int num_slices;
|
|
+} V4L2RequestControlsH264;
|
|
+
|
|
+typedef struct V4L2RequestContextH264 {
|
|
+ V4L2RequestContext base;
|
|
+ int decode_mode;
|
|
+ int start_code;
|
|
+} V4L2RequestContextH264;
|
|
+
|
|
+static uint8_t nalu_slice_start_code[] = { 0x00, 0x00, 0x01 };
|
|
+
|
|
+static void fill_weight_factors(struct v4l2_h264_weight_factors *factors, int list, const H264SliceContext *sl)
|
|
+{
|
|
+ for (int i = 0; i < sl->ref_count[list]; i++) {
|
|
+ if (sl->pwt.luma_weight_flag[list]) {
|
|
+ factors->luma_weight[i] = sl->pwt.luma_weight[i][list][0];
|
|
+ factors->luma_offset[i] = sl->pwt.luma_weight[i][list][1];
|
|
+ } else {
|
|
+ factors->luma_weight[i] = 1 << sl->pwt.luma_log2_weight_denom;
|
|
+ factors->luma_offset[i] = 0;
|
|
+ }
|
|
+ for (int j = 0; j < 2; j++) {
|
|
+ if (sl->pwt.chroma_weight_flag[list]) {
|
|
+ factors->chroma_weight[i][j] = sl->pwt.chroma_weight[i][list][j][0];
|
|
+ factors->chroma_offset[i][j] = sl->pwt.chroma_weight[i][list][j][1];
|
|
+ } else {
|
|
+ factors->chroma_weight[i][j] = 1 << sl->pwt.chroma_log2_weight_denom;
|
|
+ factors->chroma_offset[i][j] = 0;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static void fill_dpb_entry(struct v4l2_h264_dpb_entry *entry, const H264Picture *pic)
|
|
+{
|
|
+ entry->reference_ts = ff_v4l2_request_get_capture_timestamp(pic->f);
|
|
+ entry->pic_num = pic->pic_id;
|
|
+ entry->frame_num = pic->long_ref ? pic->pic_id : pic->frame_num;
|
|
+ entry->fields = pic->reference & V4L2_H264_FRAME_REF;
|
|
+ entry->flags = V4L2_H264_DPB_ENTRY_FLAG_VALID;
|
|
+ if (entry->fields)
|
|
+ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_ACTIVE;
|
|
+ if (pic->long_ref)
|
|
+ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM;
|
|
+ if (pic->field_picture)
|
|
+ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_FIELD;
|
|
+ if (pic->field_poc[0] != INT_MAX)
|
|
+ entry->top_field_order_cnt = pic->field_poc[0];
|
|
+ if (pic->field_poc[1] != INT_MAX)
|
|
+ entry->bottom_field_order_cnt = pic->field_poc[1];
|
|
+}
|
|
+
|
|
+static void fill_dpb(struct v4l2_ctrl_h264_decode_params *decode, const H264Context *h)
|
|
+{
|
|
+ int entries = 0;
|
|
+
|
|
+ for (int i = 0; i < h->short_ref_count; i++) {
|
|
+ const H264Picture *pic = h->short_ref[i];
|
|
+ if (pic && (pic->field_poc[0] != INT_MAX || pic->field_poc[1] != INT_MAX))
|
|
+ fill_dpb_entry(&decode->dpb[entries++], pic);
|
|
+ }
|
|
+
|
|
+ if (!h->long_ref_count)
|
|
+ return;
|
|
+
|
|
+ for (int i = 0; i < FF_ARRAY_ELEMS(h->long_ref); i++) {
|
|
+ const H264Picture *pic = h->long_ref[i];
|
|
+ if (pic && (pic->field_poc[0] != INT_MAX || pic->field_poc[1] != INT_MAX))
|
|
+ fill_dpb_entry(&decode->dpb[entries++], pic);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void fill_ref_list(struct v4l2_h264_reference *reference, struct v4l2_ctrl_h264_decode_params *decode, const H264Ref *ref)
|
|
+{
|
|
+ uint64_t timestamp;
|
|
+
|
|
+ if (!ref->parent)
|
|
+ return;
|
|
+
|
|
+ timestamp = ff_v4l2_request_get_capture_timestamp(ref->parent->f);
|
|
+
|
|
+ for (uint8_t i = 0; i < FF_ARRAY_ELEMS(decode->dpb); i++) {
|
|
+ struct v4l2_h264_dpb_entry *entry = &decode->dpb[i];
|
|
+ if ((entry->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID) &&
|
|
+ entry->reference_ts == timestamp) {
|
|
+ reference->fields = ref->reference & V4L2_H264_FRAME_REF;
|
|
+ reference->index = i;
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static void fill_sps(struct v4l2_ctrl_h264_sps *ctrl, const H264Context *h)
|
|
+{
|
|
+ const SPS *sps = h->ps.sps;
|
|
+
|
|
+ *ctrl = (struct v4l2_ctrl_h264_sps) {
|
|
+ .profile_idc = sps->profile_idc,
|
|
+ .constraint_set_flags = sps->constraint_set_flags,
|
|
+ .level_idc = sps->level_idc,
|
|
+ .seq_parameter_set_id = sps->sps_id,
|
|
+ .chroma_format_idc = sps->chroma_format_idc,
|
|
+ .bit_depth_luma_minus8 = sps->bit_depth_luma - 8,
|
|
+ .bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8,
|
|
+ .log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4,
|
|
+ .pic_order_cnt_type = sps->poc_type,
|
|
+ .log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4,
|
|
+ .max_num_ref_frames = sps->ref_frame_count,
|
|
+ .num_ref_frames_in_pic_order_cnt_cycle = sps->poc_cycle_length,
|
|
+ .offset_for_non_ref_pic = sps->offset_for_non_ref_pic,
|
|
+ .offset_for_top_to_bottom_field = sps->offset_for_top_to_bottom_field,
|
|
+ .pic_width_in_mbs_minus1 = h->mb_width - 1,
|
|
+ .pic_height_in_map_units_minus1 = sps->frame_mbs_only_flag ? h->mb_height - 1 : h->mb_height / 2 - 1,
|
|
+ };
|
|
+
|
|
+ if (sps->poc_cycle_length > 0 && sps->poc_cycle_length <= 255)
|
|
+ memcpy(ctrl->offset_for_ref_frame, sps->offset_for_ref_frame, sps->poc_cycle_length * sizeof(ctrl->offset_for_ref_frame[0]));
|
|
+
|
|
+ if (sps->residual_color_transform_flag)
|
|
+ ctrl->flags |= V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE;
|
|
+ if (sps->transform_bypass)
|
|
+ ctrl->flags |= V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS;
|
|
+ if (sps->delta_pic_order_always_zero_flag)
|
|
+ ctrl->flags |= V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO;
|
|
+ if (sps->gaps_in_frame_num_allowed_flag)
|
|
+ ctrl->flags |= V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED;
|
|
+ if (sps->frame_mbs_only_flag)
|
|
+ ctrl->flags |= V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY;
|
|
+ if (sps->mb_aff)
|
|
+ ctrl->flags |= V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD;
|
|
+ if (sps->direct_8x8_inference_flag)
|
|
+ ctrl->flags |= V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE;
|
|
+}
|
|
+
|
|
+static void fill_pps(struct v4l2_ctrl_h264_pps *ctrl, const H264Context *h)
|
|
+{
|
|
+ const SPS *sps = h->ps.sps;
|
|
+ const PPS *pps = h->ps.pps;
|
|
+ const H264SliceContext *sl = &h->slice_ctx[0];
|
|
+ int qp_bd_offset = 6 * (sps->bit_depth_luma - 8);
|
|
+
|
|
+ *ctrl = (struct v4l2_ctrl_h264_pps) {
|
|
+ .pic_parameter_set_id = sl->pps_id,
|
|
+ .seq_parameter_set_id = pps->sps_id,
|
|
+ .num_slice_groups_minus1 = pps->slice_group_count - 1,
|
|
+ .num_ref_idx_l0_default_active_minus1 = pps->ref_count[0] - 1,
|
|
+ .num_ref_idx_l1_default_active_minus1 = pps->ref_count[1] - 1,
|
|
+ .weighted_bipred_idc = pps->weighted_bipred_idc,
|
|
+ .pic_init_qp_minus26 = pps->init_qp - 26 - qp_bd_offset,
|
|
+ .pic_init_qs_minus26 = pps->init_qs - 26 - qp_bd_offset,
|
|
+ .chroma_qp_index_offset = pps->chroma_qp_index_offset[0],
|
|
+ .second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1],
|
|
+ };
|
|
+
|
|
+ if (pps->cabac)
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE;
|
|
+ if (pps->pic_order_present)
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT;
|
|
+ if (pps->weighted_pred)
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_WEIGHTED_PRED;
|
|
+ if (pps->deblocking_filter_parameters_present)
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT;
|
|
+ if (pps->constrained_intra_pred)
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED;
|
|
+ if (pps->redundant_pic_cnt_present)
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT;
|
|
+ if (pps->transform_8x8_mode)
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE;
|
|
+
|
|
+ /* FFmpeg always provides a scaling matrix */
|
|
+ ctrl->flags |= V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT;
|
|
+}
|
|
+
|
|
+static int v4l2_request_h264_start_frame(AVCodecContext *avctx,
|
|
+ av_unused const uint8_t *buffer,
|
|
+ av_unused uint32_t size)
|
|
+{
|
|
+ const H264Context *h = avctx->priv_data;
|
|
+ const PPS *pps = h->ps.pps;
|
|
+ const SPS *sps = h->ps.sps;
|
|
+ const H264SliceContext *sl = &h->slice_ctx[0];
|
|
+ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private;
|
|
+
|
|
+ fill_sps(&controls->sps, h);
|
|
+ fill_pps(&controls->pps, h);
|
|
+
|
|
+ memcpy(controls->scaling_matrix.scaling_list_4x4, pps->scaling_matrix4, sizeof(controls->scaling_matrix.scaling_list_4x4));
|
|
+ memcpy(controls->scaling_matrix.scaling_list_8x8[0], pps->scaling_matrix8[0], sizeof(controls->scaling_matrix.scaling_list_8x8[0]));
|
|
+ memcpy(controls->scaling_matrix.scaling_list_8x8[1], pps->scaling_matrix8[3], sizeof(controls->scaling_matrix.scaling_list_8x8[1]));
|
|
+
|
|
+ if (sps->chroma_format_idc == 3) {
|
|
+ memcpy(controls->scaling_matrix.scaling_list_8x8[2], pps->scaling_matrix8[1], sizeof(controls->scaling_matrix.scaling_list_8x8[2]));
|
|
+ memcpy(controls->scaling_matrix.scaling_list_8x8[3], pps->scaling_matrix8[4], sizeof(controls->scaling_matrix.scaling_list_8x8[3]));
|
|
+ memcpy(controls->scaling_matrix.scaling_list_8x8[4], pps->scaling_matrix8[2], sizeof(controls->scaling_matrix.scaling_list_8x8[4]));
|
|
+ memcpy(controls->scaling_matrix.scaling_list_8x8[5], pps->scaling_matrix8[5], sizeof(controls->scaling_matrix.scaling_list_8x8[5]));
|
|
+ }
|
|
+
|
|
+ controls->decode_params = (struct v4l2_ctrl_h264_decode_params) {
|
|
+ .nal_ref_idc = h->nal_ref_idc,
|
|
+ .frame_num = h->poc.frame_num,
|
|
+ .top_field_order_cnt = h->cur_pic_ptr->field_poc[0] != INT_MAX ? h->cur_pic_ptr->field_poc[0] : 0,
|
|
+ .bottom_field_order_cnt = h->cur_pic_ptr->field_poc[1] != INT_MAX ? h->cur_pic_ptr->field_poc[1] : 0,
|
|
+ .idr_pic_id = sl->idr_pic_id,
|
|
+ .pic_order_cnt_lsb = sl->poc_lsb,
|
|
+ .delta_pic_order_cnt_bottom = sl->delta_poc_bottom,
|
|
+ .delta_pic_order_cnt0 = sl->delta_poc[0],
|
|
+ .delta_pic_order_cnt1 = sl->delta_poc[1],
|
|
+ /* Size in bits of dec_ref_pic_marking() syntax element. */
|
|
+ .dec_ref_pic_marking_bit_size = sl->ref_pic_marking_bit_size,
|
|
+ /* Size in bits of pic order count syntax. */
|
|
+ .pic_order_cnt_bit_size = sl->pic_order_cnt_bit_size,
|
|
+ .slice_group_change_cycle = 0, /* slice group not supported by FFmpeg */
|
|
+ };
|
|
+
|
|
+ if (h->picture_idr)
|
|
+ controls->decode_params.flags |= V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC;
|
|
+ if (FIELD_PICTURE(h))
|
|
+ controls->decode_params.flags |= V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC;
|
|
+ if (h->picture_structure == PICT_BOTTOM_FIELD)
|
|
+ controls->decode_params.flags |= V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD;
|
|
+
|
|
+ fill_dpb(&controls->decode_params, h);
|
|
+
|
|
+ controls->first_slice = 1;
|
|
+ controls->num_slices = 0;
|
|
+
|
|
+ return ff_v4l2_request_reset_frame(avctx, h->cur_pic_ptr->f);
|
|
+}
|
|
+
|
|
+static int v4l2_request_h264_queue_decode(AVCodecContext *avctx, int last_slice)
|
|
+{
|
|
+ const H264Context *h = avctx->priv_data;
|
|
+ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private;
|
|
+ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_H264_SPS,
|
|
+ .ptr = &controls->sps,
|
|
+ .size = sizeof(controls->sps),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_H264_PPS,
|
|
+ .ptr = &controls->pps,
|
|
+ .size = sizeof(controls->pps),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
|
|
+ .ptr = &controls->scaling_matrix,
|
|
+ .size = sizeof(controls->scaling_matrix),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
|
|
+ .ptr = &controls->decode_params,
|
|
+ .size = sizeof(controls->decode_params),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_H264_SLICE_PARAMS,
|
|
+ .ptr = &controls->slice_params,
|
|
+ .size = sizeof(controls->slice_params),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_H264_PRED_WEIGHTS,
|
|
+ .ptr = &controls->pred_weights,
|
|
+ .size = sizeof(controls->pred_weights),
|
|
+ },
|
|
+ };
|
|
+
|
|
+ if (ctx->decode_mode == V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED) {
|
|
+ int count = FF_ARRAY_ELEMS(control) - (controls->pred_weights_required ? 0 : 1);
|
|
+ return ff_v4l2_request_decode_slice(avctx, h->cur_pic_ptr->f, control, count, controls->first_slice, last_slice);
|
|
+ }
|
|
+
|
|
+ return ff_v4l2_request_decode_frame(avctx, h->cur_pic_ptr->f, control, FF_ARRAY_ELEMS(control) - 2);
|
|
+}
|
|
+
|
|
+static int v4l2_request_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
|
|
+{
|
|
+ const H264Context *h = avctx->priv_data;
|
|
+ const PPS *pps = h->ps.pps;
|
|
+ const H264SliceContext *sl = &h->slice_ctx[0];
|
|
+ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private;
|
|
+ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int i, ret, count;
|
|
+
|
|
+ if (ctx->decode_mode == V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED && controls->num_slices) {
|
|
+ ret = v4l2_request_h264_queue_decode(avctx, 0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ff_v4l2_request_reset_frame(avctx, h->cur_pic_ptr->f);
|
|
+ controls->first_slice = 0;
|
|
+ }
|
|
+
|
|
+ if (ctx->start_code == V4L2_STATELESS_H264_START_CODE_ANNEX_B) {
|
|
+ ret = ff_v4l2_request_append_output_buffer(avctx, h->cur_pic_ptr->f, nalu_slice_start_code, 3);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = ff_v4l2_request_append_output_buffer(avctx, h->cur_pic_ptr->f, buffer, size);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (ctx->decode_mode != V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED)
|
|
+ return 0;
|
|
+
|
|
+ controls->slice_params = (struct v4l2_ctrl_h264_slice_params) {
|
|
+ /* Offset in bits to slice_data() from the beginning of this slice. */
|
|
+ .header_bit_size = get_bits_count(&sl->gb),
|
|
+
|
|
+ .first_mb_in_slice = sl->first_mb_addr,
|
|
+
|
|
+ .slice_type = ff_h264_get_slice_type(sl),
|
|
+ .colour_plane_id = 0, /* separate colour plane not supported by FFmpeg */
|
|
+ .redundant_pic_cnt = sl->redundant_pic_count,
|
|
+ .cabac_init_idc = sl->cabac_init_idc,
|
|
+ .slice_qp_delta = sl->qscale - pps->init_qp,
|
|
+ .slice_qs_delta = 0, /* not implemented by FFmpeg */
|
|
+ .disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter,
|
|
+ .slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2,
|
|
+ .slice_beta_offset_div2 = sl->slice_beta_offset / 2,
|
|
+ .num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0,
|
|
+ .num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0,
|
|
+ };
|
|
+
|
|
+ if (sl->slice_type == AV_PICTURE_TYPE_B && sl->direct_spatial_mv_pred)
|
|
+ controls->slice_params.flags |= V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED;
|
|
+ /* V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH: not implemented by FFmpeg */
|
|
+
|
|
+ controls->pred_weights_required = V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(&controls->pps, &controls->slice_params);
|
|
+ if (controls->pred_weights_required) {
|
|
+ controls->pred_weights.chroma_log2_weight_denom = sl->pwt.chroma_log2_weight_denom;
|
|
+ controls->pred_weights.luma_log2_weight_denom = sl->pwt.luma_log2_weight_denom;
|
|
+ }
|
|
+
|
|
+ count = sl->list_count > 0 ? sl->ref_count[0] : 0;
|
|
+ for (i = 0; i < count; i++)
|
|
+ fill_ref_list(&controls->slice_params.ref_pic_list0[i], &controls->decode_params, &sl->ref_list[0][i]);
|
|
+ if (count && controls->pred_weights_required)
|
|
+ fill_weight_factors(&controls->pred_weights.weight_factors[0], 0, sl);
|
|
+
|
|
+ count = sl->list_count > 1 ? sl->ref_count[1] : 0;
|
|
+ for (i = 0; i < count; i++)
|
|
+ fill_ref_list(&controls->slice_params.ref_pic_list1[i], &controls->decode_params, &sl->ref_list[1][i]);
|
|
+ if (count && controls->pred_weights_required)
|
|
+ fill_weight_factors(&controls->pred_weights.weight_factors[1], 1, sl);
|
|
+
|
|
+ controls->num_slices++;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int v4l2_request_h264_end_frame(AVCodecContext *avctx)
|
|
+{
|
|
+ return v4l2_request_h264_queue_decode(avctx, 1);
|
|
+}
|
|
+
|
|
+static int v4l2_request_h264_set_controls(AVCodecContext *avctx)
|
|
+{
|
|
+ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ { .id = V4L2_CID_STATELESS_H264_DECODE_MODE, },
|
|
+ { .id = V4L2_CID_STATELESS_H264_START_CODE, },
|
|
+ };
|
|
+
|
|
+ ctx->decode_mode = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_STATELESS_H264_DECODE_MODE);
|
|
+ if (ctx->decode_mode != V4L2_STATELESS_H264_DECODE_MODE_SLICE_BASED &&
|
|
+ ctx->decode_mode != V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: unsupported decode mode, %d\n", __func__, ctx->decode_mode);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ ctx->start_code = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_STATELESS_H264_START_CODE);
|
|
+ if (ctx->start_code != V4L2_STATELESS_H264_START_CODE_NONE &&
|
|
+ ctx->start_code != V4L2_STATELESS_H264_START_CODE_ANNEX_B) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: unsupported start code, %d\n", __func__, ctx->start_code);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ control[0].value = ctx->decode_mode;
|
|
+ control[1].value = ctx->start_code;
|
|
+
|
|
+ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control));
|
|
+}
|
|
+
|
|
+static int v4l2_request_h264_init(AVCodecContext *avctx)
|
|
+{
|
|
+ const H264Context *h = avctx->priv_data;
|
|
+ struct v4l2_ctrl_h264_sps sps;
|
|
+ int ret;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_H264_SPS,
|
|
+ .ptr = &sps,
|
|
+ .size = sizeof(sps),
|
|
+ },
|
|
+ };
|
|
+
|
|
+ fill_sps(&sps, h);
|
|
+
|
|
+ ret = ff_v4l2_request_init(avctx, V4L2_PIX_FMT_H264_SLICE, 4 * 1024 * 1024, control, FF_ARRAY_ELEMS(control));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return v4l2_request_h264_set_controls(avctx);
|
|
+}
|
|
+
|
|
+const AVHWAccel ff_h264_v4l2request_hwaccel = {
|
|
+ .name = "h264_v4l2request",
|
|
+ .type = AVMEDIA_TYPE_VIDEO,
|
|
+ .id = AV_CODEC_ID_H264,
|
|
+ .pix_fmt = AV_PIX_FMT_DRM_PRIME,
|
|
+ .start_frame = v4l2_request_h264_start_frame,
|
|
+ .decode_slice = v4l2_request_h264_decode_slice,
|
|
+ .end_frame = v4l2_request_h264_end_frame,
|
|
+ .frame_priv_data_size = sizeof(V4L2RequestControlsH264),
|
|
+ .init = v4l2_request_h264_init,
|
|
+ .uninit = ff_v4l2_request_uninit,
|
|
+ .priv_data_size = sizeof(V4L2RequestContextH264),
|
|
+ .frame_params = ff_v4l2_request_frame_params,
|
|
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
|
|
+};
|
|
|
|
From ef1f67b26f8fddd19b08864710f689f23f7fa68f Mon Sep 17 00:00:00 2001
|
|
From: Jonas Karlman <jonas@kwiboo.se>
|
|
Date: Sat, 15 Dec 2018 22:32:16 +0100
|
|
Subject: [PATCH 05/13] Add V4L2 request API mpeg2 hwaccel
|
|
|
|
Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
|
|
---
|
|
configure | 3 +
|
|
libavcodec/Makefile | 1 +
|
|
libavcodec/hwaccels.h | 1 +
|
|
libavcodec/mpeg12dec.c | 6 ++
|
|
libavcodec/v4l2_request_mpeg2.c | 159 ++++++++++++++++++++++++++++++++
|
|
5 files changed, 170 insertions(+)
|
|
create mode 100644 libavcodec/v4l2_request_mpeg2.c
|
|
|
|
diff --git a/configure b/configure
|
|
index b3ca83bf2b..4283447b2f 100755
|
|
--- a/configure
|
|
+++ b/configure
|
|
@@ -3073,6 +3073,8 @@ mpeg2_dxva2_hwaccel_deps="dxva2"
|
|
mpeg2_dxva2_hwaccel_select="mpeg2video_decoder"
|
|
mpeg2_nvdec_hwaccel_deps="nvdec"
|
|
mpeg2_nvdec_hwaccel_select="mpeg2video_decoder"
|
|
+mpeg2_v4l2request_hwaccel_deps="v4l2_request mpeg2_v4l2_request"
|
|
+mpeg2_v4l2request_hwaccel_select="mpeg2video_decoder"
|
|
mpeg2_vaapi_hwaccel_deps="vaapi"
|
|
mpeg2_vaapi_hwaccel_select="mpeg2video_decoder"
|
|
mpeg2_vdpau_hwaccel_deps="vdpau"
|
|
@@ -6890,6 +6892,7 @@ fi
|
|
|
|
check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns
|
|
check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;"
|
|
+check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;"
|
|
|
|
check_headers sys/videoio.h
|
|
test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
|
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
|
|
index 26004171b3..47cc14558c 100644
|
|
--- a/libavcodec/Makefile
|
|
+++ b/libavcodec/Makefile
|
|
@@ -1009,6 +1009,7 @@ OBJS-$(CONFIG_MPEG2_D3D11VA_HWACCEL) += dxva2_mpeg2.o
|
|
OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o
|
|
OBJS-$(CONFIG_MPEG2_NVDEC_HWACCEL) += nvdec_mpeg12.o
|
|
OBJS-$(CONFIG_MPEG2_QSV_HWACCEL) += qsvdec.o
|
|
+OBJS-$(CONFIG_MPEG2_V4L2REQUEST_HWACCEL) += v4l2_request_mpeg2.o
|
|
OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o
|
|
OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o
|
|
OBJS-$(CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o
|
|
diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h
|
|
index 014b95f0c0..3b675dd9f8 100644
|
|
--- a/libavcodec/hwaccels.h
|
|
+++ b/libavcodec/hwaccels.h
|
|
@@ -53,6 +53,7 @@ extern const AVHWAccel ff_mpeg2_d3d11va_hwaccel;
|
|
extern const AVHWAccel ff_mpeg2_d3d11va2_hwaccel;
|
|
extern const AVHWAccel ff_mpeg2_nvdec_hwaccel;
|
|
extern const AVHWAccel ff_mpeg2_dxva2_hwaccel;
|
|
+extern const AVHWAccel ff_mpeg2_v4l2request_hwaccel;
|
|
extern const AVHWAccel ff_mpeg2_vaapi_hwaccel;
|
|
extern const AVHWAccel ff_mpeg2_vdpau_hwaccel;
|
|
extern const AVHWAccel ff_mpeg2_videotoolbox_hwaccel;
|
|
diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
|
|
index 457d985265..4b90f07b54 100644
|
|
--- a/libavcodec/mpeg12dec.c
|
|
+++ b/libavcodec/mpeg12dec.c
|
|
@@ -1134,6 +1134,9 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = {
|
|
#endif
|
|
#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
|
|
AV_PIX_FMT_VIDEOTOOLBOX,
|
|
+#endif
|
|
+#if CONFIG_MPEG2_V4L2REQUEST_HWACCEL
|
|
+ AV_PIX_FMT_DRM_PRIME,
|
|
#endif
|
|
AV_PIX_FMT_YUV420P,
|
|
AV_PIX_FMT_NONE
|
|
@@ -2919,6 +2922,9 @@ const FFCodec ff_mpeg2video_decoder = {
|
|
#endif
|
|
#if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL
|
|
HWACCEL_VIDEOTOOLBOX(mpeg2),
|
|
+#endif
|
|
+#if CONFIG_MPEG2_V4L2REQUEST_HWACCEL
|
|
+ HWACCEL_V4L2REQUEST(mpeg2),
|
|
#endif
|
|
NULL
|
|
},
|
|
diff --git a/libavcodec/v4l2_request_mpeg2.c b/libavcodec/v4l2_request_mpeg2.c
|
|
new file mode 100644
|
|
index 0000000000..84d53209c7
|
|
--- /dev/null
|
|
+++ b/libavcodec/v4l2_request_mpeg2.c
|
|
@@ -0,0 +1,159 @@
|
|
+/*
|
|
+ * This file is part of FFmpeg.
|
|
+ *
|
|
+ * FFmpeg is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * FFmpeg is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with FFmpeg; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
+ */
|
|
+
|
|
+#include "hwconfig.h"
|
|
+#include "mpegvideo.h"
|
|
+#include "v4l2_request.h"
|
|
+
|
|
+typedef struct V4L2RequestControlsMPEG2 {
|
|
+ struct v4l2_ctrl_mpeg2_sequence sequence;
|
|
+ struct v4l2_ctrl_mpeg2_picture picture;
|
|
+ struct v4l2_ctrl_mpeg2_quantisation quantisation;
|
|
+} V4L2RequestControlsMPEG2;
|
|
+
|
|
+static int v4l2_request_mpeg2_start_frame(AVCodecContext *avctx,
|
|
+ av_unused const uint8_t *buffer,
|
|
+ av_unused uint32_t size)
|
|
+{
|
|
+ const MpegEncContext *s = avctx->priv_data;
|
|
+ V4L2RequestControlsMPEG2 *controls = s->current_picture_ptr->hwaccel_picture_private;
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)s->current_picture_ptr->f->data[0];
|
|
+
|
|
+ controls->sequence = (struct v4l2_ctrl_mpeg2_sequence) {
|
|
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */
|
|
+ .horizontal_size = s->width,
|
|
+ .vertical_size = s->height,
|
|
+ .vbv_buffer_size = req->output.size,
|
|
+
|
|
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */
|
|
+ .profile_and_level_indication = 0,
|
|
+ .chroma_format = s->chroma_format,
|
|
+ };
|
|
+
|
|
+ if (s->progressive_sequence)
|
|
+ controls->sequence.flags |= V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE;
|
|
+
|
|
+ controls->picture = (struct v4l2_ctrl_mpeg2_picture) {
|
|
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */
|
|
+ .picture_coding_type = s->pict_type,
|
|
+
|
|
+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */
|
|
+ .f_code[0][0] = s->mpeg_f_code[0][0],
|
|
+ .f_code[0][1] = s->mpeg_f_code[0][1],
|
|
+ .f_code[1][0] = s->mpeg_f_code[1][0],
|
|
+ .f_code[1][1] = s->mpeg_f_code[1][1],
|
|
+ .picture_structure = s->picture_structure,
|
|
+ .intra_dc_precision = s->intra_dc_precision,
|
|
+ };
|
|
+
|
|
+ if (s->top_field_first)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST;
|
|
+
|
|
+ if (s->frame_pred_frame_dct)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT;
|
|
+
|
|
+ if (s->concealment_motion_vectors)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV;
|
|
+
|
|
+ if (s->intra_vlc_format)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_INTRA_VLC;
|
|
+
|
|
+ if (s->q_scale_type)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE;
|
|
+
|
|
+ if (s->alternate_scan)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_ALT_SCAN;
|
|
+
|
|
+ if (s->repeat_first_field)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_REPEAT_FIRST;
|
|
+
|
|
+ if (s->progressive_frame)
|
|
+ controls->picture.flags |= V4L2_MPEG2_PIC_FLAG_PROGRESSIVE;
|
|
+
|
|
+ switch (s->pict_type) {
|
|
+ case AV_PICTURE_TYPE_B:
|
|
+ controls->picture.backward_ref_ts = ff_v4l2_request_get_capture_timestamp(s->next_picture.f);
|
|
+ // fall-through
|
|
+ case AV_PICTURE_TYPE_P:
|
|
+ controls->picture.forward_ref_ts = ff_v4l2_request_get_capture_timestamp(s->last_picture.f);
|
|
+ }
|
|
+
|
|
+ for (int i = 0; i < 64; i++) {
|
|
+ int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
|
|
+ controls->quantisation.intra_quantiser_matrix[i] = s->intra_matrix[n];
|
|
+ controls->quantisation.non_intra_quantiser_matrix[i] = s->inter_matrix[n];
|
|
+ controls->quantisation.chroma_intra_quantiser_matrix[i] = s->chroma_intra_matrix[n];
|
|
+ controls->quantisation.chroma_non_intra_quantiser_matrix[i] = s->chroma_inter_matrix[n];
|
|
+ }
|
|
+
|
|
+ return ff_v4l2_request_reset_frame(avctx, s->current_picture_ptr->f);
|
|
+}
|
|
+
|
|
+static int v4l2_request_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
|
|
+{
|
|
+ const MpegEncContext *s = avctx->priv_data;
|
|
+
|
|
+ return ff_v4l2_request_append_output_buffer(avctx, s->current_picture_ptr->f, buffer, size);
|
|
+}
|
|
+
|
|
+static int v4l2_request_mpeg2_end_frame(AVCodecContext *avctx)
|
|
+{
|
|
+ const MpegEncContext *s = avctx->priv_data;
|
|
+ V4L2RequestControlsMPEG2 *controls = s->current_picture_ptr->hwaccel_picture_private;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
|
|
+ .ptr = &controls->sequence,
|
|
+ .size = sizeof(controls->sequence),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
|
|
+ .ptr = &controls->picture,
|
|
+ .size = sizeof(controls->picture),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
|
|
+ .ptr = &controls->quantisation,
|
|
+ .size = sizeof(controls->quantisation),
|
|
+ },
|
|
+ };
|
|
+
|
|
+ return ff_v4l2_request_decode_frame(avctx, s->current_picture_ptr->f, control, FF_ARRAY_ELEMS(control));
|
|
+}
|
|
+
|
|
+static int v4l2_request_mpeg2_init(AVCodecContext *avctx)
|
|
+{
|
|
+ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_MPEG2_SLICE, 1024 * 1024, NULL, 0);
|
|
+}
|
|
+
|
|
+const AVHWAccel ff_mpeg2_v4l2request_hwaccel = {
|
|
+ .name = "mpeg2_v4l2request",
|
|
+ .type = AVMEDIA_TYPE_VIDEO,
|
|
+ .id = AV_CODEC_ID_MPEG2VIDEO,
|
|
+ .pix_fmt = AV_PIX_FMT_DRM_PRIME,
|
|
+ .start_frame = v4l2_request_mpeg2_start_frame,
|
|
+ .decode_slice = v4l2_request_mpeg2_decode_slice,
|
|
+ .end_frame = v4l2_request_mpeg2_end_frame,
|
|
+ .frame_priv_data_size = sizeof(V4L2RequestControlsMPEG2),
|
|
+ .init = v4l2_request_mpeg2_init,
|
|
+ .uninit = ff_v4l2_request_uninit,
|
|
+ .priv_data_size = sizeof(V4L2RequestContext),
|
|
+ .frame_params = ff_v4l2_request_frame_params,
|
|
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
|
|
+};
|
|
|
|
From 1e84c65220903fe21301c520e4490ab3c0db9c83 Mon Sep 17 00:00:00 2001
|
|
From: Boris Brezillon <boris.brezillon@collabora.com>
|
|
Date: Wed, 22 May 2019 14:46:58 +0200
|
|
Subject: [PATCH 06/13] Add V4L2 request API vp8 hwaccel
|
|
|
|
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
|
|
Signed-off-by: Ezequiel Garcia <ezequiel@collabora.com>
|
|
Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
|
|
---
|
|
configure | 3 +
|
|
libavcodec/Makefile | 1 +
|
|
libavcodec/hwaccels.h | 1 +
|
|
libavcodec/v4l2_request_vp8.c | 180 ++++++++++++++++++++++++++++++++++
|
|
libavcodec/vp8.c | 6 ++
|
|
5 files changed, 191 insertions(+)
|
|
create mode 100644 libavcodec/v4l2_request_vp8.c
|
|
|
|
diff --git a/configure b/configure
|
|
index 4283447b2f..794bd7f4d6 100755
|
|
--- a/configure
|
|
+++ b/configure
|
|
@@ -3105,6 +3105,8 @@ vc1_vdpau_hwaccel_deps="vdpau"
|
|
vc1_vdpau_hwaccel_select="vc1_decoder"
|
|
vp8_nvdec_hwaccel_deps="nvdec"
|
|
vp8_nvdec_hwaccel_select="vp8_decoder"
|
|
+vp8_v4l2request_hwaccel_deps="v4l2_request vp8_v4l2_request"
|
|
+vp8_v4l2request_hwaccel_select="vp8_decoder"
|
|
vp8_vaapi_hwaccel_deps="vaapi"
|
|
vp8_vaapi_hwaccel_select="vp8_decoder"
|
|
vp9_d3d11va_hwaccel_deps="d3d11va DXVA_PicParams_VP9"
|
|
@@ -6893,6 +6895,7 @@ fi
|
|
check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns
|
|
check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;"
|
|
check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;"
|
|
+check_cc vp8_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP8_FRAME;"
|
|
|
|
check_headers sys/videoio.h
|
|
test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
|
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
|
|
index 47cc14558c..7da4fd1a87 100644
|
|
--- a/libavcodec/Makefile
|
|
+++ b/libavcodec/Makefile
|
|
@@ -1024,6 +1024,7 @@ OBJS-$(CONFIG_VC1_QSV_HWACCEL) += qsvdec.o
|
|
OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o
|
|
OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o
|
|
OBJS-$(CONFIG_VP8_NVDEC_HWACCEL) += nvdec_vp8.o
|
|
+OBJS-$(CONFIG_VP8_V4L2REQUEST_HWACCEL) += v4l2_request_vp8.o
|
|
OBJS-$(CONFIG_VP8_VAAPI_HWACCEL) += vaapi_vp8.o
|
|
OBJS-$(CONFIG_VP9_D3D11VA_HWACCEL) += dxva2_vp9.o
|
|
OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o
|
|
diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h
|
|
index 3b675dd9f8..6f9f078001 100644
|
|
--- a/libavcodec/hwaccels.h
|
|
+++ b/libavcodec/hwaccels.h
|
|
@@ -69,6 +69,7 @@ extern const AVHWAccel ff_vc1_nvdec_hwaccel;
|
|
extern const AVHWAccel ff_vc1_vaapi_hwaccel;
|
|
extern const AVHWAccel ff_vc1_vdpau_hwaccel;
|
|
extern const AVHWAccel ff_vp8_nvdec_hwaccel;
|
|
+extern const AVHWAccel ff_vp8_v4l2request_hwaccel;
|
|
extern const AVHWAccel ff_vp8_vaapi_hwaccel;
|
|
extern const AVHWAccel ff_vp9_d3d11va_hwaccel;
|
|
extern const AVHWAccel ff_vp9_d3d11va2_hwaccel;
|
|
diff --git a/libavcodec/v4l2_request_vp8.c b/libavcodec/v4l2_request_vp8.c
|
|
new file mode 100644
|
|
index 0000000000..e169030213
|
|
--- /dev/null
|
|
+++ b/libavcodec/v4l2_request_vp8.c
|
|
@@ -0,0 +1,180 @@
|
|
+/*
|
|
+ * This file is part of FFmpeg.
|
|
+ *
|
|
+ * FFmpeg is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * FFmpeg is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with FFmpeg; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
+ */
|
|
+
|
|
+#include "hwconfig.h"
|
|
+#include "v4l2_request.h"
|
|
+#include "vp8.h"
|
|
+
|
|
+typedef struct V4L2RequestControlsVP8 {
|
|
+ struct v4l2_ctrl_vp8_frame ctrl;
|
|
+} V4L2RequestControlsVP8;
|
|
+
|
|
+static int v4l2_request_vp8_start_frame(AVCodecContext *avctx,
|
|
+ av_unused const uint8_t *buffer,
|
|
+ av_unused uint32_t size)
|
|
+{
|
|
+ const VP8Context *s = avctx->priv_data;
|
|
+ V4L2RequestControlsVP8 *controls = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
|
|
+
|
|
+ memset(&controls->ctrl, 0, sizeof(controls->ctrl));
|
|
+ return ff_v4l2_request_reset_frame(avctx, s->framep[VP8_FRAME_CURRENT]->tf.f);
|
|
+}
|
|
+
|
|
+static int v4l2_request_vp8_end_frame(AVCodecContext *avctx)
|
|
+{
|
|
+ const VP8Context *s = avctx->priv_data;
|
|
+ V4L2RequestControlsVP8 *controls = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_VP8_FRAME,
|
|
+ .ptr = &controls->ctrl,
|
|
+ .size = sizeof(controls->ctrl),
|
|
+ },
|
|
+ };
|
|
+
|
|
+ return ff_v4l2_request_decode_frame(avctx, s->framep[VP8_FRAME_CURRENT]->tf.f,
|
|
+ control, FF_ARRAY_ELEMS(control));
|
|
+}
|
|
+
|
|
+static int v4l2_request_vp8_decode_slice(AVCodecContext *avctx,
|
|
+ const uint8_t *buffer,
|
|
+ uint32_t size)
|
|
+{
|
|
+ const VP8Context *s = avctx->priv_data;
|
|
+ V4L2RequestControlsVP8 *controls = s->framep[VP8_FRAME_CURRENT]->hwaccel_picture_private;
|
|
+ struct v4l2_ctrl_vp8_frame *frame = &controls->ctrl;
|
|
+ const uint8_t *data = buffer + 3 + 7 * s->keyframe;
|
|
+ unsigned int i, j, k;
|
|
+
|
|
+ frame->version = s->profile & 0x3;
|
|
+ frame->width = avctx->width;
|
|
+ frame->height = avctx->height;
|
|
+ /* FIXME: set ->xx_scale */
|
|
+ frame->prob_skip_false = s->prob->mbskip;
|
|
+ frame->prob_intra = s->prob->intra;
|
|
+ frame->prob_gf = s->prob->golden;
|
|
+ frame->prob_last = s->prob->last;
|
|
+ frame->first_part_size = s->header_partition_size;
|
|
+ frame->first_part_header_bits = (8 * (s->coder_state_at_header_end.input - data) -
|
|
+ s->coder_state_at_header_end.bit_count - 8);
|
|
+ frame->num_dct_parts = s->num_coeff_partitions;
|
|
+ for (i = 0; i < 8; i++)
|
|
+ frame->dct_part_sizes[i] = s->coeff_partition_size[i];
|
|
+
|
|
+ frame->coder_state.range = s->coder_state_at_header_end.range;
|
|
+ frame->coder_state.value = s->coder_state_at_header_end.value;
|
|
+ frame->coder_state.bit_count = s->coder_state_at_header_end.bit_count;
|
|
+ if (s->framep[VP8_FRAME_PREVIOUS])
|
|
+ frame->last_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP8_FRAME_PREVIOUS]->tf.f);
|
|
+ if (s->framep[VP8_FRAME_GOLDEN])
|
|
+ frame->golden_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP8_FRAME_GOLDEN]->tf.f);
|
|
+ if (s->framep[VP8_FRAME_ALTREF])
|
|
+ frame->alt_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP8_FRAME_ALTREF]->tf.f);
|
|
+ frame->flags |= s->invisible ? 0 : V4L2_VP8_FRAME_FLAG_SHOW_FRAME;
|
|
+ frame->flags |= s->mbskip_enabled ? V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF : 0;
|
|
+ frame->flags |= (s->profile & 0x4) ? V4L2_VP8_FRAME_FLAG_EXPERIMENTAL : 0;
|
|
+ frame->flags |= s->keyframe ? V4L2_VP8_FRAME_FLAG_KEY_FRAME : 0;
|
|
+ frame->flags |= s->sign_bias[VP8_FRAME_GOLDEN] ? V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN : 0;
|
|
+ frame->flags |= s->sign_bias[VP8_FRAME_ALTREF] ? V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT : 0;
|
|
+ frame->segment.flags |= s->segmentation.enabled ? V4L2_VP8_SEGMENT_FLAG_ENABLED : 0;
|
|
+ frame->segment.flags |= s->segmentation.update_map ? V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP : 0;
|
|
+ frame->segment.flags |= s->segmentation.update_feature_data ? V4L2_VP8_SEGMENT_FLAG_UPDATE_FEATURE_DATA : 0;
|
|
+ frame->segment.flags |= s->segmentation.absolute_vals ? 0 : V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE;
|
|
+ for (i = 0; i < 4; i++) {
|
|
+ frame->segment.quant_update[i] = s->segmentation.base_quant[i];
|
|
+ frame->segment.lf_update[i] = s->segmentation.filter_level[i];
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < 3; i++)
|
|
+ frame->segment.segment_probs[i] = s->prob->segmentid[i];
|
|
+
|
|
+ frame->lf.level = s->filter.level;
|
|
+ frame->lf.sharpness_level = s->filter.sharpness;
|
|
+ frame->lf.flags |= s->lf_delta.enabled ? V4L2_VP8_LF_ADJ_ENABLE : 0;
|
|
+ frame->lf.flags |= s->lf_delta.update ? V4L2_VP8_LF_DELTA_UPDATE : 0;
|
|
+ frame->lf.flags |= s->filter.simple ? V4L2_VP8_LF_FILTER_TYPE_SIMPLE : 0;
|
|
+ for (i = 0; i < 4; i++) {
|
|
+ frame->lf.ref_frm_delta[i] = s->lf_delta.ref[i];
|
|
+ frame->lf.mb_mode_delta[i] = s->lf_delta.mode[i + MODE_I4x4];
|
|
+ }
|
|
+
|
|
+ // Probabilities
|
|
+ if (s->keyframe) {
|
|
+ static const uint8_t keyframe_y_mode_probs[4] = {
|
|
+ 145, 156, 163, 128
|
|
+ };
|
|
+ static const uint8_t keyframe_uv_mode_probs[3] = {
|
|
+ 142, 114, 183
|
|
+ };
|
|
+
|
|
+ memcpy(frame->entropy.y_mode_probs, keyframe_y_mode_probs, 4);
|
|
+ memcpy(frame->entropy.uv_mode_probs, keyframe_uv_mode_probs, 3);
|
|
+ } else {
|
|
+ for (i = 0; i < 4; i++)
|
|
+ frame->entropy.y_mode_probs[i] = s->prob->pred16x16[i];
|
|
+ for (i = 0; i < 3; i++)
|
|
+ frame->entropy.uv_mode_probs[i] = s->prob->pred8x8c[i];
|
|
+ }
|
|
+ for (i = 0; i < 2; i++)
|
|
+ for (j = 0; j < 19; j++)
|
|
+ frame->entropy.mv_probs[i][j] = s->prob->mvc[i][j];
|
|
+
|
|
+ for (i = 0; i < 4; i++) {
|
|
+ for (j = 0; j < 8; j++) {
|
|
+ static const int coeff_bands_inverse[8] = {
|
|
+ 0, 1, 2, 3, 5, 6, 4, 15
|
|
+ };
|
|
+ int coeff_pos = coeff_bands_inverse[j];
|
|
+
|
|
+ for (k = 0; k < 3; k++) {
|
|
+ memcpy(frame->entropy.coeff_probs[i][j][k],
|
|
+ s->prob->token[i][coeff_pos][k], 11);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ frame->quant.y_ac_qi = s->quant.yac_qi;
|
|
+ frame->quant.y_dc_delta = s->quant.ydc_delta;
|
|
+ frame->quant.y2_dc_delta = s->quant.y2dc_delta;
|
|
+ frame->quant.y2_ac_delta = s->quant.y2ac_delta;
|
|
+ frame->quant.uv_dc_delta = s->quant.uvdc_delta;
|
|
+ frame->quant.uv_ac_delta = s->quant.uvac_delta;
|
|
+
|
|
+ return ff_v4l2_request_append_output_buffer(avctx, s->framep[VP8_FRAME_CURRENT]->tf.f, buffer, size);
|
|
+}
|
|
+
|
|
+static int v4l2_request_vp8_init(AVCodecContext *avctx)
|
|
+{
|
|
+ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_VP8_FRAME, 2 * 1024 * 1024, NULL, 0);
|
|
+}
|
|
+
|
|
+const AVHWAccel ff_vp8_v4l2request_hwaccel = {
|
|
+ .name = "vp8_v4l2request",
|
|
+ .type = AVMEDIA_TYPE_VIDEO,
|
|
+ .id = AV_CODEC_ID_VP8,
|
|
+ .pix_fmt = AV_PIX_FMT_DRM_PRIME,
|
|
+ .start_frame = v4l2_request_vp8_start_frame,
|
|
+ .decode_slice = v4l2_request_vp8_decode_slice,
|
|
+ .end_frame = v4l2_request_vp8_end_frame,
|
|
+ .frame_priv_data_size = sizeof(V4L2RequestControlsVP8),
|
|
+ .init = v4l2_request_vp8_init,
|
|
+ .uninit = ff_v4l2_request_uninit,
|
|
+ .priv_data_size = sizeof(V4L2RequestContext),
|
|
+ .frame_params = ff_v4l2_request_frame_params,
|
|
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
|
|
+};
|
|
diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c
|
|
index db2419deaf..ad5e6e8f2b 100644
|
|
--- a/libavcodec/vp8.c
|
|
+++ b/libavcodec/vp8.c
|
|
@@ -206,6 +206,9 @@ static enum AVPixelFormat get_pixel_format(VP8Context *s)
|
|
#endif
|
|
#if CONFIG_VP8_NVDEC_HWACCEL
|
|
AV_PIX_FMT_CUDA,
|
|
+#endif
|
|
+#if CONFIG_VP8_V4L2REQUEST_HWACCEL
|
|
+ AV_PIX_FMT_DRM_PRIME,
|
|
#endif
|
|
AV_PIX_FMT_YUV420P,
|
|
AV_PIX_FMT_NONE,
|
|
@@ -3007,6 +3010,9 @@ const FFCodec ff_vp8_decoder = {
|
|
#endif
|
|
#if CONFIG_VP8_NVDEC_HWACCEL
|
|
HWACCEL_NVDEC(vp8),
|
|
+#endif
|
|
+#if CONFIG_VP8_V4L2REQUEST_HWACCEL
|
|
+ HWACCEL_V4L2REQUEST(vp8),
|
|
#endif
|
|
NULL
|
|
},
|
|
|
|
From e5df38921bf3a7f918c7e05b0207e3fde6c65017 Mon Sep 17 00:00:00 2001
|
|
From: Jernej Skrabec <jernej.skrabec@siol.net>
|
|
Date: Sat, 15 Dec 2018 22:32:16 +0100
|
|
Subject: [PATCH 07/13] Add V4L2 request API hevc hwaccel
|
|
|
|
Signed-off-by: Jernej Skrabec <jernej.skrabec@siol.net>
|
|
Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
|
|
Signed-off-by: Benjamin Gaignard <benjamin.gaignard@collabora.com>
|
|
Signed-off-by: Alex Bee <knaerzche@gmail.com>
|
|
---
|
|
configure | 3 +
|
|
libavcodec/Makefile | 1 +
|
|
libavcodec/hevcdec.c | 10 +
|
|
libavcodec/hwaccels.h | 1 +
|
|
libavcodec/v4l2_request_hevc.c | 679 +++++++++++++++++++++++++++++++++
|
|
5 files changed, 694 insertions(+)
|
|
create mode 100644 libavcodec/v4l2_request_hevc.c
|
|
|
|
diff --git a/configure b/configure
|
|
index 794bd7f4d6..2565ce8d7c 100755
|
|
--- a/configure
|
|
+++ b/configure
|
|
@@ -3049,6 +3049,8 @@ hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC"
|
|
hevc_dxva2_hwaccel_select="hevc_decoder"
|
|
hevc_nvdec_hwaccel_deps="nvdec"
|
|
hevc_nvdec_hwaccel_select="hevc_decoder"
|
|
+hevc_v4l2request_hwaccel_deps="v4l2_request hevc_v4l2_request"
|
|
+hevc_v4l2request_hwaccel_select="hevc_decoder"
|
|
hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC"
|
|
hevc_vaapi_hwaccel_select="hevc_decoder"
|
|
hevc_vdpau_hwaccel_deps="vdpau VdpPictureInfoHEVC"
|
|
@@ -6894,6 +6896,7 @@ fi
|
|
|
|
check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns
|
|
check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;"
|
|
+check_cc hevc_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC_SLICE;"
|
|
check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;"
|
|
check_cc vp8_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP8_FRAME;"
|
|
|
|
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
|
|
index 7da4fd1a87..cd08740e75 100644
|
|
--- a/libavcodec/Makefile
|
|
+++ b/libavcodec/Makefile
|
|
@@ -998,6 +998,7 @@ OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o
|
|
OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o
|
|
OBJS-$(CONFIG_HEVC_NVDEC_HWACCEL) += nvdec_hevc.o
|
|
OBJS-$(CONFIG_HEVC_QSV_HWACCEL) += qsvdec.o
|
|
+OBJS-$(CONFIG_HEVC_V4L2REQUEST_HWACCEL) += v4l2_request_hevc.o
|
|
OBJS-$(CONFIG_HEVC_VAAPI_HWACCEL) += vaapi_hevc.o h265_profile_level.o
|
|
OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL) += vdpau_hevc.o h265_profile_level.o
|
|
OBJS-$(CONFIG_MJPEG_NVDEC_HWACCEL) += nvdec_mjpeg.o
|
|
diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c
|
|
index 567e8d81d4..79b821e7e5 100644
|
|
--- a/libavcodec/hevcdec.c
|
|
+++ b/libavcodec/hevcdec.c
|
|
@@ -403,6 +403,7 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
|
|
#define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \
|
|
CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \
|
|
CONFIG_HEVC_NVDEC_HWACCEL + \
|
|
+ CONFIG_HEVC_V4L2REQUEST_HWACCEL + \
|
|
CONFIG_HEVC_VAAPI_HWACCEL + \
|
|
CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \
|
|
CONFIG_HEVC_VDPAU_HWACCEL)
|
|
@@ -429,6 +430,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
|
|
#endif
|
|
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
|
|
*fmt++ = AV_PIX_FMT_VIDEOTOOLBOX;
|
|
+#endif
|
|
+#if CONFIG_HEVC_V4L2REQUEST_HWACCEL
|
|
+ *fmt++ = AV_PIX_FMT_DRM_PRIME;
|
|
#endif
|
|
break;
|
|
case AV_PIX_FMT_YUV420P10:
|
|
@@ -450,6 +454,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
|
|
#endif
|
|
#if CONFIG_HEVC_NVDEC_HWACCEL
|
|
*fmt++ = AV_PIX_FMT_CUDA;
|
|
+#endif
|
|
+#if CONFIG_HEVC_V4L2REQUEST_HWACCEL
|
|
+ *fmt++ = AV_PIX_FMT_DRM_PRIME;
|
|
#endif
|
|
break;
|
|
case AV_PIX_FMT_YUV444P:
|
|
@@ -3739,6 +3746,9 @@ const FFCodec ff_hevc_decoder = {
|
|
#endif
|
|
#if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL
|
|
HWACCEL_VIDEOTOOLBOX(hevc),
|
|
+#endif
|
|
+#if CONFIG_HEVC_V4L2REQUEST_HWACCEL
|
|
+ HWACCEL_V4L2REQUEST(hevc),
|
|
#endif
|
|
NULL
|
|
},
|
|
diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h
|
|
index 6f9f078001..e4e4abc060 100644
|
|
--- a/libavcodec/hwaccels.h
|
|
+++ b/libavcodec/hwaccels.h
|
|
@@ -41,6 +41,7 @@ extern const AVHWAccel ff_hevc_d3d11va_hwaccel;
|
|
extern const AVHWAccel ff_hevc_d3d11va2_hwaccel;
|
|
extern const AVHWAccel ff_hevc_dxva2_hwaccel;
|
|
extern const AVHWAccel ff_hevc_nvdec_hwaccel;
|
|
+extern const AVHWAccel ff_hevc_v4l2request_hwaccel;
|
|
extern const AVHWAccel ff_hevc_vaapi_hwaccel;
|
|
extern const AVHWAccel ff_hevc_vdpau_hwaccel;
|
|
extern const AVHWAccel ff_hevc_videotoolbox_hwaccel;
|
|
diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c
|
|
new file mode 100644
|
|
index 0000000000..3e2b9a575e
|
|
--- /dev/null
|
|
+++ b/libavcodec/v4l2_request_hevc.c
|
|
@@ -0,0 +1,679 @@
|
|
+/*
|
|
+ * This file is part of FFmpeg.
|
|
+ *
|
|
+ * FFmpeg is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * FFmpeg is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with FFmpeg; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
+ */
|
|
+
|
|
+#include "hevcdec.h"
|
|
+#include "hwconfig.h"
|
|
+#include "internal.h"
|
|
+#include "v4l2_request.h"
|
|
+
|
|
+#define MAX_SLICES 600 // as per HEVC spec?
|
|
+#define V4L2_HEVC_CONTROLS_MAX 6
|
|
+
|
|
+typedef struct V4L2RequestControlsHEVC {
|
|
+ struct v4l2_ctrl_hevc_sps sps;
|
|
+ struct v4l2_ctrl_hevc_pps pps;
|
|
+ struct v4l2_ctrl_hevc_decode_params dec_params;
|
|
+ struct v4l2_ctrl_hevc_scaling_matrix scaling_matrix;
|
|
+ struct v4l2_ctrl_hevc_slice_params slice_params[MAX_SLICES];
|
|
+ __u32 *entry_point_offsets;
|
|
+ unsigned int num_entry_point_offsets;
|
|
+ int first_slice;
|
|
+ int num_slices;
|
|
+} V4L2RequestControlsHEVC;
|
|
+
|
|
+typedef struct V4L2RequestContextHEVC {
|
|
+ V4L2RequestContext base;
|
|
+ unsigned int decode_mode;
|
|
+ unsigned int start_code;
|
|
+ __u32 max_slices;
|
|
+ unsigned int supports_entry_point_offsets;
|
|
+ unsigned int supports_slices;
|
|
+ unsigned int supports_scaling_matrix;
|
|
+} V4L2RequestContextHEVC;
|
|
+
|
|
+static uint8_t nalu_slice_start_code[] = { 0x00, 0x00, 0x01 };
|
|
+
|
|
+static void v4l2_request_hevc_fill_pred_table(const HEVCContext *h, struct v4l2_hevc_pred_weight_table *table)
|
|
+{
|
|
+ int32_t luma_weight_denom, chroma_weight_denom;
|
|
+ const SliceHeader *sh = &h->sh;
|
|
+
|
|
+ if (sh->slice_type == HEVC_SLICE_I ||
|
|
+ (sh->slice_type == HEVC_SLICE_P && !h->ps.pps->weighted_pred_flag) ||
|
|
+ (sh->slice_type == HEVC_SLICE_B && !h->ps.pps->weighted_bipred_flag))
|
|
+ return;
|
|
+
|
|
+ table->luma_log2_weight_denom = sh->luma_log2_weight_denom;
|
|
+
|
|
+ if (h->ps.sps->chroma_format_idc)
|
|
+ table->delta_chroma_log2_weight_denom = sh->chroma_log2_weight_denom - sh->luma_log2_weight_denom;
|
|
+
|
|
+ luma_weight_denom = (1 << sh->luma_log2_weight_denom);
|
|
+ chroma_weight_denom = (1 << sh->chroma_log2_weight_denom);
|
|
+
|
|
+ for (int i = 0; i < 15 && i < sh->nb_refs[L0]; i++) {
|
|
+ table->delta_luma_weight_l0[i] = sh->luma_weight_l0[i] - luma_weight_denom;
|
|
+ table->luma_offset_l0[i] = sh->luma_offset_l0[i];
|
|
+ table->delta_chroma_weight_l0[i][0] = sh->chroma_weight_l0[i][0] - chroma_weight_denom;
|
|
+ table->delta_chroma_weight_l0[i][1] = sh->chroma_weight_l0[i][1] - chroma_weight_denom;
|
|
+ table->chroma_offset_l0[i][0] = sh->chroma_offset_l0[i][0];
|
|
+ table->chroma_offset_l0[i][1] = sh->chroma_offset_l0[i][1];
|
|
+ }
|
|
+
|
|
+ if (sh->slice_type != HEVC_SLICE_B)
|
|
+ return;
|
|
+
|
|
+ for (int i = 0; i < 15 && i < sh->nb_refs[L1]; i++) {
|
|
+ table->delta_luma_weight_l1[i] = sh->luma_weight_l1[i] - luma_weight_denom;
|
|
+ table->luma_offset_l1[i] = sh->luma_offset_l1[i];
|
|
+ table->delta_chroma_weight_l1[i][0] = sh->chroma_weight_l1[i][0] - chroma_weight_denom;
|
|
+ table->delta_chroma_weight_l1[i][1] = sh->chroma_weight_l1[i][1] - chroma_weight_denom;
|
|
+ table->chroma_offset_l1[i][0] = sh->chroma_offset_l1[i][0];
|
|
+ table->chroma_offset_l1[i][1] = sh->chroma_offset_l1[i][1];
|
|
+ }
|
|
+}
|
|
+
|
|
+static uint8_t get_ref_pic_index(const HEVCContext *h, const HEVCFrame *frame,
|
|
+ struct v4l2_ctrl_hevc_decode_params *dec_params)
|
|
+{
|
|
+ uint64_t timestamp;
|
|
+
|
|
+ if (!frame)
|
|
+ return 0;
|
|
+
|
|
+ timestamp = ff_v4l2_request_get_capture_timestamp(frame->frame);
|
|
+
|
|
+ for (uint8_t i = 0; i < dec_params->num_active_dpb_entries; i++) {
|
|
+ struct v4l2_hevc_dpb_entry *entry = &dec_params->dpb[i];
|
|
+ if (entry->timestamp == timestamp)
|
|
+ return i;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void fill_dec_params(struct v4l2_ctrl_hevc_decode_params *dec_params, const HEVCContext *h)
|
|
+{
|
|
+ const HEVCFrame *pic = h->ref;
|
|
+ const SliceHeader *sh = &h->sh;
|
|
+ int i, entries = 0;
|
|
+
|
|
+ *dec_params = (struct v4l2_ctrl_hevc_decode_params) {
|
|
+ .pic_order_cnt_val = pic->poc, /* FIXME: is this the same as slice_params->slice_pic_order_cnt? */
|
|
+ .short_term_ref_pic_set_size = sh->short_term_ref_pic_set_size,
|
|
+ .long_term_ref_pic_set_size = sh->long_term_ref_pic_set_size,
|
|
+ .num_poc_st_curr_before = h->rps[ST_CURR_BEF].nb_refs,
|
|
+ .num_poc_st_curr_after = h->rps[ST_CURR_AFT].nb_refs,
|
|
+ .num_poc_lt_curr = h->rps[LT_CURR].nb_refs,
|
|
+ };
|
|
+
|
|
+ for (i = 0; i < FF_ARRAY_ELEMS(h->DPB); i++) {
|
|
+ const HEVCFrame *frame = &h->DPB[i];
|
|
+ if (frame != pic && (frame->flags & (HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLAG_SHORT_REF))) {
|
|
+ struct v4l2_hevc_dpb_entry *entry = &dec_params->dpb[entries++];
|
|
+
|
|
+ entry->timestamp = ff_v4l2_request_get_capture_timestamp(frame->frame);
|
|
+ entry->field_pic = frame->frame->interlaced_frame;
|
|
+ entry->flags = 0;
|
|
+ if (frame->flags & HEVC_FRAME_FLAG_LONG_REF)
|
|
+ entry->flags |= V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE;
|
|
+
|
|
+ entry->pic_order_cnt_val = frame->poc;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ dec_params->num_active_dpb_entries = entries;
|
|
+
|
|
+ if (IS_IRAP(h))
|
|
+ dec_params->flags |= V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC;
|
|
+
|
|
+ if (IS_IDR(h))
|
|
+ dec_params->flags |= V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC;
|
|
+
|
|
+ if (sh->no_output_of_prior_pics_flag)
|
|
+ dec_params->flags |= V4L2_HEVC_DECODE_PARAM_FLAG_NO_OUTPUT_OF_PRIOR;
|
|
+
|
|
+ for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
|
|
+ dec_params->poc_st_curr_before[i] = get_ref_pic_index(h, h->rps[ST_CURR_BEF].ref[i], dec_params);
|
|
+ dec_params->poc_st_curr_after[i] = get_ref_pic_index(h, h->rps[ST_CURR_AFT].ref[i], dec_params);
|
|
+ dec_params->poc_lt_curr[i] = get_ref_pic_index(h, h->rps[LT_CURR].ref[i], dec_params);
|
|
+ }
|
|
+}
|
|
+
|
|
+static int v4l2_request_hevc_fill_slice_params(const HEVCContext *h,
|
|
+ V4L2RequestControlsHEVC *controls,
|
|
+ int slice)
|
|
+{
|
|
+ struct v4l2_ctrl_hevc_slice_params *slice_params = &controls->slice_params[slice];
|
|
+ struct v4l2_ctrl_hevc_decode_params *dec_params = &controls->dec_params;
|
|
+ const HEVCFrame *pic = h->ref;
|
|
+ const SliceHeader *sh = &h->sh;
|
|
+ RefPicList *rpl;
|
|
+ int i;
|
|
+
|
|
+ *slice_params = (struct v4l2_ctrl_hevc_slice_params) {
|
|
+ .bit_size = 0,
|
|
+ .data_byte_offset = (get_bits_count(&h->HEVClc->gb) + 1 + 7) / 8,
|
|
+ .num_entry_point_offsets = sh->num_entry_point_offsets,
|
|
+
|
|
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
|
|
+ .nal_unit_type = h->nal_unit_type,
|
|
+ .nuh_temporal_id_plus1 = h->temporal_id + 1,
|
|
+
|
|
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
|
|
+ .slice_type = sh->slice_type,
|
|
+ .colour_plane_id = sh->colour_plane_id,
|
|
+ .slice_pic_order_cnt = pic->poc,
|
|
+ .num_ref_idx_l0_active_minus1 = sh->nb_refs[L0] ? sh->nb_refs[L0] - 1 : 0,
|
|
+ .num_ref_idx_l1_active_minus1 = sh->nb_refs[L1] ? sh->nb_refs[L1] - 1 : 0,
|
|
+ .collocated_ref_idx = sh->slice_temporal_mvp_enabled_flag ? sh->collocated_ref_idx : 0,
|
|
+ .five_minus_max_num_merge_cand = sh->slice_type == HEVC_SLICE_I ? 0 : 5 - sh->max_num_merge_cand,
|
|
+ .slice_qp_delta = sh->slice_qp_delta,
|
|
+ .slice_cb_qp_offset = sh->slice_cb_qp_offset,
|
|
+ .slice_cr_qp_offset = sh->slice_cr_qp_offset,
|
|
+ .slice_act_y_qp_offset = 0,
|
|
+ .slice_act_cb_qp_offset = 0,
|
|
+ .slice_act_cr_qp_offset = 0,
|
|
+ .slice_beta_offset_div2 = sh->beta_offset / 2,
|
|
+ .slice_tc_offset_div2 = sh->tc_offset / 2,
|
|
+
|
|
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
|
|
+ .pic_struct = h->sei.picture_timing.picture_struct,
|
|
+
|
|
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
|
|
+ .slice_segment_addr = sh->slice_segment_addr,
|
|
+ .short_term_ref_pic_set_size = sh->short_term_ref_pic_set_size,
|
|
+ .long_term_ref_pic_set_size = sh->long_term_ref_pic_set_size,
|
|
+ };
|
|
+
|
|
+ if (sh->slice_sample_adaptive_offset_flag[0])
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA;
|
|
+
|
|
+ if (sh->slice_sample_adaptive_offset_flag[1])
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA;
|
|
+
|
|
+ if (sh->slice_temporal_mvp_enabled_flag)
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED;
|
|
+
|
|
+ if (sh->mvd_l1_zero_flag)
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO;
|
|
+
|
|
+ if (sh->cabac_init_flag)
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT;
|
|
+
|
|
+ if (sh->collocated_list == L0)
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0;
|
|
+
|
|
+ /* TODO: V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV */
|
|
+
|
|
+ if (sh->disable_deblocking_filter_flag)
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED;
|
|
+
|
|
+ if (sh->slice_loop_filter_across_slices_enabled_flag)
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED;
|
|
+
|
|
+ if (sh->dependent_slice_segment_flag)
|
|
+ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_DEPENDENT_SLICE_SEGMENT;
|
|
+
|
|
+ if (sh->slice_type != HEVC_SLICE_I) {
|
|
+ rpl = &h->ref->refPicList[0];
|
|
+ for (i = 0; i < rpl->nb_refs; i++)
|
|
+ slice_params->ref_idx_l0[i] = get_ref_pic_index(h, rpl->ref[i], dec_params);
|
|
+ }
|
|
+
|
|
+ if (sh->slice_type == HEVC_SLICE_B) {
|
|
+ rpl = &h->ref->refPicList[1];
|
|
+ for (i = 0; i < rpl->nb_refs; i++)
|
|
+ slice_params->ref_idx_l1[i] = get_ref_pic_index(h, rpl->ref[i], dec_params);
|
|
+ }
|
|
+
|
|
+ v4l2_request_hevc_fill_pred_table(h, &slice_params->pred_weight_table);
|
|
+
|
|
+ if (controls->num_entry_point_offsets < sh->num_entry_point_offsets) {
|
|
+ av_freep(&controls->entry_point_offsets);
|
|
+ controls->entry_point_offsets = av_mallocz(sizeof(*controls->entry_point_offsets) * sh->num_entry_point_offsets);
|
|
+ if (!controls->entry_point_offsets)
|
|
+ return AVERROR(ENOMEM);
|
|
+ controls->num_entry_point_offsets = sh->num_entry_point_offsets;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < sh->num_entry_point_offsets; i++)
|
|
+ controls->entry_point_offsets[i] = sh->entry_point_offset[i];
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void fill_sps(struct v4l2_ctrl_hevc_sps *ctrl, const HEVCContext *h)
|
|
+{
|
|
+ const HEVCSPS *sps = h->ps.sps;
|
|
+ const HEVCPPS *pps = h->ps.pps;
|
|
+
|
|
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Sequence parameter set */
|
|
+ *ctrl = (struct v4l2_ctrl_hevc_sps) {
|
|
+ .video_parameter_set_id = sps->vps_id,
|
|
+ .seq_parameter_set_id = pps->sps_id,
|
|
+ .pic_width_in_luma_samples = sps->width,
|
|
+ .pic_height_in_luma_samples = sps->height,
|
|
+ .bit_depth_luma_minus8 = sps->bit_depth - 8,
|
|
+ .bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8,
|
|
+ .log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4,
|
|
+ .sps_max_dec_pic_buffering_minus1 = sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering - 1,
|
|
+ .sps_max_num_reorder_pics = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics,
|
|
+ .sps_max_latency_increase_plus1 = sps->temporal_layer[sps->max_sub_layers - 1].max_latency_increase + 1,
|
|
+ .log2_min_luma_coding_block_size_minus3 = sps->log2_min_cb_size - 3,
|
|
+ .log2_diff_max_min_luma_coding_block_size = sps->log2_diff_max_min_coding_block_size,
|
|
+ .log2_min_luma_transform_block_size_minus2 = sps->log2_min_tb_size - 2,
|
|
+ .log2_diff_max_min_luma_transform_block_size = sps->log2_max_trafo_size - sps->log2_min_tb_size,
|
|
+ .max_transform_hierarchy_depth_inter = sps->max_transform_hierarchy_depth_inter,
|
|
+ .max_transform_hierarchy_depth_intra = sps->max_transform_hierarchy_depth_intra,
|
|
+ .pcm_sample_bit_depth_luma_minus1 = sps->pcm.bit_depth - 1,
|
|
+ .pcm_sample_bit_depth_chroma_minus1 = sps->pcm.bit_depth_chroma - 1,
|
|
+ .log2_min_pcm_luma_coding_block_size_minus3 = sps->pcm.log2_min_pcm_cb_size - 3,
|
|
+ .log2_diff_max_min_pcm_luma_coding_block_size = sps->pcm.log2_max_pcm_cb_size - sps->pcm.log2_min_pcm_cb_size,
|
|
+ .num_short_term_ref_pic_sets = sps->nb_st_rps,
|
|
+ .num_long_term_ref_pics_sps = sps->num_long_term_ref_pics_sps,
|
|
+ .chroma_format_idc = sps->chroma_format_idc,
|
|
+ .sps_max_sub_layers_minus1 = sps->max_sub_layers - 1,
|
|
+ };
|
|
+
|
|
+ if (sps->separate_colour_plane_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE;
|
|
+
|
|
+ if (sps->scaling_list_enable_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED;
|
|
+
|
|
+ if (sps->amp_enabled_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_AMP_ENABLED;
|
|
+
|
|
+ if (sps->sao_enabled)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET;
|
|
+
|
|
+ if (sps->pcm_enabled_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_PCM_ENABLED;
|
|
+
|
|
+ if (sps->pcm.loop_filter_disable_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED;
|
|
+
|
|
+ if (sps->long_term_ref_pics_present_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT;
|
|
+
|
|
+ if (sps->sps_temporal_mvp_enabled_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED;
|
|
+
|
|
+ if (sps->sps_strong_intra_smoothing_enable_flag)
|
|
+ ctrl->flags |= V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED;
|
|
+}
|
|
+
|
|
+static int v4l2_request_hevc_start_frame(AVCodecContext *avctx,
|
|
+ av_unused const uint8_t *buffer,
|
|
+ av_unused uint32_t size)
|
|
+{
|
|
+ const HEVCContext *h = avctx->priv_data;
|
|
+ const HEVCPPS *pps = h->ps.pps;
|
|
+ const HEVCSPS *sps = h->ps.sps;
|
|
+ const ScalingList *sl = pps->scaling_list_data_present_flag ?
|
|
+ &pps->scaling_list :
|
|
+ sps->scaling_list_enable_flag ?
|
|
+ &sps->scaling_list : NULL;
|
|
+ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private;
|
|
+ const SliceHeader *sh = &h->sh;
|
|
+
|
|
+ fill_sps(&controls->sps, h);
|
|
+ fill_dec_params(&controls->dec_params, h);
|
|
+
|
|
+ if (sl) {
|
|
+ for (int i = 0; i < 6; i++) {
|
|
+ for (int j = 0; j < 16; j++)
|
|
+ controls->scaling_matrix.scaling_list_4x4[i][j] = sl->sl[0][i][j];
|
|
+ for (int j = 0; j < 64; j++) {
|
|
+ controls->scaling_matrix.scaling_list_8x8[i][j] = sl->sl[1][i][j];
|
|
+ controls->scaling_matrix.scaling_list_16x16[i][j] = sl->sl[2][i][j];
|
|
+ if (i < 2)
|
|
+ controls->scaling_matrix.scaling_list_32x32[i][j] = sl->sl[3][i * 3][j];
|
|
+ }
|
|
+ controls->scaling_matrix.scaling_list_dc_coef_16x16[i] = sl->sl_dc[0][i];
|
|
+ if (i < 2)
|
|
+ controls->scaling_matrix.scaling_list_dc_coef_32x32[i] = sl->sl_dc[1][i * 3];
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */
|
|
+ controls->pps = (struct v4l2_ctrl_hevc_pps) {
|
|
+ .pic_parameter_set_id = sh->pps_id,
|
|
+ .num_extra_slice_header_bits = pps->num_extra_slice_header_bits,
|
|
+ .num_ref_idx_l0_default_active_minus1 = pps->num_ref_idx_l0_default_active - 1,
|
|
+ .num_ref_idx_l1_default_active_minus1 = pps->num_ref_idx_l1_default_active - 1,
|
|
+ .init_qp_minus26 = pps->pic_init_qp_minus26,
|
|
+ .diff_cu_qp_delta_depth = pps->diff_cu_qp_delta_depth,
|
|
+ .pps_cb_qp_offset = pps->cb_qp_offset,
|
|
+ .pps_cr_qp_offset = pps->cr_qp_offset,
|
|
+ .pps_beta_offset_div2 = pps->beta_offset / 2,
|
|
+ .pps_tc_offset_div2 = pps->tc_offset / 2,
|
|
+ .log2_parallel_merge_level_minus2 = pps->log2_parallel_merge_level - 2,
|
|
+ };
|
|
+
|
|
+ if (pps->dependent_slice_segments_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED;
|
|
+
|
|
+ if (pps->output_flag_present_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT;
|
|
+
|
|
+ if (pps->sign_data_hiding_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED;
|
|
+
|
|
+ if (pps->cabac_init_present_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT;
|
|
+
|
|
+ if (pps->constrained_intra_pred_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED;
|
|
+
|
|
+ if (pps->transform_skip_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED;
|
|
+
|
|
+ if (pps->cu_qp_delta_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED;
|
|
+
|
|
+ if (pps->pic_slice_level_chroma_qp_offsets_present_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT;
|
|
+
|
|
+ if (pps->weighted_pred_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED;
|
|
+
|
|
+ if (pps->weighted_bipred_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED;
|
|
+
|
|
+ if (pps->transquant_bypass_enable_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED;
|
|
+
|
|
+ if (pps->tiles_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TILES_ENABLED;
|
|
+
|
|
+ if (pps->entropy_coding_sync_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED;
|
|
+
|
|
+ if (pps->loop_filter_across_tiles_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED;
|
|
+
|
|
+ if (pps->seq_loop_filter_across_slices_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED;
|
|
+
|
|
+ if (pps->deblocking_filter_override_enabled_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED;
|
|
+
|
|
+ if (pps->disable_dbf)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER;
|
|
+
|
|
+ if (pps->lists_modification_present_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT;
|
|
+
|
|
+ if (pps->slice_header_extension_present_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT;
|
|
+
|
|
+ if (pps->deblocking_filter_control_present_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT;
|
|
+
|
|
+ if (pps->uniform_spacing_flag)
|
|
+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING;
|
|
+
|
|
+ if (pps->tiles_enabled_flag) {
|
|
+ controls->pps.num_tile_columns_minus1 = pps->num_tile_columns - 1;
|
|
+ controls->pps.num_tile_rows_minus1 = pps->num_tile_rows - 1;
|
|
+
|
|
+ for (int i = 0; i < pps->num_tile_columns; i++)
|
|
+ controls->pps.column_width_minus1[i] = pps->column_width[i] - 1;
|
|
+
|
|
+ for (int i = 0; i < pps->num_tile_rows; i++)
|
|
+ controls->pps.row_height_minus1[i] = pps->row_height[i] - 1;
|
|
+ }
|
|
+
|
|
+ controls->first_slice = 1;
|
|
+ controls->num_slices = 0;
|
|
+ controls->num_entry_point_offsets = 0;
|
|
+
|
|
+ return ff_v4l2_request_reset_frame(avctx, h->ref->frame);
|
|
+}
|
|
+
|
|
+static int v4l2_request_hevc_queue_decode(AVCodecContext *avctx, int last_slice)
|
|
+{
|
|
+ const HEVCContext *h = avctx->priv_data;
|
|
+ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private;
|
|
+ struct v4l2_ctrl_hevc_slice_params *first_slice_params = &controls->slice_params[0];
|
|
+ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int num_controls = 0;
|
|
+
|
|
+ struct v4l2_ext_control control[V4L2_HEVC_CONTROLS_MAX] = {};
|
|
+
|
|
+ control[num_controls++] = (struct v4l2_ext_control) {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
|
|
+ .ptr = &controls->sps,
|
|
+ .size = sizeof(controls->sps),
|
|
+ };
|
|
+
|
|
+ control[num_controls++] = (struct v4l2_ext_control) {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_PPS,
|
|
+ .ptr = &controls->pps,
|
|
+ .size = sizeof(controls->pps),
|
|
+ };
|
|
+
|
|
+ control[num_controls++] = (struct v4l2_ext_control) {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
|
|
+ .ptr = &controls->dec_params,
|
|
+ .size = sizeof(controls->dec_params),
|
|
+ };
|
|
+
|
|
+ if (ctx->supports_scaling_matrix)
|
|
+ control[num_controls++] = (struct v4l2_ext_control) {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
|
|
+ .ptr = &controls->scaling_matrix,
|
|
+ .size = sizeof(controls->scaling_matrix),
|
|
+ };
|
|
+
|
|
+ if (ctx->supports_slices)
|
|
+ control[num_controls++] = (struct v4l2_ext_control) {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
|
|
+ .ptr = &controls->slice_params,
|
|
+ .size = sizeof(*first_slice_params) * controls->num_slices,
|
|
+ };
|
|
+
|
|
+ // This assumes that decoders supporting entry_point_offsets submit a single slice per request.
|
|
+ if (ctx->supports_entry_point_offsets && first_slice_params->num_entry_point_offsets > 0)
|
|
+ control[num_controls++] = (struct v4l2_ext_control) {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
|
|
+ .ptr = controls->entry_point_offsets,
|
|
+ .size = sizeof(*controls->entry_point_offsets) * first_slice_params->num_entry_point_offsets,
|
|
+ };
|
|
+
|
|
+ if (ctx->decode_mode == V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED)
|
|
+ return ff_v4l2_request_decode_slice(avctx, h->ref->frame, control, num_controls, controls->first_slice, last_slice);
|
|
+
|
|
+ return ff_v4l2_request_decode_frame(avctx, h->ref->frame, control, num_controls);
|
|
+}
|
|
+
|
|
+static int v4l2_request_hevc_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
|
|
+{
|
|
+ const HEVCContext *h = avctx->priv_data;
|
|
+ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private;
|
|
+ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)h->ref->frame->data[0];
|
|
+ int ret, slice = FFMIN(controls->num_slices, MAX_SLICES - 1);
|
|
+
|
|
+ if (ctx->decode_mode == V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED && slice) {
|
|
+ ret = v4l2_request_hevc_queue_decode(avctx, 0);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ ff_v4l2_request_reset_frame(avctx, h->ref->frame);
|
|
+ slice = controls->num_slices = 0;
|
|
+ controls->first_slice = 0;
|
|
+ }
|
|
+
|
|
+ ret = v4l2_request_hevc_fill_slice_params(h, controls, slice);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (ctx->start_code == V4L2_STATELESS_HEVC_START_CODE_ANNEX_B) {
|
|
+ ret = ff_v4l2_request_append_output_buffer(avctx, h->ref->frame, nalu_slice_start_code, 3);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = ff_v4l2_request_append_output_buffer(avctx, h->ref->frame, buffer, size);
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ controls->slice_params[slice].bit_size = req->output.used * 8; //FIXME
|
|
+ controls->num_slices++;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int v4l2_request_hevc_end_frame(AVCodecContext *avctx)
|
|
+{
|
|
+ const HEVCContext *h = avctx->priv_data;
|
|
+ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private;
|
|
+ int ret;
|
|
+
|
|
+ ret = v4l2_request_hevc_queue_decode(avctx, 1);
|
|
+
|
|
+ av_freep(&controls->entry_point_offsets);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int v4l2_request_hevc_set_controls(AVCodecContext *avctx)
|
|
+{
|
|
+ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data;
|
|
+ int ret;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ { .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE, },
|
|
+ { .id = V4L2_CID_STATELESS_HEVC_START_CODE, },
|
|
+ };
|
|
+ struct v4l2_query_ext_ctrl entry_point_offsets = {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_ENTRY_POINT_OFFSETS,
|
|
+ };
|
|
+ struct v4l2_query_ext_ctrl slice_params = {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_SLICE_PARAMS,
|
|
+ };
|
|
+ struct v4l2_query_ext_ctrl scaling_matrix = {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
|
|
+ };
|
|
+
|
|
+ ctx->decode_mode = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_STATELESS_HEVC_DECODE_MODE);
|
|
+ if (ctx->decode_mode != V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED &&
|
|
+ ctx->decode_mode != V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: unsupported decode mode, %d\n", __func__, ctx->decode_mode);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ ctx->start_code = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_STATELESS_HEVC_START_CODE);
|
|
+ if (ctx->start_code != V4L2_STATELESS_HEVC_START_CODE_NONE &&
|
|
+ ctx->start_code != V4L2_STATELESS_HEVC_START_CODE_ANNEX_B) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: unsupported start code, %d\n", __func__, ctx->start_code);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ ret = ff_v4l2_request_query_control(avctx, &entry_point_offsets);
|
|
+ if (ret) {
|
|
+ ctx->supports_entry_point_offsets = 0;
|
|
+ } else {
|
|
+ ctx->supports_entry_point_offsets = 1;
|
|
+ }
|
|
+
|
|
+ ret = ff_v4l2_request_query_control(avctx, &slice_params);
|
|
+ if (ret) {
|
|
+ ctx->supports_slices = 0;
|
|
+ ctx->max_slices = 0;
|
|
+ if (ctx->decode_mode == V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: decoder is slice-based, but doesn't "
+ "support the V4L2_CID_STATELESS_HEVC_SLICE_PARAMS control\n", __func__);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+
|
|
+ if (ctx->supports_entry_point_offsets) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: decoder supports entry_point_offsets, but doesn't "
+ "support the V4L2_CID_STATELESS_HEVC_SLICE_PARAMS control\n", __func__);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+ } else {
|
|
+ ctx->supports_slices = 1;
|
|
+ ctx->max_slices = slice_params.dims[0];
|
|
+ if (ctx->max_slices > MAX_SLICES) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "%s: unsupported max slices, %u\n", __func__, ctx->max_slices);
|
|
+ return AVERROR(EINVAL);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret = ff_v4l2_request_query_control(avctx, &scaling_matrix);
|
|
+ if (ret)
|
|
+ ctx->supports_scaling_matrix = 0;
|
|
+ else
|
|
+ ctx->supports_scaling_matrix = 1;
|
|
+
|
|
+ av_log(avctx, AV_LOG_DEBUG, "%s: decoder is %s, supports slices: %d, supports entry_point_offsets: %d, supports scaling_matrix: %d, max slices: %u\n",
|
|
+ __func__,
|
|
+ ctx->decode_mode == V4L2_STATELESS_HEVC_DECODE_MODE_SLICE_BASED ? "slice based" : "frame based",
|
|
+ ctx->supports_slices,
|
|
+ ctx->supports_entry_point_offsets,
|
|
+ ctx->supports_scaling_matrix,
|
|
+ ctx->max_slices
|
|
+ );
|
|
+
|
|
+ control[0].value = ctx->decode_mode;
|
|
+ control[1].value = ctx->start_code;
|
|
+
|
|
+ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control));
|
|
+}
|
|
+
|
|
+static int v4l2_request_hevc_init(AVCodecContext *avctx)
|
|
+{
|
|
+ const HEVCContext *h = avctx->priv_data;
|
|
+ struct v4l2_ctrl_hevc_sps sps;
|
|
+ int ret;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
|
|
+ .ptr = &sps,
|
|
+ .size = sizeof(sps),
|
|
+ },
|
|
+ };
|
|
+
|
|
+ fill_sps(&sps, h);
|
|
+
|
|
+ ret = ff_v4l2_request_init(avctx, V4L2_PIX_FMT_HEVC_SLICE, 4 * 1024 * 1024, control, FF_ARRAY_ELEMS(control));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ return v4l2_request_hevc_set_controls(avctx);
|
|
+}
|
|
+
|
|
+const AVHWAccel ff_hevc_v4l2request_hwaccel = {
|
|
+ .name = "hevc_v4l2request",
|
|
+ .type = AVMEDIA_TYPE_VIDEO,
|
|
+ .id = AV_CODEC_ID_HEVC,
|
|
+ .pix_fmt = AV_PIX_FMT_DRM_PRIME,
|
|
+ .start_frame = v4l2_request_hevc_start_frame,
|
|
+ .decode_slice = v4l2_request_hevc_decode_slice,
|
|
+ .end_frame = v4l2_request_hevc_end_frame,
|
|
+ .frame_priv_data_size = sizeof(V4L2RequestControlsHEVC),
|
|
+ .init = v4l2_request_hevc_init,
|
|
+ .uninit = ff_v4l2_request_uninit,
|
|
+ .priv_data_size = sizeof(V4L2RequestContextHEVC),
|
|
+ .frame_params = ff_v4l2_request_frame_params,
|
|
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
|
|
+};
From 760b6f7d4662d5c5d3bc6292a53a40e8fc2fc18d Mon Sep 17 00:00:00 2001
|
|
From: Boris Brezillon <boris.brezillon@collabora.com>
|
|
Date: Thu, 12 Dec 2019 16:13:55 +0100
|
|
Subject: [PATCH 08/13] Add V4L2 request API VP9 hwaccel
|
|
|
|
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
|
|
Signed-off-by: Jernej Skrabec <jernej.skrabec@gmail.com>
|
|
---
|
|
configure | 3 +
|
|
libavcodec/Makefile | 1 +
|
|
libavcodec/hwaccels.h | 1 +
|
|
libavcodec/v4l2_request_vp9.c | 282 ++++++++++++++++++++++++++++++++++
|
|
libavcodec/vp9.c | 192 ++++++++++++++++-------
|
|
libavcodec/vp9dec.h | 4 +
|
|
libavcodec/vp9shared.h | 1 +
|
|
7 files changed, 429 insertions(+), 55 deletions(-)
|
|
create mode 100644 libavcodec/v4l2_request_vp9.c
|
|
|
|
diff --git a/configure b/configure
|
|
index 2565ce8d7c..081174babb 100755
|
|
--- a/configure
|
|
+++ b/configure
|
|
@@ -3119,6 +3119,8 @@ vp9_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_VP9"
|
|
vp9_dxva2_hwaccel_select="vp9_decoder"
|
|
vp9_nvdec_hwaccel_deps="nvdec"
|
|
vp9_nvdec_hwaccel_select="vp9_decoder"
|
|
+vp9_v4l2request_hwaccel_deps="v4l2_request"
|
|
+vp9_v4l2request_hwaccel_select="vp9_decoder"
|
|
vp9_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferVP9_bit_depth"
|
|
vp9_vaapi_hwaccel_select="vp9_decoder"
|
|
vp9_vdpau_hwaccel_deps="vdpau VdpPictureInfoVP9"
|
|
@@ -6899,6 +6901,7 @@ check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;"
|
|
check_cc hevc_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC_SLICE;"
|
|
check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;"
|
|
check_cc vp8_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP8_FRAME;"
|
|
+check_cc vp9_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP9_FRAME;"
|
|
|
|
check_headers sys/videoio.h
|
|
test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete
|
|
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
|
|
index cd08740e75..e2c957763a 100644
|
|
--- a/libavcodec/Makefile
|
|
+++ b/libavcodec/Makefile
|
|
@@ -1030,6 +1030,7 @@ OBJS-$(CONFIG_VP8_VAAPI_HWACCEL) += vaapi_vp8.o
|
|
OBJS-$(CONFIG_VP9_D3D11VA_HWACCEL) += dxva2_vp9.o
|
|
OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o
|
|
OBJS-$(CONFIG_VP9_NVDEC_HWACCEL) += nvdec_vp9.o
|
|
+OBJS-$(CONFIG_VP9_V4L2REQUEST_HWACCEL) += v4l2_request_vp9.o
|
|
OBJS-$(CONFIG_VP9_VAAPI_HWACCEL) += vaapi_vp9.o
|
|
OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o
|
|
OBJS-$(CONFIG_VP9_VIDEOTOOLBOX_HWACCEL) += videotoolbox_vp9.o
|
|
diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h
|
|
index e4e4abc060..53f4f61fc5 100644
|
|
--- a/libavcodec/hwaccels.h
|
|
+++ b/libavcodec/hwaccels.h
|
|
@@ -76,6 +76,7 @@ extern const AVHWAccel ff_vp9_d3d11va_hwaccel;
|
|
extern const AVHWAccel ff_vp9_d3d11va2_hwaccel;
|
|
extern const AVHWAccel ff_vp9_dxva2_hwaccel;
|
|
extern const AVHWAccel ff_vp9_nvdec_hwaccel;
|
|
+extern const AVHWAccel ff_vp9_v4l2request_hwaccel;
|
|
extern const AVHWAccel ff_vp9_vaapi_hwaccel;
|
|
extern const AVHWAccel ff_vp9_vdpau_hwaccel;
|
|
extern const AVHWAccel ff_vp9_videotoolbox_hwaccel;
|
|
diff --git a/libavcodec/v4l2_request_vp9.c b/libavcodec/v4l2_request_vp9.c
|
|
new file mode 100644
|
|
index 0000000000..ec0300f66d
|
|
--- /dev/null
|
|
+++ b/libavcodec/v4l2_request_vp9.c
|
|
@@ -0,0 +1,282 @@
|
|
+/*
|
|
+ * This file is part of FFmpeg.
|
|
+ *
|
|
+ * FFmpeg is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2.1 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * FFmpeg is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with FFmpeg; if not, write to the Free Software
|
|
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
+ */
|
|
+
|
|
+#include "hwconfig.h"
|
|
+#include "v4l2_request.h"
|
|
+#include "vp9dec.h"
|
|
+
|
|
+typedef struct V4L2RequestControlsVP9 {
|
|
+ struct v4l2_ctrl_vp9_frame decode_params;
|
|
+ struct v4l2_ctrl_vp9_compressed_hdr chp;
|
|
+} V4L2RequestControlsVP9;
|
|
+
|
|
+static void v4l2_request_vp9_set_frame_ctx(AVCodecContext *avctx)
|
|
+{
|
|
+ VP9Context *s = avctx->priv_data;
|
|
+ const VP9Frame *f = &s->s.frames[CUR_FRAME];
|
|
+ V4L2RequestControlsVP9 *controls = f->hwaccel_picture_private;
|
|
+ struct v4l2_ctrl_vp9_compressed_hdr *chp = &controls->chp;
|
|
+
|
|
+ memset(chp, 0, sizeof(*chp));
|
|
+
|
|
+ chp->tx_mode = s->s.h.txfmmode;
|
|
+ memcpy(chp->tx8, s->prob_raw.p.tx8p, sizeof(s->prob_raw.p.tx8p));
|
|
+ memcpy(chp->tx16, s->prob_raw.p.tx16p, sizeof(s->prob_raw.p.tx16p));
|
|
+ memcpy(chp->tx32, s->prob_raw.p.tx32p, sizeof(s->prob_raw.p.tx32p));
|
|
+ for (unsigned i = 0; i < 4; i++) {
|
|
+ for (unsigned j = 0; j < 2; j++) {
|
|
+ for (unsigned k = 0; k < 2; k++) {
|
|
+ for (unsigned l = 0; l < 6; l++) {
|
|
+ for (unsigned m = 0; m < 6; m++) {
|
|
+ memcpy(chp->coef[i][j][k][l][m], s->prob_raw.coef[i][j][k][l][m], sizeof(chp->coef[0][0][0][0][0]));
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ memcpy(chp->skip, s->prob_raw.p.skip, sizeof(s->prob_raw.p.skip));
|
|
+ memcpy(chp->inter_mode, s->prob_raw.p.mv_mode, sizeof(s->prob_raw.p.mv_mode));
|
|
+ memcpy(chp->interp_filter, s->prob_raw.p.filter, sizeof(s->prob_raw.p.filter));
|
|
+ memcpy(chp->is_inter, s->prob_raw.p.intra, sizeof(s->prob_raw.p.intra));
|
|
+ memcpy(chp->comp_mode, s->prob_raw.p.comp, sizeof(s->prob_raw.p.comp));
|
|
+ memcpy(chp->single_ref, s->prob_raw.p.single_ref, sizeof(s->prob_raw.p.single_ref));
|
|
+ memcpy(chp->comp_ref, s->prob_raw.p.comp_ref, sizeof(s->prob_raw.p.comp_ref));
|
|
+ memcpy(chp->y_mode, s->prob_raw.p.y_mode, sizeof(s->prob_raw.p.y_mode));
|
|
+ for (unsigned i = 0; i < 10; i++)
|
|
+ memcpy(chp->uv_mode[i], s->prob.p.uv_mode[i], sizeof(s->prob.p.uv_mode[0]));
|
|
+ for (unsigned i = 0; i < 4; i++)
|
|
+ memcpy(chp->partition[i * 4], s->prob_raw.p.partition[i], sizeof(s->prob_raw.p.partition[0]));
|
|
+ memcpy(chp->mv.joint, s->prob_raw.p.mv_joint, sizeof(s->prob_raw.p.mv_joint));
|
|
+ for (unsigned i = 0; i < 2; i++) {
|
|
+ chp->mv.sign[i] = s->prob_raw.p.mv_comp[i].sign;
|
|
+ memcpy(chp->mv.classes[i], s->prob_raw.p.mv_comp[i].classes, sizeof(s->prob_raw.p.mv_comp[0].classes));
|
|
+ chp->mv.class0_bit[i] = s->prob_raw.p.mv_comp[i].class0;
|
|
+ memcpy(chp->mv.bits[i], s->prob_raw.p.mv_comp[i].bits, sizeof(s->prob_raw.p.mv_comp[0].bits));
|
|
+ memcpy(chp->mv.class0_fr[i], s->prob_raw.p.mv_comp[i].class0_fp, sizeof(s->prob_raw.p.mv_comp[0].class0_fp));
|
|
+ memcpy(chp->mv.fr[i], s->prob_raw.p.mv_comp[i].fp, sizeof(s->prob_raw.p.mv_comp[0].fp));
|
|
+ chp->mv.class0_hp[i] = s->prob_raw.p.mv_comp[i].class0_hp;
|
|
+ chp->mv.hp[i] = s->prob_raw.p.mv_comp[i].hp;
|
|
+ }
|
|
+}
|
|
+
|
|
+static void fill_frame(struct v4l2_ctrl_vp9_frame *dec_params, AVCodecContext *avctx)
|
|
+{
|
|
+ const VP9Context *s = avctx->priv_data;
|
|
+ const ThreadFrame *ref;
|
|
+
|
|
+ memset(dec_params, 0, sizeof(*dec_params));
|
|
+
|
|
+ if (s->s.h.keyframe)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_KEY_FRAME;
|
|
+ if (!s->s.h.invisible)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_SHOW_FRAME;
|
|
+ if (s->s.h.errorres)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT;
|
|
+ if (s->s.h.intraonly)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_INTRA_ONLY;
|
|
+ if (!s->s.h.keyframe && s->s.h.highprecisionmvs)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV;
|
|
+ if (s->s.h.refreshctx)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX;
|
|
+ if (s->s.h.parallelmode)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE;
|
|
+ if (s->ss_h)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING;
|
|
+ if (s->ss_v)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING;
|
|
+ if (avctx->color_range == AVCOL_RANGE_JPEG)
|
|
+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING;
|
|
+
|
|
+ dec_params->compressed_header_size = s->s.h.compressed_header_size;
|
|
+ dec_params->uncompressed_header_size = s->s.h.uncompressed_header_size;
|
|
+ dec_params->profile = s->s.h.profile;
|
|
+ dec_params->reset_frame_context = s->s.h.resetctx > 0 ? s->s.h.resetctx - 1 : 0;
|
|
+ dec_params->frame_context_idx = s->s.h.framectxid;
|
|
+ dec_params->bit_depth = s->s.h.bpp;
|
|
+
|
|
+ dec_params->interpolation_filter = s->s.h.filtermode ^ (s->s.h.filtermode <= 1);
|
|
+ dec_params->tile_cols_log2 = s->s.h.tiling.log2_tile_cols;
|
|
+ dec_params->tile_rows_log2 = s->s.h.tiling.log2_tile_rows;
|
|
+ dec_params->reference_mode = s->s.h.comppredmode;
|
|
+ dec_params->frame_width_minus_1 = s->w - 1;
|
|
+ dec_params->frame_height_minus_1 = s->h - 1;
|
|
+ //dec_params->render_width_minus_1 = avctx->width - 1;
|
|
+ //dec_params->render_height_minus_1 = avctx->height - 1;
|
|
+
|
|
+ ref = &s->s.refs[s->s.h.refidx[0]];
|
|
+ if (ref->f && ref->f->buf[0])
|
|
+ dec_params->last_frame_ts = ff_v4l2_request_get_capture_timestamp(ref->f);
|
|
+ ref = &s->s.refs[s->s.h.refidx[1]];
|
|
+ if (ref->f && ref->f->buf[0])
|
|
+ dec_params->golden_frame_ts = ff_v4l2_request_get_capture_timestamp(ref->f);
|
|
+ ref = &s->s.refs[s->s.h.refidx[2]];
|
|
+ if (ref->f && ref->f->buf[0])
|
|
+ dec_params->alt_frame_ts = ff_v4l2_request_get_capture_timestamp(ref->f);
|
|
+
|
|
+ if (s->s.h.signbias[0])
|
|
+ dec_params->ref_frame_sign_bias |= V4L2_VP9_SIGN_BIAS_LAST;
|
|
+ if (s->s.h.signbias[1])
|
|
+ dec_params->ref_frame_sign_bias |= V4L2_VP9_SIGN_BIAS_GOLDEN;
|
|
+ if (s->s.h.signbias[2])
|
|
+ dec_params->ref_frame_sign_bias |= V4L2_VP9_SIGN_BIAS_ALT;
|
|
+
|
|
+ if (s->s.h.lf_delta.enabled)
|
|
+ dec_params->lf.flags |= V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED;
|
|
+ if (s->s.h.lf_delta.updated)
|
|
+ dec_params->lf.flags |= V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE;
|
|
+
|
|
+ dec_params->lf.level = s->s.h.filter.level;
|
|
+ dec_params->lf.sharpness = s->s.h.filter.sharpness;
|
|
+ for (unsigned i = 0; i < 4; i++)
|
|
+ dec_params->lf.ref_deltas[i] = s->s.h.lf_delta.ref[i];
|
|
+ for (unsigned i = 0; i < 2; i++)
|
|
+ dec_params->lf.mode_deltas[i] = s->s.h.lf_delta.mode[i];
|
|
+
|
|
+ dec_params->quant.base_q_idx = s->s.h.yac_qi;
|
|
+ dec_params->quant.delta_q_y_dc = s->s.h.ydc_qdelta;
|
|
+ dec_params->quant.delta_q_uv_dc = s->s.h.uvdc_qdelta;
|
|
+ dec_params->quant.delta_q_uv_ac = s->s.h.uvac_qdelta;
|
|
+
|
|
+ if (s->s.h.segmentation.enabled)
|
|
+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_ENABLED;
|
|
+ if (s->s.h.segmentation.update_map)
|
|
+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP;
|
|
+ if (s->s.h.segmentation.temporal)
|
|
+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE;
|
|
+ if (s->s.h.segmentation.update_data)
|
|
+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA;
|
|
+ if (s->s.h.segmentation.absolute_vals)
|
|
+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE;
|
|
+
|
|
+ for (unsigned i = 0; i < 7; i++)
|
|
+ dec_params->seg.tree_probs[i] = s->s.h.segmentation.prob[i];
|
|
+
|
|
+ if (s->s.h.segmentation.temporal) {
|
|
+ for (unsigned i = 0; i < 3; i++)
|
|
+ dec_params->seg.pred_probs[i] = s->s.h.segmentation.pred_prob[i];
|
|
+ } else {
|
|
+ memset(dec_params->seg.pred_probs, 255, sizeof(dec_params->seg.pred_probs));
|
|
+ }
|
|
+
|
|
+ for (unsigned i = 0; i < 8; i++) {
|
|
+ if (s->s.h.segmentation.feat[i].q_enabled) {
|
|
+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEG_LVL_ALT_Q;
|
|
+ dec_params->seg.feature_data[i][V4L2_VP9_SEG_LVL_ALT_Q] = s->s.h.segmentation.feat[i].q_val;
|
|
+ }
|
|
+
|
|
+ if (s->s.h.segmentation.feat[i].lf_enabled) {
|
|
+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEG_LVL_ALT_L;
|
|
+ dec_params->seg.feature_data[i][V4L2_VP9_SEG_LVL_ALT_L] = s->s.h.segmentation.feat[i].lf_val;
|
|
+ }
|
|
+
|
|
+ if (s->s.h.segmentation.feat[i].ref_enabled) {
|
|
+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEG_LVL_REF_FRAME;
|
|
+ dec_params->seg.feature_data[i][V4L2_VP9_SEG_LVL_REF_FRAME] = s->s.h.segmentation.feat[i].ref_val;
|
|
+ }
|
|
+
|
|
+ if (s->s.h.segmentation.feat[i].skip_enabled)
|
|
+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEG_LVL_SKIP;
|
|
+ }
|
|
+}
|
|
+
|
|
+static int v4l2_request_vp9_start_frame(AVCodecContext *avctx,
|
|
+ av_unused const uint8_t *buffer,
|
|
+ av_unused uint32_t size)
|
|
+{
|
|
+ const VP9Context *s = avctx->priv_data;
|
|
+ const VP9Frame *f = &s->s.frames[CUR_FRAME];
|
|
+ V4L2RequestControlsVP9 *controls = f->hwaccel_picture_private;
|
|
+
|
|
+ v4l2_request_vp9_set_frame_ctx(avctx);
|
|
+
|
|
+ fill_frame(&controls->decode_params, avctx);
|
|
+
|
|
+ return ff_v4l2_request_reset_frame(avctx, f->tf.f);
|
|
+}
|
|
+
|
|
+static int v4l2_request_vp9_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
|
|
+{
|
|
+ const VP9Context *s = avctx->priv_data;
|
|
+ const VP9Frame *f = &s->s.frames[CUR_FRAME];
|
|
+
|
|
+ return ff_v4l2_request_append_output_buffer(avctx, f->tf.f, buffer, size);
|
|
+}
|
|
+
|
|
+static int v4l2_request_vp9_end_frame(AVCodecContext *avctx)
|
|
+{
|
|
+ const VP9Context *s = avctx->priv_data;
|
|
+ const VP9Frame *f = &s->s.frames[CUR_FRAME];
|
|
+ V4L2RequestControlsVP9 *controls = f->hwaccel_picture_private;
|
|
+ int ret;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_VP9_FRAME,
|
|
+ .ptr = &controls->decode_params,
|
|
+ .size = sizeof(controls->decode_params),
|
|
+ },
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
|
|
+ .ptr = &controls->chp,
|
|
+ .size = sizeof(controls->chp),
|
|
+ },
|
|
+ };
|
|
+
|
|
+ ret = ff_v4l2_request_decode_frame(avctx, f->tf.f, control, FF_ARRAY_ELEMS(control));
|
|
+ if (ret)
|
|
+ return ret;
|
|
+
|
|
+ if (!s->s.h.refreshctx)
|
|
+ return 0;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int v4l2_request_vp9_init(AVCodecContext *avctx)
|
|
+{
|
|
+ struct v4l2_ctrl_vp9_frame frame;
|
|
+
|
|
+ struct v4l2_ext_control control[] = {
|
|
+ {
|
|
+ .id = V4L2_CID_STATELESS_VP9_FRAME,
|
|
+ .ptr = &frame,
|
|
+ .size = sizeof(frame),
|
|
+ },
|
|
+ };
|
|
+
|
|
+ fill_frame(&frame, avctx);
|
|
+
|
|
+ // TODO: check V4L2_CID_MPEG_VIDEO_VP9_PROFILE
|
|
+ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_VP9_FRAME, 3 * 1024 * 1024, control, FF_ARRAY_ELEMS(control));
|
|
+}
|
|
+
|
|
+const AVHWAccel ff_vp9_v4l2request_hwaccel = {
|
|
+ .name = "vp9_v4l2request",
|
|
+ .type = AVMEDIA_TYPE_VIDEO,
|
|
+ .id = AV_CODEC_ID_VP9,
|
|
+ .pix_fmt = AV_PIX_FMT_DRM_PRIME,
|
|
+ .start_frame = v4l2_request_vp9_start_frame,
|
|
+ .decode_slice = v4l2_request_vp9_decode_slice,
|
|
+ .end_frame = v4l2_request_vp9_end_frame,
|
|
+ .frame_priv_data_size = sizeof(V4L2RequestControlsVP9),
|
|
+ .init = v4l2_request_vp9_init,
|
|
+ .uninit = ff_v4l2_request_uninit,
|
|
+ .priv_data_size = sizeof(V4L2RequestContext),
|
|
+ .frame_params = ff_v4l2_request_frame_params,
|
|
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
|
|
+};
|
|
diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c
|
|
index 7c0a246446..9cc36960eb 100644
|
|
--- a/libavcodec/vp9.c
|
|
+++ b/libavcodec/vp9.c
|
|
@@ -185,6 +185,7 @@ static int update_size(AVCodecContext *avctx, int w, int h)
|
|
#define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \
|
|
CONFIG_VP9_D3D11VA_HWACCEL * 2 + \
|
|
CONFIG_VP9_NVDEC_HWACCEL + \
|
|
+ CONFIG_VP9_V4L2REQUEST_HWACCEL + \
|
|
CONFIG_VP9_VAAPI_HWACCEL + \
|
|
CONFIG_VP9_VDPAU_HWACCEL + \
|
|
CONFIG_VP9_VIDEOTOOLBOX_HWACCEL)
|
|
@@ -213,6 +214,9 @@ static int update_size(AVCodecContext *avctx, int w, int h)
|
|
#if CONFIG_VP9_NVDEC_HWACCEL
|
|
*fmtp++ = AV_PIX_FMT_CUDA;
|
|
#endif
|
|
+#if CONFIG_VP9_V4L2REQUEST_HWACCEL
|
|
+ *fmtp++ = AV_PIX_FMT_DRM_PRIME;
|
|
+#endif
|
|
#if CONFIG_VP9_VAAPI_HWACCEL
|
|
*fmtp++ = AV_PIX_FMT_VAAPI;
|
|
#endif
|
|
@@ -227,6 +231,9 @@ static int update_size(AVCodecContext *avctx, int w, int h)
|
|
#if CONFIG_VP9_NVDEC_HWACCEL
|
|
*fmtp++ = AV_PIX_FMT_CUDA;
|
|
#endif
|
|
+#if CONFIG_VP9_V4L2REQUEST_HWACCEL
|
|
+ *fmtp++ = AV_PIX_FMT_DRM_PRIME;
|
|
+#endif
|
|
#if CONFIG_VP9_VAAPI_HWACCEL
|
|
*fmtp++ = AV_PIX_FMT_VAAPI;
|
|
#endif
|
|
@@ -387,7 +394,7 @@ static av_always_inline int inv_recenter_nonneg(int v, int m)
|
|
}
|
|
|
|
// differential forward probability updates
|
|
-static int update_prob(VPXRangeCoder *c, int p)
|
|
+static int read_prob_delta(VPXRangeCoder *c)
|
|
{
|
|
static const uint8_t inv_map_table[255] = {
|
|
7, 20, 33, 46, 59, 72, 85, 98, 111, 124, 137, 150, 163, 176,
|
|
@@ -441,8 +448,13 @@ static int update_prob(VPXRangeCoder *c, int p)
|
|
av_assert2(d < FF_ARRAY_ELEMS(inv_map_table));
|
|
}
|
|
|
|
- return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
|
|
- 255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
|
|
+ return inv_map_table[d];
|
|
+}
|
|
+
|
|
+static int update_prob(int p, int d)
|
|
+{
|
|
+ return p <= 128 ? 1 + inv_recenter_nonneg(d, p - 1) :
|
|
+ 255 - inv_recenter_nonneg(d, 255 - p);
|
|
}
|
|
|
|
static int read_colorspace_details(AVCodecContext *avctx)
|
|
@@ -708,7 +720,8 @@ static int decode_frame_header(AVCodecContext *avctx,
|
|
get_bits(&s->gb, 8) : 255;
|
|
}
|
|
|
|
- if (get_bits1(&s->gb)) {
|
|
+ s->s.h.segmentation.update_data = get_bits1(&s->gb);
|
|
+ if (s->s.h.segmentation.update_data) {
|
|
s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
|
|
for (i = 0; i < 8; i++) {
|
|
if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
|
|
@@ -908,6 +921,8 @@ static int decode_frame_header(AVCodecContext *avctx,
|
|
* as explicit copies if the fw update is missing (and skip the copy upon
|
|
* fw update)? */
|
|
s->prob.p = s->prob_ctx[c].p;
|
|
+ memset(&s->prob_raw.p, 0, sizeof(s->prob_raw.p));
|
|
+ memset(&s->prob_raw.coef, 0, sizeof(s->prob_raw.coef));
|
|
|
|
// txfm updates
|
|
if (s->s.h.lossless) {
|
|
@@ -919,18 +934,25 @@ static int decode_frame_header(AVCodecContext *avctx,
|
|
|
|
if (s->s.h.txfmmode == TX_SWITCHABLE) {
|
|
for (i = 0; i < 2; i++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.tx8p[i] = read_prob_delta(&s->c);
|
|
+ s->prob.p.tx8p[i] = update_prob(s->prob.p.tx8p[i],
|
|
+ s->prob_raw.p.tx8p[i]);
|
|
+ }
|
|
for (i = 0; i < 2; i++)
|
|
for (j = 0; j < 2; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.tx16p[i][j] =
|
|
- update_prob(&s->c, s->prob.p.tx16p[i][j]);
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.tx16p[i][j] = read_prob_delta(&s->c);
|
|
+ s->prob.p.tx16p[i][j] = update_prob(s->prob.p.tx16p[i][j],
|
|
+ s->prob_raw.p.tx16p[i][j]);
|
|
+ }
|
|
for (i = 0; i < 2; i++)
|
|
for (j = 0; j < 3; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.tx32p[i][j] =
|
|
- update_prob(&s->c, s->prob.p.tx32p[i][j]);
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.tx32p[i][j] = read_prob_delta(&s->c);
|
|
+ s->prob.p.tx32p[i][j] = update_prob(s->prob.p.tx32p[i][j],
|
|
+ s->prob_raw.p.tx32p[i][j]);
|
|
+ }
|
|
}
|
|
}
|
|
|
|
@@ -942,15 +964,18 @@ static int decode_frame_header(AVCodecContext *avctx,
|
|
for (k = 0; k < 2; k++)
|
|
for (l = 0; l < 6; l++)
|
|
for (m = 0; m < 6; m++) {
|
|
+ uint8_t *pd = s->prob_raw.coef[i][j][k][l][m];
|
|
uint8_t *p = s->prob.coef[i][j][k][l][m];
|
|
uint8_t *r = ref[j][k][l][m];
|
|
if (m >= 3 && l == 0) // dc only has 3 pt
|
|
break;
|
|
for (n = 0; n < 3; n++) {
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- p[n] = update_prob(&s->c, r[n]);
|
|
- else
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ pd[n] = read_prob_delta(&s->c);
|
|
+ p[n] = update_prob(r[n], pd[n]);
|
|
+ } else {
|
|
p[n] = r[n];
|
|
+ }
|
|
}
|
|
memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
|
|
}
|
|
@@ -965,7 +990,7 @@ static int decode_frame_header(AVCodecContext *avctx,
|
|
break;
|
|
memcpy(p, r, 3);
|
|
memcpy(&p[3], ff_vp9_model_pareto8[p[2]], 8);
|
|
- }
|
|
+ }
|
|
}
|
|
if (s->s.h.txfmmode == i)
|
|
break;
|
|
@@ -973,25 +998,37 @@ static int decode_frame_header(AVCodecContext *avctx,
|
|
|
|
// mode updates
|
|
for (i = 0; i < 3; i++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.skip[i] = read_prob_delta(&s->c);
|
|
+ s->prob.p.skip[i] = update_prob(s->prob.p.skip[i],
|
|
+ s->prob_raw.p.skip[i]);
|
|
+ }
|
|
if (!s->s.h.keyframe && !s->s.h.intraonly) {
|
|
for (i = 0; i < 7; i++)
|
|
for (j = 0; j < 3; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_mode[i][j] = read_prob_delta(&s->c);
|
|
s->prob.p.mv_mode[i][j] =
|
|
- update_prob(&s->c, s->prob.p.mv_mode[i][j]);
|
|
+ update_prob(s->prob.p.mv_mode[i][j],
|
|
+ s->prob_raw.p.mv_mode[i][j]);
|
|
+ }
|
|
|
|
if (s->s.h.filtermode == FILTER_SWITCHABLE)
|
|
for (i = 0; i < 4; i++)
|
|
for (j = 0; j < 2; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.filter[i][j] = read_prob_delta(&s->c);
|
|
s->prob.p.filter[i][j] =
|
|
- update_prob(&s->c, s->prob.p.filter[i][j]);
|
|
+ update_prob(s->prob.p.filter[i][j],
|
|
+ s->prob_raw.p.filter[i][j]);
|
|
+ }
|
|
|
|
for (i = 0; i < 4; i++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.intra[i] = read_prob_delta(&s->c);
|
|
+ s->prob.p.intra[i] = update_prob(s->prob.p.intra[i],
|
|
+ s->prob_raw.p.intra[i]);
|
|
+ }
|
|
|
|
if (s->s.h.allowcompinter) {
|
|
s->s.h.comppredmode = vp89_rac_get(&s->c);
|
|
@@ -999,92 +1036,134 @@ static int decode_frame_header(AVCodecContext *avctx,
|
|
s->s.h.comppredmode += vp89_rac_get(&s->c);
|
|
if (s->s.h.comppredmode == PRED_SWITCHABLE)
|
|
for (i = 0; i < 5; i++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.comp[i] = read_prob_delta(&s->c);
|
|
s->prob.p.comp[i] =
|
|
- update_prob(&s->c, s->prob.p.comp[i]);
|
|
+ update_prob(s->prob.p.comp[i], s->prob_raw.p.comp[i]);
|
|
+ }
|
|
} else {
|
|
s->s.h.comppredmode = PRED_SINGLEREF;
|
|
}
|
|
|
|
if (s->s.h.comppredmode != PRED_COMPREF) {
|
|
for (i = 0; i < 5; i++) {
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.single_ref[i][0] = read_prob_delta(&s->c);
|
|
s->prob.p.single_ref[i][0] =
|
|
- update_prob(&s->c, s->prob.p.single_ref[i][0]);
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ update_prob(s->prob.p.single_ref[i][0],
|
|
+ s->prob_raw.p.single_ref[i][0]);
|
|
+ }
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.single_ref[i][1] = read_prob_delta(&s->c);
|
|
s->prob.p.single_ref[i][1] =
|
|
- update_prob(&s->c, s->prob.p.single_ref[i][1]);
|
|
+ update_prob(s->prob.p.single_ref[i][1],
|
|
+ s->prob_raw.p.single_ref[i][1]);
|
|
+ }
|
|
}
|
|
}
|
|
|
|
if (s->s.h.comppredmode != PRED_SINGLEREF) {
|
|
for (i = 0; i < 5; i++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.comp_ref[i] = read_prob_delta(&s->c);
|
|
s->prob.p.comp_ref[i] =
|
|
- update_prob(&s->c, s->prob.p.comp_ref[i]);
|
|
+ update_prob(s->prob.p.comp_ref[i],
|
|
+ s->prob_raw.p.comp_ref[i]);
|
|
+ }
|
|
}
|
|
|
|
for (i = 0; i < 4; i++)
|
|
for (j = 0; j < 9; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.y_mode[i][j] = read_prob_delta(&s->c);
|
|
s->prob.p.y_mode[i][j] =
|
|
- update_prob(&s->c, s->prob.p.y_mode[i][j]);
|
|
+ update_prob(s->prob.p.y_mode[i][j],
|
|
+ s->prob_raw.p.y_mode[i][j]);
|
|
+ }
|
|
|
|
for (i = 0; i < 4; i++)
|
|
for (j = 0; j < 4; j++)
|
|
for (k = 0; k < 3; k++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.partition[i][j][k] = read_prob_delta(&s->c);
|
|
s->prob.p.partition[3 - i][j][k] =
|
|
- update_prob(&s->c,
|
|
- s->prob.p.partition[3 - i][j][k]);
|
|
+ update_prob(s->prob.p.partition[3 - i][j][k],
|
|
+ s->prob_raw.p.partition[i][j][k]);
|
|
+ }
|
|
|
|
// mv fields don't use the update_prob subexp model for some reason
|
|
for (i = 0; i < 3; i++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_joint[i] = (vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_joint[i] = s->prob_raw.p.mv_joint[i];
|
|
+ }
|
|
|
|
for (i = 0; i < 2; i++) {
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].sign =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].sign =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].sign =
|
|
+ s->prob_raw.p.mv_comp[i].sign;
|
|
+ }
|
|
|
|
for (j = 0; j < 10; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].classes[j] =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].classes[j] =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].classes[j] =
|
|
+ s->prob_raw.p.mv_comp[i].classes[j];
|
|
+ }
|
|
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].class0 =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].class0 =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].class0 =
|
|
+ s->prob_raw.p.mv_comp[i].class0;
|
|
+ }
|
|
|
|
for (j = 0; j < 10; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].bits[j] =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].bits[j] =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].bits[j] =
|
|
+ s->prob_raw.p.mv_comp[i].bits[j];
|
|
+ }
|
|
}
|
|
|
|
for (i = 0; i < 2; i++) {
|
|
for (j = 0; j < 2; j++)
|
|
for (k = 0; k < 3; k++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].class0_fp[j][k] =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].class0_fp[j][k] =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].class0_fp[j][k] =
|
|
+ s->prob_raw.p.mv_comp[i].class0_fp[j][k];
|
|
+ }
|
|
|
|
for (j = 0; j < 3; j++)
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].fp[j] =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].fp[j] =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].fp[j] =
|
|
+ s->prob_raw.p.mv_comp[i].fp[j];
|
|
+ }
|
|
}
|
|
|
|
if (s->s.h.highprecisionmvs) {
|
|
for (i = 0; i < 2; i++) {
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].class0_hp =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].class0_hp =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].class0_hp =
|
|
+ s->prob_raw.p.mv_comp[i].class0_hp;
|
|
+ }
|
|
|
|
- if (vpx_rac_get_prob_branchy(&s->c, 252))
|
|
- s->prob.p.mv_comp[i].hp =
|
|
+ if (vpx_rac_get_prob_branchy(&s->c, 252)) {
|
|
+ s->prob_raw.p.mv_comp[i].hp =
|
|
(vp89_rac_get_uint(&s->c, 7) << 1) | 1;
|
|
+ s->prob.p.mv_comp[i].hp =
|
|
+ s->prob_raw.p.mv_comp[i].hp;
|
|
+ }
|
|
}
|
|
}
|
|
}
|
|
@@ -1906,6 +1985,9 @@ const FFCodec ff_vp9_decoder = {
|
|
#if CONFIG_VP9_VDPAU_HWACCEL
|
|
HWACCEL_VDPAU(vp9),
|
|
#endif
|
|
+#if CONFIG_VP9_V4L2REQUEST_HWACCEL
|
|
+ HWACCEL_V4L2REQUEST(vp9),
|
|
+#endif
|
|
#if CONFIG_VP9_VIDEOTOOLBOX_HWACCEL
|
|
HWACCEL_VIDEOTOOLBOX(vp9),
|
|
#endif
|
|
diff --git a/libavcodec/vp9dec.h b/libavcodec/vp9dec.h
|
|
index de7aba0458..5935ba6227 100644
|
|
--- a/libavcodec/vp9dec.h
|
|
+++ b/libavcodec/vp9dec.h
|
|
@@ -135,6 +135,10 @@ typedef struct VP9Context {
|
|
ProbContext p;
|
|
uint8_t coef[4][2][2][6][6][11];
|
|
} prob;
|
|
+ struct {
|
|
+ ProbContext p;
|
|
+ uint8_t coef[4][2][2][6][6][11];
|
|
+ } prob_raw;
|
|
|
|
// contextual (above) cache
|
|
uint8_t *above_partition_ctx;
|
|
diff --git a/libavcodec/vp9shared.h b/libavcodec/vp9shared.h
|
|
index 543a496df8..a5028d4b39 100644
|
|
--- a/libavcodec/vp9shared.h
|
|
+++ b/libavcodec/vp9shared.h
|
|
@@ -137,6 +137,7 @@ typedef struct VP9BitstreamHeader {
|
|
uint8_t temporal;
|
|
uint8_t absolute_vals;
|
|
uint8_t update_map;
|
|
+ uint8_t update_data;
|
|
uint8_t prob[7];
|
|
uint8_t pred_prob[3];
|
|
struct {
From 5057eb96b2adbff022a1abd8d5b06f369f908d51 Mon Sep 17 00:00:00 2001
|
|
From: Jonas Karlman <jonas@kwiboo.se>
|
|
Date: Mon, 29 Apr 2019 22:08:59 +0000
|
|
Subject: [PATCH 09/13] HACK: hwcontext_drm: do not require drm device
|
|
|
|
Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
|
|
---
|
|
libavutil/hwcontext_drm.c | 5 +++++
|
|
1 file changed, 5 insertions(+)
|
|
|
|
diff --git a/libavutil/hwcontext_drm.c b/libavutil/hwcontext_drm.c
|
|
index 7a9fdbd263..6297d1f9b6 100644
|
|
--- a/libavutil/hwcontext_drm.c
|
|
+++ b/libavutil/hwcontext_drm.c
|
|
@@ -53,6 +53,11 @@ static int drm_device_create(AVHWDeviceContext *hwdev, const char *device,
|
|
AVDRMDeviceContext *hwctx = hwdev->hwctx;
|
|
drmVersionPtr version;
|
|
|
|
+ if (device == NULL) {
|
|
+ hwctx->fd = -1;
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
hwctx->fd = open(device, O_RDWR);
|
|
if (hwctx->fd < 0)
|
|
return AVERROR(errno);
From fce915b2195b3c4d74b8b40e0d5aa9ba4dcecd33 Mon Sep 17 00:00:00 2001
|
|
From: Jonas Karlman <jonas@kwiboo.se>
|
|
Date: Fri, 15 May 2020 16:54:05 +0000
|
|
Subject: [PATCH 10/13] WIP: add NV15 and NV20 support
|
|
|
|
Signed-off-by: Jonas Karlman <jonas@kwiboo.se>
|
|
---
|
|
libavcodec/h264_slice.c | 14 ++++++++++++--
|
|
libavcodec/v4l2_request.c | 23 +++++++++++++++++++++++
|
|
2 files changed, 35 insertions(+), 2 deletions(-)
|
|
|
|
diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c
|
|
index 3d0d45b2a3..f7af51b28e 100644
|
|
--- a/libavcodec/h264_slice.c
|
|
+++ b/libavcodec/h264_slice.c
|
|
@@ -808,10 +808,17 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
|
*fmt++ = AV_PIX_FMT_GBRP10;
|
|
} else
|
|
*fmt++ = AV_PIX_FMT_YUV444P10;
|
|
- } else if (CHROMA422(h))
|
|
+ } else if (CHROMA422(h)) {
|
|
+#if CONFIG_H264_V4L2REQUEST_HWACCEL
|
|
+ *fmt++ = AV_PIX_FMT_DRM_PRIME;
|
|
+#endif
|
|
*fmt++ = AV_PIX_FMT_YUV422P10;
|
|
- else
|
|
+ } else {
|
|
+#if CONFIG_H264_V4L2REQUEST_HWACCEL
|
|
+ *fmt++ = AV_PIX_FMT_DRM_PRIME;
|
|
+#endif
|
|
*fmt++ = AV_PIX_FMT_YUV420P10;
|
|
+ }
|
|
break;
|
|
case 12:
|
|
if (CHROMA444(h)) {
|
|
@@ -854,6 +861,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback)
|
|
else
|
|
*fmt++ = AV_PIX_FMT_YUV444P;
|
|
} else if (CHROMA422(h)) {
|
|
+#if CONFIG_H264_V4L2REQUEST_HWACCEL
|
|
+ *fmt++ = AV_PIX_FMT_DRM_PRIME;
|
|
+#endif
|
|
if (h->avctx->color_range == AVCOL_RANGE_JPEG)
|
|
*fmt++ = AV_PIX_FMT_YUVJ422P;
|
|
else
|
|
diff --git a/libavcodec/v4l2_request.c b/libavcodec/v4l2_request.c
|
|
index e7faf100f0..c77d3a8cb1 100644
|
|
--- a/libavcodec/v4l2_request.c
|
|
+++ b/libavcodec/v4l2_request.c
|
|
@@ -186,6 +186,13 @@ const uint32_t v4l2_request_capture_pixelformats[] = {
|
|
#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED
|
|
V4L2_PIX_FMT_SUNXI_TILED_NV12,
|
|
#endif
|
|
+#if defined(V4L2_PIX_FMT_NV15) && defined(DRM_FORMAT_NV15)
|
|
+ V4L2_PIX_FMT_NV15,
|
|
+#endif
|
|
+ V4L2_PIX_FMT_NV16,
|
|
+#if defined(V4L2_PIX_FMT_NV20) && defined(DRM_FORMAT_NV20)
|
|
+ V4L2_PIX_FMT_NV20,
|
|
+#endif
|
|
};
|
|
|
|
static int v4l2_request_set_drm_descriptor(V4L2RequestDescriptor *req, struct v4l2_format *format)
|
|
@@ -204,6 +211,22 @@ static int v4l2_request_set_drm_descriptor(V4L2RequestDescriptor *req, struct v4
|
|
layer->format = DRM_FORMAT_NV12;
|
|
desc->objects[0].format_modifier = DRM_FORMAT_MOD_ALLWINNER_TILED;
|
|
break;
|
|
+#endif
|
|
+#if defined(V4L2_PIX_FMT_NV15) && defined(DRM_FORMAT_NV15)
|
|
+ case V4L2_PIX_FMT_NV15:
|
|
+ layer->format = DRM_FORMAT_NV15;
|
|
+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR;
|
|
+ break;
|
|
+#endif
|
|
+ case V4L2_PIX_FMT_NV16:
|
|
+ layer->format = DRM_FORMAT_NV16;
|
|
+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR;
|
|
+ break;
|
|
+#if defined(V4L2_PIX_FMT_NV20) && defined(DRM_FORMAT_NV20)
|
|
+ case V4L2_PIX_FMT_NV20:
|
|
+ layer->format = DRM_FORMAT_NV20;
|
|
+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR;
|
|
+ break;
|
|
#endif
|
|
default:
|
|
return -1;
From 943156690cb35cdcb177538d9e795f5755b03b3e Mon Sep 17 00:00:00 2001
|
|
From: Jonas Karlman <jonas@kwiboo.se>
|
|
Date: Mon, 27 Jul 2020 23:15:45 +0000
|
|
Subject: [PATCH 11/13] HACK: define drm NV15 and NV20 format
|
|
|
|
---
|
|
libavcodec/v4l2_request.c | 8 ++++++++
|
|
1 file changed, 8 insertions(+)
|
|
|
|
diff --git a/libavcodec/v4l2_request.c b/libavcodec/v4l2_request.c
|
|
index c77d3a8cb1..19c41f2b3f 100644
|
|
--- a/libavcodec/v4l2_request.c
|
|
+++ b/libavcodec/v4l2_request.c
|
|
@@ -30,6 +30,14 @@
|
|
#include "internal.h"
|
|
#include "v4l2_request.h"
|
|
|
|
+#ifndef DRM_FORMAT_NV15
|
|
+#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5')
|
|
+#endif
|
|
+
|
|
+#ifndef DRM_FORMAT_NV20
|
|
+#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0')
|
|
+#endif
|
|
+
|
|
uint64_t ff_v4l2_request_get_capture_timestamp(AVFrame *frame)
|
|
{
|
|
V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0];
From 28e490b972e644113237d6a3f48b0c7fd6cb011e Mon Sep 17 00:00:00 2001
|
|
From: Alex Bee <knaerzche@gmail.com>
|
|
Date: Sat, 22 Oct 2022 22:23:22 +0200
|
|
Subject: [PATCH 12/13] HACK: Revert "lavc/pthread_frame: always transfer
|
|
stashed hwaccel state"
|
|
|
|
This reverts commit 96c78e50a66a3b443eb2f237e2554ab84b8a12ce.
|
|
---
|
|
libavcodec/pthread_frame.c | 12 ++++++------
|
|
1 file changed, 6 insertions(+), 6 deletions(-)
|
|
|
|
diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
|
|
index d9d5afaa82..800f7a2377 100644
|
|
--- a/libavcodec/pthread_frame.c
|
|
+++ b/libavcodec/pthread_frame.c
|
|
@@ -430,13 +430,13 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
|
|
pthread_mutex_unlock(&p->mutex);
|
|
return err;
|
|
}
|
|
- }
|
|
|
|
- /* transfer the stashed hwaccel state, if any */
|
|
- av_assert0(!p->avctx->hwaccel);
|
|
- FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
|
|
- FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
|
|
- FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
|
|
+ /* transfer hwaccel state stashed from previous thread, if any */
|
|
+ av_assert0(!p->avctx->hwaccel);
|
|
+ FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
|
|
+ FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
|
|
+ FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
|
|
+ }
|
|
|
|
av_packet_unref(p->avpkt);
|
|
ret = av_packet_ref(p->avpkt, avpkt);
From fe61bbb249a3a9ae96447ded7ef538d7034783e5 Mon Sep 17 00:00:00 2001
|
|
From: Alex Bee <knaerzche@gmail.com>
|
|
Date: Sat, 22 Oct 2022 22:24:07 +0200
|
|
Subject: [PATCH 13/13] HACK: Revert "lavc/pthread_frame: avoid leaving stale
|
|
hwaccel state in worker threads"
|
|
|
|
This reverts commit 35aa7e70e7ec350319e7634a30d8d8aa1e6ecdda.
|
|
---
|
|
libavcodec/pthread_frame.c | 47 ++++++++++----------------------------
|
|
1 file changed, 12 insertions(+), 35 deletions(-)
|
|
|
|
diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c
|
|
index 800f7a2377..5acc261e60 100644
|
|
--- a/libavcodec/pthread_frame.c
|
|
+++ b/libavcodec/pthread_frame.c
|
|
@@ -132,12 +132,6 @@ typedef struct FrameThreadContext {
|
|
* Set for the first N packets, where N is the number of threads.
|
|
* While it is set, ff_thread_en/decode_frame won't return any results.
|
|
*/
|
|
-
|
|
- /* hwaccel state is temporarily stored here in order to transfer its ownership
|
|
- * to the next decoding thread without the need for extra synchronization */
|
|
- const AVHWAccel *stash_hwaccel;
|
|
- void *stash_hwaccel_context;
|
|
- void *stash_hwaccel_priv;
|
|
} FrameThreadContext;
|
|
|
|
static void async_lock(FrameThreadContext *fctx)
|
|
@@ -220,17 +214,9 @@ static attribute_align_arg void *frame_worker_thread(void *arg)
|
|
ff_thread_finish_setup(avctx);
|
|
|
|
if (p->hwaccel_serializing) {
|
|
- /* wipe hwaccel state to avoid stale pointers lying around;
|
|
- * the state was transferred to FrameThreadContext in
|
|
- * ff_thread_finish_setup(), so nothing is leaked */
|
|
- avctx->hwaccel = NULL;
|
|
- avctx->hwaccel_context = NULL;
|
|
- avctx->internal->hwaccel_priv_data = NULL;
|
|
-
|
|
p->hwaccel_serializing = 0;
|
|
pthread_mutex_unlock(&p->parent->hwaccel_mutex);
|
|
}
|
|
- av_assert0(!avctx->hwaccel);
|
|
|
|
if (p->async_serializing) {
|
|
p->async_serializing = 0;
|
|
@@ -294,6 +280,9 @@ static int update_context_from_thread(AVCodecContext *dst, AVCodecContext *src,
|
|
dst->color_range = src->color_range;
|
|
dst->chroma_sample_location = src->chroma_sample_location;
|
|
|
|
+ dst->hwaccel = src->hwaccel;
|
|
+ dst->hwaccel_context = src->hwaccel_context;
|
|
+
|
|
dst->sample_rate = src->sample_rate;
|
|
dst->sample_fmt = src->sample_fmt;
|
|
#if FF_API_OLD_CHANNEL_LAYOUT
|
|
@@ -306,6 +295,8 @@ FF_ENABLE_DEPRECATION_WARNINGS
|
|
if (err < 0)
|
|
return err;
|
|
|
|
+ dst->internal->hwaccel_priv_data = src->internal->hwaccel_priv_data;
|
|
+
|
|
if (!!dst->hw_frames_ctx != !!src->hw_frames_ctx ||
|
|
(dst->hw_frames_ctx && dst->hw_frames_ctx->data != src->hw_frames_ctx->data)) {
|
|
av_buffer_unref(&dst->hw_frames_ctx);
|
|
@@ -430,12 +421,6 @@ static int submit_packet(PerThreadContext *p, AVCodecContext *user_avctx,
|
|
pthread_mutex_unlock(&p->mutex);
|
|
return err;
|
|
}
|
|
-
|
|
- /* transfer hwaccel state stashed from previous thread, if any */
|
|
- av_assert0(!p->avctx->hwaccel);
|
|
- FFSWAP(const AVHWAccel*, p->avctx->hwaccel, fctx->stash_hwaccel);
|
|
- FFSWAP(void*, p->avctx->hwaccel_context, fctx->stash_hwaccel_context);
|
|
- FFSWAP(void*, p->avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
|
|
}
|
|
|
|
av_packet_unref(p->avpkt);
|
|
@@ -603,14 +588,6 @@ void ff_thread_finish_setup(AVCodecContext *avctx) {
|
|
async_lock(p->parent);
|
|
}
|
|
|
|
- /* save hwaccel state for passing to the next thread;
|
|
- * this is done here so that this worker thread can wipe its own hwaccel
|
|
- * state after decoding, without requiring synchronization */
|
|
- av_assert0(!p->parent->stash_hwaccel);
|
|
- p->parent->stash_hwaccel = avctx->hwaccel;
|
|
- p->parent->stash_hwaccel_context = avctx->hwaccel_context;
|
|
- p->parent->stash_hwaccel_priv = avctx->internal->hwaccel_priv_data;
|
|
-
|
|
pthread_mutex_lock(&p->progress_mutex);
|
|
if(atomic_load(&p->state) == STATE_SETUP_FINISHED){
|
|
av_log(avctx, AV_LOG_WARNING, "Multiple ff_thread_finish_setup() calls\n");
|
|
@@ -664,6 +641,13 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
|
|
|
|
park_frame_worker_threads(fctx, thread_count);
|
|
|
|
+ if (fctx->prev_thread && avctx->internal->hwaccel_priv_data !=
|
|
+ fctx->prev_thread->avctx->internal->hwaccel_priv_data) {
|
|
+ if (update_context_from_thread(avctx, fctx->prev_thread->avctx, 1) < 0) {
|
|
+ av_log(avctx, AV_LOG_ERROR, "Failed to update user thread.\n");
|
|
+ }
|
|
+ }
|
|
+
|
|
for (i = 0; i < thread_count; i++) {
|
|
PerThreadContext *p = &fctx->threads[i];
|
|
AVCodecContext *ctx = p->avctx;
|
|
@@ -705,13 +689,6 @@ void ff_frame_thread_free(AVCodecContext *avctx, int thread_count)
|
|
av_freep(&fctx->threads);
|
|
ff_pthread_free(fctx, thread_ctx_offsets);
|
|
|
|
- /* if we have stashed hwaccel state, move it to the user-facing context,
|
|
- * so it will be freed in avcodec_close() */
|
|
- av_assert0(!avctx->hwaccel);
|
|
- FFSWAP(const AVHWAccel*, avctx->hwaccel, fctx->stash_hwaccel);
|
|
- FFSWAP(void*, avctx->hwaccel_context, fctx->stash_hwaccel_context);
|
|
- FFSWAP(void*, avctx->internal->hwaccel_priv_data, fctx->stash_hwaccel_priv);
|
|
-
|
|
av_freep(&avctx->internal->thread_ctx);
|
|
}
|