author     Maxime Ripard <maxime@cerno.tech>   2022-10-18 15:00:03 +0200
committer  Maxime Ripard <maxime@cerno.tech>   2022-10-18 15:00:03 +0200
commit     a140a6a2d5ec0329ad05cd3532a91ad0ce58dceb (patch)
tree       b2d44a1da423c53bd6c3ab3facd45ff5f2087ffd /drivers/media/platform
parent     28743e25fa1c867675bd8ff976eb92d4251f13a1 (diff)
parent     9abf2313adc1ca1b6180c508c25f22f9395cc780 (diff)
Merge drm/drm-next into drm-misc-next
Let's kick-off this release cycle.

Signed-off-by: Maxime Ripard <maxime@cerno.tech>
Diffstat (limited to 'drivers/media/platform')
-rw-r--r--  drivers/media/platform/Kconfig | 1
-rw-r--r--  drivers/media/platform/Makefile | 1
-rw-r--r--  drivers/media/platform/amlogic/meson-ge2d/ge2d.c | 1
-rw-r--r--  drivers/media/platform/amphion/vdec.c | 16
-rw-r--r--  drivers/media/platform/amphion/venc.c | 2
-rw-r--r--  drivers/media/platform/amphion/vpu.h | 1
-rw-r--r--  drivers/media/platform/amphion/vpu_core.c | 84
-rw-r--r--  drivers/media/platform/amphion/vpu_core.h | 1
-rw-r--r--  drivers/media/platform/amphion/vpu_dbg.c | 9
-rw-r--r--  drivers/media/platform/amphion/vpu_malone.c | 2
-rw-r--r--  drivers/media/platform/intel/pxa_camera.c | 8
-rw-r--r--  drivers/media/platform/marvell/mcam-core.h | 2
-rw-r--r--  drivers/media/platform/mediatek/Kconfig | 1
-rw-r--r--  drivers/media/platform/mediatek/Makefile | 1
-rw-r--r--  drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c | 1
-rw-r--r--  drivers/media/platform/mediatek/mdp3/Kconfig | 21
-rw-r--r--  drivers/media/platform/mediatek/mdp3/Makefile | 6
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h | 19
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h | 65
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h | 39
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h | 47
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h | 55
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h | 290
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c | 466
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h | 43
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c | 1033
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h | 186
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c | 357
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h | 94
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c | 724
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h | 48
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c | 735
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h | 373
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c | 313
-rw-r--r--  drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h | 78
-rw-r--r--  drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c | 2
-rw-r--r--  drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c | 4
-rw-r--r--  drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h | 6
-rw-r--r--  drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c | 19
-rw-r--r--  drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c | 23
-rw-r--r--  drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c | 200
-rw-r--r--  drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h | 24
-rw-r--r--  drivers/media/platform/mediatek/vcodec/venc_vpu_if.c | 76
-rw-r--r--  drivers/media/platform/nxp/Kconfig | 13
-rw-r--r--  drivers/media/platform/nxp/Makefile | 2
-rw-r--r--  drivers/media/platform/nxp/dw100/Kconfig | 16
-rw-r--r--  drivers/media/platform/nxp/dw100/Makefile | 3
-rw-r--r--  drivers/media/platform/nxp/dw100/dw100.c | 1707
-rw-r--r--  drivers/media/platform/nxp/dw100/dw100_regs.h | 117
-rw-r--r--  drivers/media/platform/nxp/fsl-viu.c | 1599
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1.h | 4
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_drm.c | 2
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_drv.c | 101
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_lif.c | 12
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_regs.h | 6
-rw-r--r--  drivers/media/platform/renesas/vsp1/vsp1_video.c | 2
-rw-r--r--  drivers/media/platform/rockchip/rga/rga.c | 2
-rw-r--r--  drivers/media/platform/samsung/exynos4-is/fimc-core.h | 2
-rw-r--r--  drivers/media/platform/samsung/exynos4-is/fimc-is.c | 1
-rw-r--r--  drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c | 5
-rw-r--r--  drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c | 2
-rw-r--r--  drivers/media/platform/ti/am437x/am437x-vpfe.h | 2
-rw-r--r--  drivers/media/platform/ti/cal/cal-camerarx.c | 31
-rw-r--r--  drivers/media/platform/ti/cal/cal-video.c | 5
-rw-r--r--  drivers/media/platform/ti/cal/cal.c | 139
-rw-r--r--  drivers/media/platform/ti/cal/cal.h | 7
-rw-r--r--  drivers/media/platform/ti/davinci/Kconfig | 49
-rw-r--r--  drivers/media/platform/ti/davinci/Makefile | 4
-rw-r--r--  drivers/media/platform/ti/davinci/ccdc_hw_device.h | 80
-rw-r--r--  drivers/media/platform/ti/davinci/dm355_ccdc.c | 934
-rw-r--r--  drivers/media/platform/ti/davinci/dm355_ccdc_regs.h | 297
-rw-r--r--  drivers/media/platform/ti/davinci/dm644x_ccdc.c | 879
-rw-r--r--  drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h | 140
-rw-r--r--  drivers/media/platform/ti/davinci/isif.c | 1127
-rw-r--r--  drivers/media/platform/ti/davinci/isif_regs.h | 256
-rw-r--r--  drivers/media/platform/ti/davinci/vpbe.c | 2
-rw-r--r--  drivers/media/platform/ti/davinci/vpfe_capture.c | 1902
-rw-r--r--  drivers/media/platform/ti/davinci/vpif.h | 60
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_capture.c | 6
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_capture.h | 2
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_display.c | 6
-rw-r--r--  drivers/media/platform/ti/davinci/vpif_display.h | 6
-rw-r--r--  drivers/media/platform/ti/omap/omap_voutlib.c | 2
-rw-r--r--  drivers/media/platform/ti/omap3isp/isp.c | 2
-rw-r--r--  drivers/media/platform/ti/omap3isp/ispvideo.c | 2
-rw-r--r--  drivers/media/platform/verisilicon/Kconfig | 54
-rw-r--r--  drivers/media/platform/verisilicon/Makefile | 38
-rw-r--r--  drivers/media/platform/verisilicon/hantro.h | 485
-rw-r--r--  drivers/media/platform/verisilicon/hantro_drv.c | 1146
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1.c | 39
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_h264_dec.c | 284
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c | 240
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_regs.h | 356
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c | 511
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2.c | 44
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c | 629
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2_regs.h | 325
-rw-r--r--  drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c | 1014
-rw-r--r--  drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c | 166
-rw-r--r--  drivers/media/platform/verisilicon/hantro_h1_regs.h | 154
-rw-r--r--  drivers/media/platform/verisilicon/hantro_h264.c | 521
-rw-r--r--  drivers/media/platform/verisilicon/hantro_hevc.c | 284
-rw-r--r--  drivers/media/platform/verisilicon/hantro_hw.h | 441
-rw-r--r--  drivers/media/platform/verisilicon/hantro_jpeg.c | 348
-rw-r--r--  drivers/media/platform/verisilicon/hantro_jpeg.h | 15
-rw-r--r--  drivers/media/platform/verisilicon/hantro_mpeg2.c | 61
-rw-r--r--  drivers/media/platform/verisilicon/hantro_postproc.c | 279
-rw-r--r--  drivers/media/platform/verisilicon/hantro_v4l2.c | 990
-rw-r--r--  drivers/media/platform/verisilicon/hantro_v4l2.h | 29
-rw-r--r--  drivers/media/platform/verisilicon/hantro_vp8.c | 201
-rw-r--r--  drivers/media/platform/verisilicon/hantro_vp9.c | 240
-rw-r--r--  drivers/media/platform/verisilicon/hantro_vp9.h | 102
-rw-r--r--  drivers/media/platform/verisilicon/imx8m_vpu_hw.c | 373
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c | 491
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c | 197
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c | 248
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c | 600
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu2_regs.h | 600
-rw-r--r--  drivers/media/platform/verisilicon/rockchip_vpu_hw.c | 680
-rw-r--r--  drivers/media/platform/verisilicon/sama5d4_vdec_hw.c | 128
-rw-r--r--  drivers/media/platform/verisilicon/sunxi_vpu_hw.c | 129
-rw-r--r--  drivers/media/platform/verisilicon/xilinx/xilinx-csi2rxss.c | 1
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vip.c | 2
-rw-r--r--  drivers/media/platform/xilinx/xilinx-vipp.c | 9
124 files changed, 19944 insertions, 7523 deletions
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index f1056ceaf5a8..a9334263fa9b 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -81,6 +81,7 @@ source "drivers/media/platform/samsung/Kconfig"
source "drivers/media/platform/st/Kconfig"
source "drivers/media/platform/sunxi/Kconfig"
source "drivers/media/platform/ti/Kconfig"
+source "drivers/media/platform/verisilicon/Kconfig"
source "drivers/media/platform/via/Kconfig"
source "drivers/media/platform/xilinx/Kconfig"
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index a881e97bae95..a91f42024273 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -24,6 +24,7 @@ obj-y += samsung/
obj-y += st/
obj-y += sunxi/
obj-y += ti/
+obj-y += verisilicon/
obj-y += via/
obj-y += xilinx/
diff --git a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
index 5e7b319f300d..142d421a8d76 100644
--- a/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
+++ b/drivers/media/platform/amlogic/meson-ge2d/ge2d.c
@@ -1030,7 +1030,6 @@ static int ge2d_remove(struct platform_device *pdev)
video_unregister_device(ge2d->vfd);
v4l2_m2m_release(ge2d->m2m_dev);
- video_device_release(ge2d->vfd);
v4l2_device_unregister(&ge2d->v4l2_dev);
clk_disable_unprepare(ge2d->clk);
diff --git a/drivers/media/platform/amphion/vdec.c b/drivers/media/platform/amphion/vdec.c
index 9e64041cc1c1..feb75dc204de 100644
--- a/drivers/media/platform/amphion/vdec.c
+++ b/drivers/media/platform/amphion/vdec.c
@@ -808,14 +808,6 @@ static void vdec_init_fmt(struct vpu_inst *inst)
inst->cap_format.field = V4L2_FIELD_NONE;
else
inst->cap_format.field = V4L2_FIELD_SEQ_TB;
- if (vdec->codec_info.color_primaries == V4L2_COLORSPACE_DEFAULT)
- vdec->codec_info.color_primaries = V4L2_COLORSPACE_REC709;
- if (vdec->codec_info.transfer_chars == V4L2_XFER_FUNC_DEFAULT)
- vdec->codec_info.transfer_chars = V4L2_XFER_FUNC_709;
- if (vdec->codec_info.matrix_coeffs == V4L2_YCBCR_ENC_DEFAULT)
- vdec->codec_info.matrix_coeffs = V4L2_YCBCR_ENC_709;
- if (vdec->codec_info.full_range == V4L2_QUANTIZATION_DEFAULT)
- vdec->codec_info.full_range = V4L2_QUANTIZATION_LIM_RANGE;
}
static void vdec_init_crop(struct vpu_inst *inst)
@@ -1555,6 +1547,14 @@ static int vdec_get_debug_info(struct vpu_inst *inst, char *str, u32 size, u32 i
vdec->codec_info.frame_rate.numerator,
vdec->codec_info.frame_rate.denominator);
break;
+ case 9:
+ num = scnprintf(str, size, "colorspace: %d, %d, %d, %d (%d)\n",
+ vdec->codec_info.color_primaries,
+ vdec->codec_info.transfer_chars,
+ vdec->codec_info.matrix_coeffs,
+ vdec->codec_info.full_range,
+ vdec->codec_info.vui_present);
+ break;
default:
break;
}
diff --git a/drivers/media/platform/amphion/venc.c b/drivers/media/platform/amphion/venc.c
index 461524dd1e44..37212f087fdd 100644
--- a/drivers/media/platform/amphion/venc.c
+++ b/drivers/media/platform/amphion/venc.c
@@ -644,7 +644,7 @@ static int venc_ctrl_init(struct vpu_inst *inst)
BITRATE_DEFAULT_PEAK);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0, (1 << 16) - 1, 1, 30);
+ V4L2_CID_MPEG_VIDEO_GOP_SIZE, 1, 8000, 1, 30);
v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops,
V4L2_CID_MPEG_VIDEO_B_FRAMES, 0, 4, 1, 0);
diff --git a/drivers/media/platform/amphion/vpu.h b/drivers/media/platform/amphion/vpu.h
index f914de6ed81e..beac0309ca8d 100644
--- a/drivers/media/platform/amphion/vpu.h
+++ b/drivers/media/platform/amphion/vpu.h
@@ -119,7 +119,6 @@ struct vpu_mbox {
enum vpu_core_state {
VPU_CORE_DEINIT = 0,
VPU_CORE_ACTIVE,
- VPU_CORE_SNAPSHOT,
VPU_CORE_HANG
};
diff --git a/drivers/media/platform/amphion/vpu_core.c b/drivers/media/platform/amphion/vpu_core.c
index 73faa50d2865..f9ec1753f7c8 100644
--- a/drivers/media/platform/amphion/vpu_core.c
+++ b/drivers/media/platform/amphion/vpu_core.c
@@ -89,7 +89,7 @@ static int vpu_core_boot_done(struct vpu_core *core)
core->supported_instance_count = min(core->supported_instance_count, count);
}
core->fw_version = fw_version;
- core->state = VPU_CORE_ACTIVE;
+ vpu_core_set_state(core, VPU_CORE_ACTIVE);
return 0;
}
@@ -172,10 +172,26 @@ int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf)
return __vpu_alloc_dma(core->dev, buf);
}
-static void vpu_core_check_hang(struct vpu_core *core)
+void vpu_core_set_state(struct vpu_core *core, enum vpu_core_state state)
{
- if (core->hang_mask)
- core->state = VPU_CORE_HANG;
+ if (state != core->state)
+ vpu_trace(core->dev, "vpu core state change from %d to %d\n", core->state, state);
+ core->state = state;
+ if (core->state == VPU_CORE_DEINIT)
+ core->hang_mask = 0;
+}
+
+static void vpu_core_update_state(struct vpu_core *core)
+{
+ if (!vpu_iface_get_power_state(core)) {
+ if (core->request_count)
+ vpu_core_set_state(core, VPU_CORE_HANG);
+ else
+ vpu_core_set_state(core, VPU_CORE_DEINIT);
+
+ } else if (core->state == VPU_CORE_ACTIVE && core->hang_mask) {
+ vpu_core_set_state(core, VPU_CORE_HANG);
+ }
}
static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 type)
@@ -188,11 +204,13 @@ static struct vpu_core *vpu_core_find_proper_by_type(struct vpu_dev *vpu, u32 ty
dev_dbg(c->dev, "instance_mask = 0x%lx, state = %d\n", c->instance_mask, c->state);
if (c->type != type)
continue;
+ mutex_lock(&c->lock);
+ vpu_core_update_state(c);
+ mutex_unlock(&c->lock);
if (c->state == VPU_CORE_DEINIT) {
core = c;
break;
}
- vpu_core_check_hang(c);
if (c->state != VPU_CORE_ACTIVE)
continue;
if (c->request_count < request_count) {
@@ -409,6 +427,12 @@ int vpu_inst_register(struct vpu_inst *inst)
}
mutex_lock(&core->lock);
+ if (core->state != VPU_CORE_ACTIVE) {
+ dev_err(core->dev, "vpu core is not active, state = %d\n", core->state);
+ ret = -EINVAL;
+ goto exit;
+ }
+
if (inst->id >= 0 && inst->id < core->supported_instance_count)
goto exit;
@@ -450,7 +474,7 @@ int vpu_inst_unregister(struct vpu_inst *inst)
vpu_core_release_instance(core, inst->id);
inst->id = VPU_INST_NULL_ID;
}
- vpu_core_check_hang(core);
+ vpu_core_update_state(core);
if (core->state == VPU_CORE_HANG && !core->instance_mask) {
int err;
@@ -459,7 +483,7 @@ int vpu_inst_unregister(struct vpu_inst *inst)
err = vpu_core_sw_reset(core);
mutex_lock(&core->lock);
if (!err) {
- core->state = VPU_CORE_ACTIVE;
+ vpu_core_set_state(core, VPU_CORE_ACTIVE);
core->hang_mask = 0;
}
}
@@ -609,7 +633,7 @@ static int vpu_core_probe(struct platform_device *pdev)
mutex_init(&core->cmd_lock);
init_completion(&core->cmp);
init_waitqueue_head(&core->ack_wq);
- core->state = VPU_CORE_DEINIT;
+ vpu_core_set_state(core, VPU_CORE_DEINIT);
core->res = of_device_get_match_data(dev);
if (!core->res)
@@ -758,33 +782,18 @@ static int __maybe_unused vpu_core_resume(struct device *dev)
mutex_lock(&core->lock);
pm_runtime_resume_and_get(dev);
vpu_core_get_vpu(core);
- if (core->state != VPU_CORE_SNAPSHOT)
- goto exit;
- if (!vpu_iface_get_power_state(core)) {
- if (!list_empty(&core->instances)) {
+ if (core->request_count) {
+ if (!vpu_iface_get_power_state(core))
ret = vpu_core_boot(core, false);
- if (ret) {
- dev_err(core->dev, "%s boot fail\n", __func__);
- core->state = VPU_CORE_DEINIT;
- goto exit;
- }
- } else {
- core->state = VPU_CORE_DEINIT;
- }
- } else {
- if (!list_empty(&core->instances)) {
+ else
ret = vpu_core_sw_reset(core);
- if (ret) {
- dev_err(core->dev, "%s sw_reset fail\n", __func__);
- core->state = VPU_CORE_HANG;
- goto exit;
- }
+ if (ret) {
+ dev_err(core->dev, "resume fail\n");
+ vpu_core_set_state(core, VPU_CORE_HANG);
}
- core->state = VPU_CORE_ACTIVE;
}
-
-exit:
+ vpu_core_update_state(core);
pm_runtime_put_sync(dev);
mutex_unlock(&core->lock);
@@ -798,18 +807,11 @@ static int __maybe_unused vpu_core_suspend(struct device *dev)
int ret = 0;
mutex_lock(&core->lock);
- if (core->state == VPU_CORE_ACTIVE) {
- if (!list_empty(&core->instances)) {
- ret = vpu_core_snapshot(core);
- if (ret) {
- mutex_unlock(&core->lock);
- return ret;
- }
- }
-
- core->state = VPU_CORE_SNAPSHOT;
- }
+ if (core->request_count)
+ ret = vpu_core_snapshot(core);
mutex_unlock(&core->lock);
+ if (ret)
+ return ret;
vpu_core_cancel_work(core);
diff --git a/drivers/media/platform/amphion/vpu_core.h b/drivers/media/platform/amphion/vpu_core.h
index 00a662997da4..65b562642603 100644
--- a/drivers/media/platform/amphion/vpu_core.h
+++ b/drivers/media/platform/amphion/vpu_core.h
@@ -11,5 +11,6 @@ u32 csr_readl(struct vpu_core *core, u32 reg);
int vpu_alloc_dma(struct vpu_core *core, struct vpu_buffer *buf);
void vpu_free_dma(struct vpu_buffer *buf);
struct vpu_inst *vpu_core_find_instance(struct vpu_core *core, u32 index);
+void vpu_core_set_state(struct vpu_core *core, enum vpu_core_state state);
#endif
diff --git a/drivers/media/platform/amphion/vpu_dbg.c b/drivers/media/platform/amphion/vpu_dbg.c
index f72c8a506b22..260f1c4b8f8d 100644
--- a/drivers/media/platform/amphion/vpu_dbg.c
+++ b/drivers/media/platform/amphion/vpu_dbg.c
@@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include "vpu.h"
#include "vpu_defs.h"
+#include "vpu_core.h"
#include "vpu_helpers.h"
#include "vpu_cmds.h"
#include "vpu_rpc.h"
@@ -233,6 +234,10 @@ static int vpu_dbg_core(struct seq_file *s, void *data)
if (seq_write(s, str, num))
return 0;
+ num = scnprintf(str, sizeof(str), "power %s\n",
+ vpu_iface_get_power_state(core) ? "on" : "off");
+ if (seq_write(s, str, num))
+ return 0;
num = scnprintf(str, sizeof(str), "state = %d\n", core->state);
if (seq_write(s, str, num))
return 0;
@@ -346,10 +351,10 @@ static ssize_t vpu_dbg_core_write(struct file *file,
pm_runtime_resume_and_get(core->dev);
mutex_lock(&core->lock);
- if (core->state != VPU_CORE_DEINIT && !core->instance_mask) {
+ if (vpu_iface_get_power_state(core) && !core->request_count) {
dev_info(core->dev, "reset\n");
if (!vpu_core_sw_reset(core)) {
- core->state = VPU_CORE_ACTIVE;
+ vpu_core_set_state(core, VPU_CORE_ACTIVE);
core->hang_mask = 0;
}
}
diff --git a/drivers/media/platform/amphion/vpu_malone.c b/drivers/media/platform/amphion/vpu_malone.c
index f4a488bf9880..51e0702f9ae1 100644
--- a/drivers/media/platform/amphion/vpu_malone.c
+++ b/drivers/media/platform/amphion/vpu_malone.c
@@ -1293,7 +1293,7 @@ static int vpu_malone_insert_scode_vc1_g_pic(struct malone_scode_t *scode)
vbuf = to_vb2_v4l2_buffer(scode->vb);
data = vb2_plane_vaddr(scode->vb, 0);
- if (vbuf->sequence == 0 || vpu_vb_is_codecconfig(vbuf))
+ if (scode->inst->total_input_count == 0 || vpu_vb_is_codecconfig(vbuf))
return 0;
if (MALONE_VC1_CONTAIN_NAL(*data))
return 0;
diff --git a/drivers/media/platform/intel/pxa_camera.c b/drivers/media/platform/intel/pxa_camera.c
index 35145e3348f0..54270d6b6f50 100644
--- a/drivers/media/platform/intel/pxa_camera.c
+++ b/drivers/media/platform/intel/pxa_camera.c
@@ -854,7 +854,7 @@ fail:
return -ENOMEM;
}
-static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev,
+static void pxa_video_buf_set_actdma(struct pxa_camera_dev *pcdev,
struct pxa_buffer *buf)
{
buf->active_dma = DMA_Y;
@@ -973,7 +973,7 @@ static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev,
* stopped. This means the tailed buffer would never be transferred by DMA.
* This function restarts the capture for this corner case, where :
* - DADR() == DADDR_STOP
- * - a videobuffer is queued on the pcdev->capture list
+ * - a video buffer is queued on the pcdev->capture list
*
* Please check the "DMA hot chaining timeslice issue" in
* Documentation/driver-api/media/drivers/pxa_camera.rst
@@ -1163,7 +1163,7 @@ static void pxa_camera_eof(struct tasklet_struct *t)
pcdev->active = list_first_entry(&pcdev->capture,
struct pxa_buffer, queue);
buf = pcdev->active;
- pxa_videobuf_set_actdma(pcdev, buf);
+ pxa_video_buf_set_actdma(pcdev, buf);
pxa_dma_start_channels(pcdev);
}
@@ -1416,7 +1416,7 @@ static int pxac_vb2_prepare(struct vb2_buffer *vb)
* the actual buffer is yours
*/
buf->inwork = 0;
- pxa_videobuf_set_actdma(pcdev, buf);
+ pxa_video_buf_set_actdma(pcdev, buf);
return ret;
}
diff --git a/drivers/media/platform/marvell/mcam-core.h b/drivers/media/platform/marvell/mcam-core.h
index f324d808d737..51e66db45af6 100644
--- a/drivers/media/platform/marvell/mcam-core.h
+++ b/drivers/media/platform/marvell/mcam-core.h
@@ -32,7 +32,7 @@
#if !defined(MCAM_MODE_VMALLOC) && !defined(MCAM_MODE_DMA_CONTIG) && \
!defined(MCAM_MODE_DMA_SG)
-#error One of the videobuf buffer modes must be selected in the config
+#error One of the vb2 buffer modes must be selected in the config
#endif
diff --git a/drivers/media/platform/mediatek/Kconfig b/drivers/media/platform/mediatek/Kconfig
index af47d9888552..84104e2cd024 100644
--- a/drivers/media/platform/mediatek/Kconfig
+++ b/drivers/media/platform/mediatek/Kconfig
@@ -6,3 +6,4 @@ source "drivers/media/platform/mediatek/jpeg/Kconfig"
source "drivers/media/platform/mediatek/mdp/Kconfig"
source "drivers/media/platform/mediatek/vcodec/Kconfig"
source "drivers/media/platform/mediatek/vpu/Kconfig"
+source "drivers/media/platform/mediatek/mdp3/Kconfig"
diff --git a/drivers/media/platform/mediatek/Makefile b/drivers/media/platform/mediatek/Makefile
index d3850a13f128..38e6ba917fe5 100644
--- a/drivers/media/platform/mediatek/Makefile
+++ b/drivers/media/platform/mediatek/Makefile
@@ -3,3 +3,4 @@ obj-y += jpeg/
obj-y += mdp/
obj-y += vcodec/
obj-y += vpu/
+obj-y += mdp3/
diff --git a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
index 87685a62a5c2..3071b61946c3 100644
--- a/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mediatek/jpeg/mtk_jpeg_core.c
@@ -1414,7 +1414,6 @@ static int mtk_jpeg_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->vdev);
- video_device_release(jpeg->vdev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
diff --git a/drivers/media/platform/mediatek/mdp3/Kconfig b/drivers/media/platform/mediatek/mdp3/Kconfig
new file mode 100644
index 000000000000..50ae07b75b5f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/Kconfig
@@ -0,0 +1,21 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VIDEO_MEDIATEK_MDP3
+ tristate "MediaTek MDP v3 driver"
+ depends on MTK_IOMMU || COMPILE_TEST
+ depends on VIDEO_DEV
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ depends on HAS_DMA
+ depends on REMOTEPROC
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_MEM2MEM_DEV
+ select MTK_MMSYS
+ select VIDEO_MEDIATEK_VPU
+ select MTK_CMDQ
+ select MTK_SCP
+ default n
+ help
+ It is a v4l2 driver and present in MediaTek MT8183 SoC.
+ The driver supports scaling and color space conversion.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mtk-mdp3.
diff --git a/drivers/media/platform/mediatek/mdp3/Makefile b/drivers/media/platform/mediatek/mdp3/Makefile
new file mode 100644
index 000000000000..63e6c87e480b
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+mtk-mdp3-y += mtk-mdp3-core.o mtk-mdp3-vpu.o mtk-mdp3-regs.o
+mtk-mdp3-y += mtk-mdp3-m2m.o
+mtk-mdp3-y += mtk-mdp3-comp.o mtk-mdp3-cmdq.o
+
+obj-$(CONFIG_VIDEO_MEDIATEK_MDP3) += mtk-mdp3.o
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h
new file mode 100644
index 000000000000..3b2c6531c194
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_ccorr.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_CCORR_H__
+#define __MDP_REG_CCORR_H__
+
+#define MDP_CCORR_EN 0x000
+#define MDP_CCORR_CFG 0x020
+#define MDP_CCORR_SIZE 0x030
+
+/* MASK */
+#define MDP_CCORR_EN_MASK 0x00000001
+#define MDP_CCORR_CFG_MASK 0x70001317
+#define MDP_CCORR_SIZE_MASK 0x1fff1fff
+
+#endif // __MDP_REG_CCORR_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h
new file mode 100644
index 000000000000..be4065e252d3
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_rdma.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_RDMA_H__
+#define __MDP_REG_RDMA_H__
+
+#define MDP_RDMA_EN 0x000
+#define MDP_RDMA_RESET 0x008
+#define MDP_RDMA_CON 0x020
+#define MDP_RDMA_GMCIF_CON 0x028
+#define MDP_RDMA_SRC_CON 0x030
+#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE 0x060
+#define MDP_RDMA_MF_BKGD_SIZE_IN_PXL 0x068
+#define MDP_RDMA_MF_SRC_SIZE 0x070
+#define MDP_RDMA_MF_CLIP_SIZE 0x078
+#define MDP_RDMA_MF_OFFSET_1 0x080
+#define MDP_RDMA_SF_BKGD_SIZE_IN_BYTE 0x090
+#define MDP_RDMA_SRC_END_0 0x100
+#define MDP_RDMA_SRC_END_1 0x108
+#define MDP_RDMA_SRC_END_2 0x110
+#define MDP_RDMA_SRC_OFFSET_0 0x118
+#define MDP_RDMA_SRC_OFFSET_1 0x120
+#define MDP_RDMA_SRC_OFFSET_2 0x128
+#define MDP_RDMA_SRC_OFFSET_0_P 0x148
+#define MDP_RDMA_TRANSFORM_0 0x200
+#define MDP_RDMA_RESV_DUMMY_0 0x2a0
+#define MDP_RDMA_MON_STA_1 0x408
+#define MDP_RDMA_SRC_BASE_0 0xf00
+#define MDP_RDMA_SRC_BASE_1 0xf08
+#define MDP_RDMA_SRC_BASE_2 0xf10
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_Y 0xf20
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_C 0xf28
+
+/* MASK */
+#define MDP_RDMA_EN_MASK 0x00000001
+#define MDP_RDMA_RESET_MASK 0x00000001
+#define MDP_RDMA_CON_MASK 0x00001110
+#define MDP_RDMA_GMCIF_CON_MASK 0xfffb3771
+#define MDP_RDMA_SRC_CON_MASK 0xf3ffffff
+#define MDP_RDMA_MF_BKGD_SIZE_IN_BYTE_MASK 0x001fffff
+#define MDP_RDMA_MF_BKGD_SIZE_IN_PXL_MASK 0x001fffff
+#define MDP_RDMA_MF_SRC_SIZE_MASK 0x1fff1fff
+#define MDP_RDMA_MF_CLIP_SIZE_MASK 0x1fff1fff
+#define MDP_RDMA_MF_OFFSET_1_MASK 0x003f001f
+#define MDP_RDMA_SF_BKGD_SIZE_IN_BYTE_MASK 0x001fffff
+#define MDP_RDMA_SRC_END_0_MASK 0xffffffff
+#define MDP_RDMA_SRC_END_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_END_2_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_0_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_2_MASK 0xffffffff
+#define MDP_RDMA_SRC_OFFSET_0_P_MASK 0xffffffff
+#define MDP_RDMA_TRANSFORM_0_MASK 0xff110777
+#define MDP_RDMA_RESV_DUMMY_0_MASK 0xffffffff
+#define MDP_RDMA_MON_STA_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_BASE_0_MASK 0xffffffff
+#define MDP_RDMA_SRC_BASE_1_MASK 0xffffffff
+#define MDP_RDMA_SRC_BASE_2_MASK 0xffffffff
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_Y_MASK 0xffffffff
+#define MDP_RDMA_UFO_DEC_LENGTH_BASE_C_MASK 0xffffffff
+
+#endif // __MDP_REG_RDMA_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h
new file mode 100644
index 000000000000..484f6d60641f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_rsz.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_RSZ_H__
+#define __MDP_REG_RSZ_H__
+
+#define PRZ_ENABLE 0x000
+#define PRZ_CONTROL_1 0x004
+#define PRZ_CONTROL_2 0x008
+#define PRZ_INPUT_IMAGE 0x010
+#define PRZ_OUTPUT_IMAGE 0x014
+#define PRZ_HORIZONTAL_COEFF_STEP 0x018
+#define PRZ_VERTICAL_COEFF_STEP 0x01c
+#define PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET 0x020
+#define PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET 0x024
+#define PRZ_LUMA_VERTICAL_INTEGER_OFFSET 0x028
+#define PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET 0x02c
+#define PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET 0x030
+#define PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET 0x034
+
+/* MASK */
+#define PRZ_ENABLE_MASK 0x00010001
+#define PRZ_CONTROL_1_MASK 0xfffffff3
+#define PRZ_CONTROL_2_MASK 0x0ffffaff
+#define PRZ_INPUT_IMAGE_MASK 0xffffffff
+#define PRZ_OUTPUT_IMAGE_MASK 0xffffffff
+#define PRZ_HORIZONTAL_COEFF_STEP_MASK 0x007fffff
+#define PRZ_VERTICAL_COEFF_STEP_MASK 0x007fffff
+#define PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET_MASK 0x0000ffff
+#define PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET_MASK 0x001fffff
+#define PRZ_LUMA_VERTICAL_INTEGER_OFFSET_MASK 0x0000ffff
+#define PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET_MASK 0x001fffff
+#define PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET_MASK 0x0000ffff
+#define PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET_MASK 0x001fffff
+
+#endif // __MDP_REG_RSZ_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h
new file mode 100644
index 000000000000..0280e91c09e4
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_wdma.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_WDMA_H__
+#define __MDP_REG_WDMA_H__
+
+#define WDMA_EN 0x008
+#define WDMA_RST 0x00c
+#define WDMA_CFG 0x014
+#define WDMA_SRC_SIZE 0x018
+#define WDMA_CLIP_SIZE 0x01c
+#define WDMA_CLIP_COORD 0x020
+#define WDMA_DST_W_IN_BYTE 0x028
+#define WDMA_ALPHA 0x02c
+#define WDMA_BUF_CON2 0x03c
+#define WDMA_DST_UV_PITCH 0x078
+#define WDMA_DST_ADDR_OFFSET 0x080
+#define WDMA_DST_U_ADDR_OFFSET 0x084
+#define WDMA_DST_V_ADDR_OFFSET 0x088
+#define WDMA_FLOW_CTRL_DBG 0x0a0
+#define WDMA_DST_ADDR 0xf00
+#define WDMA_DST_U_ADDR 0xf04
+#define WDMA_DST_V_ADDR 0xf08
+
+/* MASK */
+#define WDMA_EN_MASK 0x00000001
+#define WDMA_RST_MASK 0x00000001
+#define WDMA_CFG_MASK 0xff03bff0
+#define WDMA_SRC_SIZE_MASK 0x3fff3fff
+#define WDMA_CLIP_SIZE_MASK 0x3fff3fff
+#define WDMA_CLIP_COORD_MASK 0x3fff3fff
+#define WDMA_DST_W_IN_BYTE_MASK 0x0000ffff
+#define WDMA_ALPHA_MASK 0x800000ff
+#define WDMA_BUF_CON2_MASK 0xffffffff
+#define WDMA_DST_UV_PITCH_MASK 0x0000ffff
+#define WDMA_DST_ADDR_OFFSET_MASK 0x0fffffff
+#define WDMA_DST_U_ADDR_OFFSET_MASK 0x0fffffff
+#define WDMA_DST_V_ADDR_OFFSET_MASK 0x0fffffff
+#define WDMA_FLOW_CTRL_DBG_MASK 0x0000f3ff
+#define WDMA_DST_ADDR_MASK 0xffffffff
+#define WDMA_DST_U_ADDR_MASK 0xffffffff
+#define WDMA_DST_V_ADDR_MASK 0xffffffff
+
+#endif // __MDP_REG_WDMA_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h b/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h
new file mode 100644
index 000000000000..6d3ff0e2b672
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mdp_reg_wrot.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MDP_REG_WROT_H__
+#define __MDP_REG_WROT_H__
+
+#define VIDO_CTRL 0x000
+#define VIDO_MAIN_BUF_SIZE 0x008
+#define VIDO_SOFT_RST 0x010
+#define VIDO_SOFT_RST_STAT 0x014
+#define VIDO_CROP_OFST 0x020
+#define VIDO_TAR_SIZE 0x024
+#define VIDO_OFST_ADDR 0x02c
+#define VIDO_STRIDE 0x030
+#define VIDO_OFST_ADDR_C 0x038
+#define VIDO_STRIDE_C 0x03c
+#define VIDO_DITHER 0x054
+#define VIDO_STRIDE_V 0x06c
+#define VIDO_OFST_ADDR_V 0x068
+#define VIDO_RSV_1 0x070
+#define VIDO_IN_SIZE 0x078
+#define VIDO_ROT_EN 0x07c
+#define VIDO_FIFO_TEST 0x080
+#define VIDO_MAT_CTRL 0x084
+#define VIDO_BASE_ADDR 0xf00
+#define VIDO_BASE_ADDR_C 0xf04
+#define VIDO_BASE_ADDR_V 0xf08
+
+/* MASK */
+#define VIDO_CTRL_MASK 0xf530711f
+#define VIDO_MAIN_BUF_SIZE_MASK 0x1fff7f77
+#define VIDO_SOFT_RST_MASK 0x00000001
+#define VIDO_SOFT_RST_STAT_MASK 0x00000001
+#define VIDO_TAR_SIZE_MASK 0x1fff1fff
+#define VIDO_CROP_OFST_MASK 0x1fff1fff
+#define VIDO_OFST_ADDR_MASK 0x0fffffff
+#define VIDO_STRIDE_MASK 0x0000ffff
+#define VIDO_OFST_ADDR_C_MASK 0x0fffffff
+#define VIDO_STRIDE_C_MASK 0x0000ffff
+#define VIDO_DITHER_MASK 0xff000001
+#define VIDO_STRIDE_V_MASK 0x0000ffff
+#define VIDO_OFST_ADDR_V_MASK 0x0fffffff
+#define VIDO_RSV_1_MASK 0xffffffff
+#define VIDO_IN_SIZE_MASK 0x1fff1fff
+#define VIDO_ROT_EN_MASK 0x00000001
+#define VIDO_FIFO_TEST_MASK 0x00000fff
+#define VIDO_MAT_CTRL_MASK 0x000000f3
+#define VIDO_BASE_ADDR_MASK 0xffffffff
+#define VIDO_BASE_ADDR_C_MASK 0xffffffff
+#define VIDO_BASE_ADDR_V_MASK 0xffffffff
+
+#endif // __MDP_REG_WROT_H__
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h
new file mode 100644
index 000000000000..3e66ebaee2da
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-img-ipi.h
@@ -0,0 +1,290 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Holmes Chiou <holmes.chiou@mediatek.com>
+ * Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_IMG_IPI_H__
+#define __MTK_IMG_IPI_H__
+
+#include <linux/types.h>
+
+/*
+ * ISP-MDP generic input information
+ * MD5 of the target SCP blob:
+ * 6da52bdcf4bf76a0983b313e1d4745d6
+ */
+
+#define IMG_MAX_HW_INPUTS 3
+
+#define IMG_MAX_HW_OUTPUTS 4
+
+#define IMG_MAX_PLANES 3
+
+#define IMG_IPI_INIT 1
+#define IMG_IPI_DEINIT 2
+#define IMG_IPI_FRAME 3
+#define IMG_IPI_DEBUG 4
+
+struct img_timeval {
+ u32 tv_sec;
+ u32 tv_usec;
+} __packed;
+
+struct img_addr {
+ u64 va; /* Used for Linux OS access */
+ u32 pa; /* Used for CM4 access */
+ u32 iova; /* Used for IOMMU HW access */
+} __packed;
+
+struct tuning_addr {
+ u64 present;
+ u32 pa; /* Used for CM4 access */
+ u32 iova; /* Used for IOMMU HW access */
+} __packed;
+
+struct img_sw_addr {
+ u64 va; /* Used for APMCU access */
+ u32 pa; /* Used for CM4 access */
+} __packed;
+
+struct img_plane_format {
+ u32 size;
+ u16 stride;
+} __packed;
+
+struct img_pix_format {
+ u16 width;
+ u16 height;
+ u32 colorformat; /* enum mdp_color */
+ u16 ycbcr_prof; /* enum mdp_ycbcr_profile */
+ struct img_plane_format plane_fmt[IMG_MAX_PLANES];
+} __packed;
+
+struct img_image_buffer {
+ struct img_pix_format format;
+ u32 iova[IMG_MAX_PLANES];
+ /* enum mdp_buffer_usage, FD or advanced ISP usages */
+ u32 usage;
+} __packed;
+
+#define IMG_SUBPIXEL_SHIFT 20
+
+struct img_crop {
+ s16 left;
+ s16 top;
+ u16 width;
+ u16 height;
+ u32 left_subpix;
+ u32 top_subpix;
+ u32 width_subpix;
+ u32 height_subpix;
+} __packed;
+
+#define IMG_CTRL_FLAG_HFLIP BIT(0)
+#define IMG_CTRL_FLAG_DITHER BIT(1)
+#define IMG_CTRL_FLAG_SHARPNESS BIT(4)
+#define IMG_CTRL_FLAG_HDR BIT(5)
+#define IMG_CTRL_FLAG_DRE BIT(6)
+
+struct img_input {
+ struct img_image_buffer buffer;
+ u16 flags; /* HDR, DRE, dither */
+} __packed;
+
+struct img_output {
+ struct img_image_buffer buffer;
+ struct img_crop crop;
+ s16 rotation;
+ u16 flags; /* H-flip, sharpness, dither */
+} __packed;
+
+struct img_ipi_frameparam {
+ u32 index;
+ u32 frame_no;
+ struct img_timeval timestamp;
+ u8 type; /* enum mdp_stream_type */
+ u8 state;
+ u8 num_inputs;
+ u8 num_outputs;
+ u64 drv_data;
+ struct img_input inputs[IMG_MAX_HW_INPUTS];
+ struct img_output outputs[IMG_MAX_HW_OUTPUTS];
+ struct tuning_addr tuning_data;
+ struct img_addr subfrm_data;
+ struct img_sw_addr config_data;
+ struct img_sw_addr self_data;
+} __packed;
+
+struct img_sw_buffer {
+ u64 handle; /* Used for APMCU access */
+ u32 scp_addr; /* Used for CM4 access */
+} __packed;
+
+struct img_ipi_param {
+ u8 usage;
+ struct img_sw_buffer frm_param;
+} __packed;
+
+struct img_frameparam {
+ struct list_head list_entry;
+ struct img_ipi_frameparam frameparam;
+};
+
+/* ISP-MDP generic output information */
+
+struct img_comp_frame {
+ u32 output_disable:1;
+ u32 bypass:1;
+ u16 in_width;
+ u16 in_height;
+ u16 out_width;
+ u16 out_height;
+ struct img_crop crop;
+ u16 in_total_width;
+ u16 out_total_width;
+} __packed;
+
+struct img_region {
+ s16 left;
+ s16 right;
+ s16 top;
+ s16 bottom;
+} __packed;
+
+struct img_offset {
+ s16 left;
+ s16 top;
+ u32 left_subpix;
+ u32 top_subpix;
+} __packed;
+
+struct img_comp_subfrm {
+ u32 tile_disable:1;
+ struct img_region in;
+ struct img_region out;
+ struct img_offset luma;
+ struct img_offset chroma;
+ s16 out_vertical; /* Output vertical index */
+ s16 out_horizontal; /* Output horizontal index */
+} __packed;
+
+#define IMG_MAX_SUBFRAMES 14
+
+struct mdp_rdma_subfrm {
+ u32 offset[IMG_MAX_PLANES];
+ u32 offset_0_p;
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+} __packed;
+
+struct mdp_rdma_data {
+ u32 src_ctrl;
+ u32 control;
+ u32 iova[IMG_MAX_PLANES];
+ u32 iova_end[IMG_MAX_PLANES];
+ u32 mf_bkgd;
+ u32 mf_bkgd_in_pxl;
+ u32 sf_bkgd;
+ u32 ufo_dec_y;
+ u32 ufo_dec_c;
+ u32 transform;
+ struct mdp_rdma_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct mdp_rsz_subfrm {
+ u32 control2;
+ u32 src;
+ u32 clip;
+} __packed;
+
+struct mdp_rsz_data {
+ u32 coeff_step_x;
+ u32 coeff_step_y;
+ u32 control1;
+ u32 control2;
+ struct mdp_rsz_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct mdp_wrot_subfrm {
+ u32 offset[IMG_MAX_PLANES];
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+ u32 main_buf;
+} __packed;
+
+struct mdp_wrot_data {
+ u32 iova[IMG_MAX_PLANES];
+ u32 control;
+ u32 stride[IMG_MAX_PLANES];
+ u32 mat_ctrl;
+ u32 fifo_test;
+ u32 filter;
+ struct mdp_wrot_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct mdp_wdma_subfrm {
+ u32 offset[IMG_MAX_PLANES];
+ u32 src;
+ u32 clip;
+ u32 clip_ofst;
+} __packed;
+
+struct mdp_wdma_data {
+ u32 wdma_cfg;
+ u32 iova[IMG_MAX_PLANES];
+ u32 w_in_byte;
+ u32 uv_stride;
+ struct mdp_wdma_subfrm subfrms[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct isp_data {
+ u64 dl_flags; /* 1 << (enum mdp_comp_type) */
+ u32 smxi_iova[4];
+ u32 cq_idx;
+ u32 cq_iova;
+ u32 tpipe_iova[IMG_MAX_SUBFRAMES];
+} __packed;
+
+struct img_compparam {
+ u16 type; /* enum mdp_comp_type */
+ u16 id; /* enum mtk_mdp_comp_id */
+ u32 input;
+ u32 outputs[IMG_MAX_HW_OUTPUTS];
+ u32 num_outputs;
+ struct img_comp_frame frame;
+ struct img_comp_subfrm subfrms[IMG_MAX_SUBFRAMES];
+ u32 num_subfrms;
+ union {
+ struct mdp_rdma_data rdma;
+ struct mdp_rsz_data rsz;
+ struct mdp_wrot_data wrot;
+ struct mdp_wdma_data wdma;
+ struct isp_data isp;
+ };
+} __packed;
+
+#define IMG_MAX_COMPONENTS 20
+
+struct img_mux {
+ u32 reg;
+ u32 value;
+ u32 subsys_id;
+};
+
+struct img_mmsys_ctrl {
+ struct img_mux sets[IMG_MAX_COMPONENTS * 2];
+ u32 num_sets;
+};
+
+struct img_config {
+ struct img_compparam components[IMG_MAX_COMPONENTS];
+ u32 num_components;
+ struct img_mmsys_ctrl ctrls[IMG_MAX_SUBFRAMES];
+ u32 num_subfrms;
+} __packed;
+
+#endif /* __MTK_IMG_IPI_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
new file mode 100644
index 000000000000..29f6c1cd3de7
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/mailbox_controller.h>
+#include <linux/platform_device.h>
+#include "mtk-mdp3-cmdq.h"
+#include "mtk-mdp3-comp.h"
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-m2m.h"
+
+#define MDP_PATH_MAX_COMPS IMG_MAX_COMPONENTS
+
+struct mdp_path {
+ struct mdp_dev *mdp_dev;
+ struct mdp_comp_ctx comps[MDP_PATH_MAX_COMPS];
+ u32 num_comps;
+ const struct img_config *config;
+ const struct img_ipi_frameparam *param;
+ const struct v4l2_rect *composes[IMG_MAX_HW_OUTPUTS];
+ struct v4l2_rect bounds[IMG_MAX_HW_OUTPUTS];
+};
+
+#define has_op(ctx, op) \
+ ((ctx)->comp->ops && (ctx)->comp->ops->op)
+#define call_op(ctx, op, ...) \
+ (has_op(ctx, op) ? (ctx)->comp->ops->op(ctx, ##__VA_ARGS__) : 0)
+
+static bool is_output_disabled(const struct img_compparam *param, u32 count)
+{
+ return (count < param->num_subfrms) ?
+ (param->frame.output_disable ||
+ param->subfrms[count].tile_disable) :
+ true;
+}
+
+static int mdp_path_subfrm_require(const struct mdp_path *path,
+ struct mdp_cmdq_cmd *cmd,
+ s32 *mutex_id, u32 count)
+{
+ const struct img_config *config = path->config;
+ const struct mdp_comp_ctx *ctx;
+ const struct mtk_mdp_driver_data *data = path->mdp_dev->mdp_data;
+ struct device *dev = &path->mdp_dev->pdev->dev;
+ struct mtk_mutex **mutex = path->mdp_dev->mdp_mutex;
+ int id, index;
+
+ /* Decide which mutex to use based on the current pipeline */
+ switch (path->comps[0].comp->id) {
+ case MDP_COMP_RDMA0:
+ *mutex_id = MDP_PIPE_RDMA0;
+ break;
+ case MDP_COMP_ISP_IMGI:
+ *mutex_id = MDP_PIPE_IMGI;
+ break;
+ case MDP_COMP_WPEI:
+ *mutex_id = MDP_PIPE_WPEI;
+ break;
+ case MDP_COMP_WPEI2:
+ *mutex_id = MDP_PIPE_WPEI2;
+ break;
+ default:
+ dev_err(dev, "Unknown pipeline and no mutex is assigned");
+ return -EINVAL;
+ }
+
+ /* Set mutex mod */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ id = ctx->comp->id;
+ mtk_mutex_write_mod(mutex[*mutex_id],
+ data->mdp_mutex_table_idx[id], false);
+ }
+
+ mtk_mutex_write_sof(mutex[*mutex_id],
+ MUTEX_SOF_IDX_SINGLE_MODE);
+
+ return 0;
+}
+
+static int mdp_path_subfrm_run(const struct mdp_path *path,
+ struct mdp_cmdq_cmd *cmd,
+ s32 *mutex_id, u32 count)
+{
+ const struct img_config *config = path->config;
+ const struct mdp_comp_ctx *ctx;
+ struct device *dev = &path->mdp_dev->pdev->dev;
+ struct mtk_mutex **mutex = path->mdp_dev->mdp_mutex;
+ int index;
+ s32 event;
+
+ if (-1 == *mutex_id) {
+ dev_err(dev, "Incorrect mutex id");
+ return -EINVAL;
+ }
+
+ /* Wait WROT SRAM shared to DISP RDMA */
+ /* Clear SOF event for each engine */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];
+ if (event != MDP_GCE_NO_EVENT)
+ MM_REG_CLEAR(cmd, event);
+ }
+
+ /* Enable the mutex */
+ mtk_mutex_enable_by_cmdq(mutex[*mutex_id], (void *)&cmd->pkt);
+
+ /* Wait SOF events and clear mutex modules (optional) */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];
+ if (event != MDP_GCE_NO_EVENT)
+ MM_REG_WAIT(cmd, event);
+ }
+
+ return 0;
+}
+
+static int mdp_path_ctx_init(struct mdp_dev *mdp, struct mdp_path *path)
+{
+ const struct img_config *config = path->config;
+ int index, ret;
+
+ if (config->num_components < 1)
+ return -EINVAL;
+
+ for (index = 0; index < config->num_components; index++) {
+ ret = mdp_comp_ctx_config(mdp, &path->comps[index],
+ &config->components[index],
+ path->param);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mdp_path_config_subfrm(struct mdp_cmdq_cmd *cmd,
+ struct mdp_path *path, u32 count)
+{
+ const struct img_config *config = path->config;
+ const struct img_mmsys_ctrl *ctrl = &config->ctrls[count];
+ const struct img_mux *set;
+ struct mdp_comp_ctx *ctx;
+ s32 mutex_id;
+ int index, ret;
+
+ /* Acquire components */
+ ret = mdp_path_subfrm_require(path, cmd, &mutex_id, count);
+ if (ret)
+ return ret;
+ /* Enable mux settings */
+ for (index = 0; index < ctrl->num_sets; index++) {
+ set = &ctrl->sets[index];
+ cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
+ set->value, 0xFFFFFFFF);
+ }
+ /* Config sub-frame information */
+ for (index = (config->num_components - 1); index >= 0; index--) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ ret = call_op(ctx, config_subfrm, cmd, count);
+ if (ret)
+ return ret;
+ }
+ /* Run components */
+ ret = mdp_path_subfrm_run(path, cmd, &mutex_id, count);
+ if (ret)
+ return ret;
+ /* Wait components done */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ if (is_output_disabled(ctx->param, count))
+ continue;
+ ret = call_op(ctx, wait_comp_event, cmd);
+ if (ret)
+ return ret;
+ }
+ /* Advance to the next sub-frame */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ ret = call_op(ctx, advance_subfrm, cmd, count);
+ if (ret)
+ return ret;
+ }
+ /* Disable mux settings */
+ for (index = 0; index < ctrl->num_sets; index++) {
+ set = &ctrl->sets[index];
+ cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
+ 0, 0xFFFFFFFF);
+ }
+
+ return 0;
+}
+
+static int mdp_path_config(struct mdp_dev *mdp, struct mdp_cmdq_cmd *cmd,
+ struct mdp_path *path)
+{
+ const struct img_config *config = path->config;
+ struct mdp_comp_ctx *ctx;
+ int index, count, ret;
+
+ /* Config path frame */
+ /* Reset components */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ ret = call_op(ctx, init_comp, cmd);
+ if (ret)
+ return ret;
+ }
+ /* Config frame mode */
+ for (index = 0; index < config->num_components; index++) {
+ const struct v4l2_rect *compose =
+ path->composes[ctx->param->outputs[0]];
+
+ ctx = &path->comps[index];
+ ret = call_op(ctx, config_frame, cmd, compose);
+ if (ret)
+ return ret;
+ }
+
+ /* Config path sub-frames */
+ for (count = 0; count < config->num_subfrms; count++) {
+ ret = mdp_path_config_subfrm(cmd, path, count);
+ if (ret)
+ return ret;
+ }
+ /* Post processing information */
+ for (index = 0; index < config->num_components; index++) {
+ ctx = &path->comps[index];
+ ret = call_op(ctx, post_process, cmd);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int mdp_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
+ size_t size)
+{
+ struct device *dev;
+ dma_addr_t dma_addr;
+
+ pkt->va_base = kzalloc(size, GFP_KERNEL);
+ if (!pkt->va_base) {
+ kfree(pkt);
+ return -ENOMEM;
+ }
+ pkt->buf_size = size;
+ pkt->cl = (void *)client;
+
+ dev = client->chan->mbox->dev;
+ dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
+ kfree(pkt->va_base);
+ return -ENOMEM;
+ }
+
+ pkt->pa_base = dma_addr;
+
+ return 0;
+}
+
+static void mdp_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
+{
+ struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
+
+ dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
+ DMA_TO_DEVICE);
+ kfree(pkt->va_base);
+ pkt->va_base = NULL;
+}
+
+static void mdp_auto_release_work(struct work_struct *work)
+{
+ struct mdp_cmdq_cmd *cmd;
+ struct mdp_dev *mdp;
+
+ cmd = container_of(work, struct mdp_cmdq_cmd, auto_release_work);
+ mdp = cmd->mdp;
+
+ mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ cmd->num_comps);
+
+ atomic_dec(&mdp->job_count);
+ wake_up(&mdp->callback_wq);
+
+ mdp_cmdq_pkt_destroy(&cmd->pkt);
+ kfree(cmd->comps);
+ cmd->comps = NULL;
+ kfree(cmd);
+ cmd = NULL;
+}
+
+static void mdp_handle_cmdq_callback(struct mbox_client *cl, void *mssg)
+{
+ struct mdp_cmdq_cmd *cmd;
+ struct cmdq_cb_data *data;
+ struct mdp_dev *mdp;
+ struct device *dev;
+
+ if (!mssg) {
+ pr_info("%s:no callback data\n", __func__);
+ return;
+ }
+
+ data = (struct cmdq_cb_data *)mssg;
+ cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt);
+ mdp = cmd->mdp;
+ dev = &mdp->pdev->dev;
+
+ if (cmd->mdp_ctx)
+ mdp_m2m_job_finish(cmd->mdp_ctx);
+
+ if (cmd->user_cmdq_cb) {
+ struct cmdq_cb_data user_cb_data;
+
+ user_cb_data.sta = data->sta;
+ user_cb_data.pkt = data->pkt;
+ cmd->user_cmdq_cb(user_cb_data);
+ }
+
+ INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work);
+ if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) {
+ dev_err(dev, "%s:queue_work fail!\n", __func__);
+ mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ cmd->num_comps);
+
+ atomic_dec(&mdp->job_count);
+ wake_up(&mdp->callback_wq);
+
+ mdp_cmdq_pkt_destroy(&cmd->pkt);
+ kfree(cmd->comps);
+ cmd->comps = NULL;
+ kfree(cmd);
+ cmd = NULL;
+ }
+}
+
+int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param)
+{
+ struct mdp_path *path = NULL;
+ struct mdp_cmdq_cmd *cmd = NULL;
+ struct mdp_comp *comps = NULL;
+ struct device *dev = &mdp->pdev->dev;
+ int i, ret;
+
+ atomic_inc(&mdp->job_count);
+ if (atomic_read(&mdp->suspended)) {
+ atomic_dec(&mdp->job_count);
+ return -ECANCELED;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ if (mdp_cmdq_pkt_create(mdp->cmdq_clt, &cmd->pkt, SZ_16K)) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ comps = kcalloc(param->config->num_components, sizeof(*comps),
+ GFP_KERNEL);
+ if (!comps) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ path = kzalloc(sizeof(*path), GFP_KERNEL);
+ if (!path) {
+ ret = -ENOMEM;
+ goto err_cmdq_data;
+ }
+
+ path->mdp_dev = mdp;
+ path->config = param->config;
+ path->param = param->param;
+ for (i = 0; i < param->param->num_outputs; i++) {
+ path->bounds[i].left = 0;
+ path->bounds[i].top = 0;
+ path->bounds[i].width =
+ param->param->outputs[i].buffer.format.width;
+ path->bounds[i].height =
+ param->param->outputs[i].buffer.format.height;
+ path->composes[i] = param->composes[i] ?
+ param->composes[i] : &path->bounds[i];
+ }
+
+ ret = mdp_path_ctx_init(mdp, path);
+ if (ret) {
+ dev_err(dev, "mdp_path_ctx_init error\n");
+ goto err_cmdq_data;
+ }
+
+ mtk_mutex_prepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+
+ ret = mdp_path_config(mdp, cmd, path);
+ if (ret) {
+ dev_err(dev, "mdp_path_config error\n");
+ goto err_cmdq_data;
+ }
+ cmdq_pkt_finalize(&cmd->pkt);
+
+ for (i = 0; i < param->config->num_components; i++)
+ memcpy(&comps[i], path->comps[i].comp,
+ sizeof(struct mdp_comp));
+
+ mdp->cmdq_clt->client.rx_callback = mdp_handle_cmdq_callback;
+ cmd->mdp = mdp;
+ cmd->user_cmdq_cb = param->cmdq_cb;
+ cmd->user_cb_data = param->cb_data;
+ cmd->comps = comps;
+ cmd->num_comps = param->config->num_components;
+ cmd->mdp_ctx = param->mdp_ctx;
+
+ ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd->comps, cmd->num_comps);
+ if (ret) {
+ dev_err(dev, "comp %d failed to enable clock!\n", ret);
+ goto err_clock_off;
+ }
+
+ dma_sync_single_for_device(mdp->cmdq_clt->chan->mbox->dev,
+ cmd->pkt.pa_base, cmd->pkt.cmd_buf_size,
+ DMA_TO_DEVICE);
+ ret = mbox_send_message(mdp->cmdq_clt->chan, &cmd->pkt);
+ if (ret < 0) {
+ dev_err(dev, "mbox send message fail %d!\n", ret);
+ goto err_clock_off;
+ }
+ mbox_client_txdone(mdp->cmdq_clt->chan, 0);
+
+ kfree(path);
+ return 0;
+
+err_clock_off:
+ mtk_mutex_unprepare(mdp->mdp_mutex[MDP_PIPE_RDMA0]);
+ mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
+ cmd->num_comps);
+err_cmdq_data:
+ kfree(path);
+ atomic_dec(&mdp->job_count);
+ wake_up(&mdp->callback_wq);
+ if (cmd->pkt.buf_size > 0)
+ mdp_cmdq_pkt_destroy(&cmd->pkt);
+ kfree(comps);
+ kfree(cmd);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mdp_cmdq_send);
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
new file mode 100644
index 000000000000..43475b862ddb
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_CMDQ_H__
+#define __MTK_MDP3_CMDQ_H__
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+#include "mtk-img-ipi.h"
+
+struct platform_device *mdp_get_plat_device(struct platform_device *pdev);
+
+struct mdp_cmdq_param {
+ struct img_config *config;
+ struct img_ipi_frameparam *param;
+ const struct v4l2_rect *composes[IMG_MAX_HW_OUTPUTS];
+
+ void (*cmdq_cb)(struct cmdq_cb_data data);
+ void *cb_data;
+ void *mdp_ctx;
+};
+
+struct mdp_cmdq_cmd {
+ struct work_struct auto_release_work;
+ struct cmdq_pkt pkt;
+ s32 *event;
+ struct mdp_dev *mdp;
+ void (*user_cmdq_cb)(struct cmdq_cb_data data);
+ void *user_cb_data;
+ struct mdp_comp *comps;
+ void *mdp_ctx;
+ u8 num_comps;
+};
+
+struct mdp_dev;
+
+int mdp_cmdq_send(struct mdp_dev *mdp, struct mdp_cmdq_param *param);
+
+#endif /* __MTK_MDP3_CMDQ_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
new file mode 100644
index 000000000000..e62abf3587bf
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c
@@ -0,0 +1,1033 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include "mtk-mdp3-comp.h"
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-regs.h"
+
+#include "mdp_reg_rdma.h"
+#include "mdp_reg_ccorr.h"
+#include "mdp_reg_rsz.h"
+#include "mdp_reg_wrot.h"
+#include "mdp_reg_wdma.h"
+
+static u32 mdp_comp_alias_id[MDP_COMP_TYPE_COUNT];
+
+static inline const struct mdp_platform_config *
+__get_plat_cfg(const struct mdp_comp_ctx *ctx)
+{
+ if (!ctx)
+ return NULL;
+
+ return ctx->comp->mdp_dev->mdp_data->mdp_cfg;
+}
+
+static s64 get_comp_flag(const struct mdp_comp_ctx *ctx)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+
+ if (mdp_cfg && mdp_cfg->rdma_rsz1_sram_sharing)
+ if (ctx->comp->id == MDP_COMP_RDMA0)
+ return BIT(MDP_COMP_RDMA0) | BIT(MDP_COMP_RSZ1);
+
+ return BIT(ctx->comp->id);
+}
+
+static int init_rdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
+ struct mdp_comp *prz1 = ctx->comp->mdp_dev->comp[MDP_COMP_RSZ1];
+
+ /* Disable RSZ1 */
+ if (ctx->comp->id == MDP_COMP_RDMA0 && prz1)
+ MM_REG_WRITE(cmd, subsys_id, prz1->reg_base, PRZ_ENABLE,
+ 0x0, BIT(0));
+ }
+
+ /* Reset RDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, BIT(0), BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, MDP_RDMA_MON_STA_1, BIT(8), BIT(8));
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_RESET, 0x0, BIT(0));
+ return 0;
+}
+
+static int config_rdma_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_rdma_data *rdma = &ctx->param->rdma;
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ u32 colorformat = ctx->input->buffer.format.colorformat;
+ bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
+ bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (mdp_cfg && mdp_cfg->rdma_support_10bit) {
+ if (block10bit)
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x7, 0x7);
+ else
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, 0x0, 0x7);
+ }
+
+ /* Setup smi control */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_GMCIF_CON,
+ (7 << 4) + //burst type to 8
+ (1 << 16), //enable pre-ultra
+ 0x00030071);
+
+ /* Setup source frame info */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_CON, rdma->src_ctrl,
+ 0x03C8FE0F);
+
+ if (mdp_cfg)
+ if (mdp_cfg->rdma_support_10bit && en_ufo) {
+ /* Setup source buffer base */
+ MM_REG_WRITE(cmd, subsys_id,
+ base, MDP_RDMA_UFO_DEC_LENGTH_BASE_Y,
+ rdma->ufo_dec_y, 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, MDP_RDMA_UFO_DEC_LENGTH_BASE_C,
+ rdma->ufo_dec_c, 0xFFFFFFFF);
+ /* Set 10bit source frame pitch */
+ if (block10bit)
+ MM_REG_WRITE(cmd, subsys_id,
+ base, MDP_RDMA_MF_BKGD_SIZE_IN_PXL,
+ rdma->mf_bkgd_in_pxl, 0x001FFFFF);
+ }
+
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_CON, rdma->control,
+ 0x1110);
+ /* Setup source buffer base */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_0, rdma->iova[0],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_1, rdma->iova[1],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_BASE_2, rdma->iova[2],
+ 0xFFFFFFFF);
+ /* Setup source buffer end */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_0,
+ rdma->iova_end[0], 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_1,
+ rdma->iova_end[1], 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_END_2,
+ rdma->iova_end[2], 0xFFFFFFFF);
+ /* Setup source frame pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_BKGD_SIZE_IN_BYTE,
+ rdma->mf_bkgd, 0x001FFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SF_BKGD_SIZE_IN_BYTE,
+ rdma->sf_bkgd, 0x001FFFFF);
+ /* Setup color transform */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_TRANSFORM_0,
+ rdma->transform, 0x0F110000);
+
+ return 0;
+}
+
+static int config_rdma_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_rdma_subfrm *subfrm = &ctx->param->rdma.subfrms[index];
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ u32 colorformat = ctx->input->buffer.format.colorformat;
+ bool block10bit = MDP_COLOR_IS_10BIT_PACKED(colorformat);
+ bool en_ufo = MDP_COLOR_IS_UFP(colorformat);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Enable RDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, BIT(0), BIT(0));
+
+ /* Set Y pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_0,
+ subfrm->offset[0], 0xFFFFFFFF);
+
+ /* Set 10bit UFO mode */
+ if (mdp_cfg)
+ if (mdp_cfg->rdma_support_10bit && block10bit && en_ufo)
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_SRC_OFFSET_0_P,
+ subfrm->offset_0_p, 0xFFFFFFFF);
+
+ /* Set U pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_1,
+ subfrm->offset[1], 0xFFFFFFFF);
+ /* Set V pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_SRC_OFFSET_2,
+ subfrm->offset[2], 0xFFFFFFFF);
+ /* Set source size */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_SRC_SIZE, subfrm->src,
+ 0x1FFF1FFF);
+ /* Set target size */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_CLIP_SIZE,
+ subfrm->clip, 0x1FFF1FFF);
+ /* Set crop offset */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_MF_OFFSET_1,
+ subfrm->clip_ofst, 0x003F001F);
+
+ if (mdp_cfg && mdp_cfg->rdma_upsample_repeat_only)
+ if ((csf->in.right - csf->in.left + 1) > 320)
+ MM_REG_WRITE(cmd, subsys_id, base,
+ MDP_RDMA_RESV_DUMMY_0, BIT(2), BIT(2));
+
+ return 0;
+}
+
+static int wait_rdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (ctx->comp->alias_id == 0)
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
+ else
+ dev_err(dev, "Do not support RDMA1_DONE event\n");
+
+ /* Disable RDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_RDMA_EN, 0x0, BIT(0));
+ return 0;
+}
+
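+/* Component callbacks for the RDMA (frame read DMA) engine */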
+static const struct mdp_comp_ops rdma_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_rdma,
+ .config_frame = config_rdma_frame,
+ .config_subfrm = config_rdma_subfrm,
+ .wait_comp_event = wait_rdma_event,
+};
+
+static int init_rsz(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Reset RSZ */
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x10000, BIT(16));
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(16));
+ /* Enable RSZ */
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, BIT(0), BIT(0));
+ return 0;
+}
+
+static int config_rsz_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_rsz_data *rsz = &ctx->param->rsz;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (ctx->param->frame.bypass) {
+ /* Disable RSZ */
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_ENABLE, 0x0, BIT(0));
+ return 0;
+ }
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, rsz->control1,
+ 0x03FFFDF3);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, rsz->control2,
+ 0x0FFFC290);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_HORIZONTAL_COEFF_STEP,
+ rsz->coeff_step_x, 0x007FFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_VERTICAL_COEFF_STEP,
+ rsz->coeff_step_y, 0x007FFFFF);
+ return 0;
+}
+
+static int config_rsz_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_rsz_subfrm *subfrm = &ctx->param->rsz.subfrms[index];
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_2, subfrm->control2,
+ 0x00003800);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_INPUT_IMAGE, subfrm->src,
+ 0xFFFFFFFF);
+
+ if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample)
+ if ((csf->in.right - csf->in.left + 1) <= 16)
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1,
+ BIT(27), BIT(27));
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_HORIZONTAL_INTEGER_OFFSET,
+ csf->luma.left, 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, PRZ_LUMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ csf->luma.left_subpix, 0x1FFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_INTEGER_OFFSET,
+ csf->luma.top, 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_LUMA_VERTICAL_SUBPIXEL_OFFSET,
+ csf->luma.top_subpix, 0x1FFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, PRZ_CHROMA_HORIZONTAL_INTEGER_OFFSET,
+ csf->chroma.left, 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id,
+ base, PRZ_CHROMA_HORIZONTAL_SUBPIXEL_OFFSET,
+ csf->chroma.left_subpix, 0x1FFFFF);
+
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_OUTPUT_IMAGE, subfrm->clip,
+ 0xFFFFFFFF);
+
+ return 0;
+}
+
+static int advance_rsz_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+
+ if (mdp_cfg && mdp_cfg->rsz_disable_dcm_small_sample) {
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if ((csf->in.right - csf->in.left + 1) <= 16)
+ MM_REG_WRITE(cmd, subsys_id, base, PRZ_CONTROL_1, 0x0,
+ BIT(27));
+ }
+
+ return 0;
+}
+
+static const struct mdp_comp_ops rsz_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_rsz,
+ .config_frame = config_rsz_frame,
+ .config_subfrm = config_rsz_subfrm,
+ .advance_subfrm = advance_rsz_subfrm,
+};
+
+static int init_wrot(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Reset WROT */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, BIT(0), BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, BIT(0), BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_SOFT_RST, 0x0, BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, VIDO_SOFT_RST_STAT, 0x0, BIT(0));
+ return 0;
+}
+
+static int config_wrot_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_wrot_data *wrot = &ctx->param->wrot;
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Write frame base address */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR, wrot->iova[0],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_C, wrot->iova[1],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_BASE_ADDR_V, wrot->iova[2],
+ 0xFFFFFFFF);
+ /* Write frame related registers */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CTRL, wrot->control,
+ 0xF131510F);
+ /* Write frame Y pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE, wrot->stride[0],
+ 0x0000FFFF);
+ /* Write frame UV pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_C, wrot->stride[1],
+ 0xFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_STRIDE_V, wrot->stride[2],
+ 0xFFFF);
+ /* Write matrix control */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAT_CTRL, wrot->mat_ctrl, 0xF3);
+
+ /* Set the fixed ALPHA as 0xFF */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_DITHER, 0xFF000000,
+ 0xFF000000);
+ /* Set VIDO_EOL_SEL */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_RSV_1, BIT(31), BIT(31));
+ /* Set VIDO_FIFO_TEST */
+ if (wrot->fifo_test != 0)
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_FIFO_TEST,
+ wrot->fifo_test, 0xFFF);
+ /* Filter enable */
+ if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
+ wrot->filter, 0x77);
+
+ return 0;
+}
+
+static int config_wrot_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_wrot_subfrm *subfrm = &ctx->param->wrot.subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Write Y pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR,
+ subfrm->offset[0], 0x0FFFFFFF);
+ /* Write U pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_C,
+ subfrm->offset[1], 0x0FFFFFFF);
+ /* Write V pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_OFST_ADDR_V,
+ subfrm->offset[2], 0x0FFFFFFF);
+ /* Write source size */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_IN_SIZE, subfrm->src,
+ 0x1FFF1FFF);
+ /* Write target size */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_TAR_SIZE, subfrm->clip,
+ 0x1FFF1FFF);
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_CROP_OFST, subfrm->clip_ofst,
+ 0x1FFF1FFF);
+
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE,
+ subfrm->main_buf, 0x1FFF7F00);
+
+ /* Enable WROT */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, BIT(0), BIT(0));
+
+ return 0;
+}
+
+static int wait_wrot_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ const struct mdp_platform_config *mdp_cfg = __get_plat_cfg(ctx);
+ struct device *dev = &ctx->comp->mdp_dev->pdev->dev;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ if (ctx->comp->alias_id == 0)
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
+ else
+ dev_err(dev, "Do not support WROT1_DONE event\n");
+
+ if (mdp_cfg && mdp_cfg->wrot_filter_constraint)
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_MAIN_BUF_SIZE, 0x0,
+ 0x77);
+
+ /* Disable WROT */
+ MM_REG_WRITE(cmd, subsys_id, base, VIDO_ROT_EN, 0x0, BIT(0));
+
+ return 0;
+}
+
+static const struct mdp_comp_ops wrot_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_wrot,
+ .config_frame = config_wrot_frame,
+ .config_subfrm = config_wrot_subfrm,
+ .wait_comp_event = wait_wrot_event,
+};
+
+static int init_wdma(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Reset WDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, BIT(0), BIT(0));
+ MM_REG_POLL(cmd, subsys_id, base, WDMA_FLOW_CTRL_DBG, BIT(0), BIT(0));
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_RST, 0x0, BIT(0));
+ return 0;
+}
+
+static int config_wdma_frame(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose)
+{
+ const struct mdp_wdma_data *wdma = &ctx->param->wdma;
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_BUF_CON2, 0x10101050,
+ 0xFFFFFFFF);
+
+ /* Setup frame information */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_CFG, wdma->wdma_cfg,
+ 0x0F01B8F0);
+ /* Setup frame base address */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR, wdma->iova[0],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR, wdma->iova[1],
+ 0xFFFFFFFF);
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR, wdma->iova[2],
+ 0xFFFFFFFF);
+ /* Setup Y pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_W_IN_BYTE,
+ wdma->w_in_byte, 0x0000FFFF);
+ /* Setup UV pitch */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_UV_PITCH,
+ wdma->uv_stride, 0x0000FFFF);
+ /* Set the fixed ALPHA as 0xFF */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_ALPHA, 0x800000FF,
+ 0x800000FF);
+
+ return 0;
+}
+
+static int config_wdma_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct mdp_wdma_subfrm *subfrm = &ctx->param->wdma.subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* Write Y pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_ADDR_OFFSET,
+ subfrm->offset[0], 0x0FFFFFFF);
+ /* Write U pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_U_ADDR_OFFSET,
+ subfrm->offset[1], 0x0FFFFFFF);
+ /* Write V pixel offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_DST_V_ADDR_OFFSET,
+ subfrm->offset[2], 0x0FFFFFFF);
+ /* Write source size */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_SRC_SIZE, subfrm->src,
+ 0x3FFF3FFF);
+ /* Write target size */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_SIZE, subfrm->clip,
+ 0x3FFF3FFF);
+ /* Write clip offset */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_CLIP_COORD, subfrm->clip_ofst,
+ 0x3FFF3FFF);
+
+ /* Enable WDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, BIT(0), BIT(0));
+
+ return 0;
+}
+
+static int wait_wdma_event(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ MM_REG_WAIT(cmd, ctx->comp->gce_event[MDP_GCE_EVENT_EOF]);
+ /* Disable WDMA */
+ MM_REG_WRITE(cmd, subsys_id, base, WDMA_EN, 0x0, BIT(0));
+ return 0;
+}
+
+static const struct mdp_comp_ops wdma_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_wdma,
+ .config_frame = config_wdma_frame,
+ .config_subfrm = config_wdma_subfrm,
+ .wait_comp_event = wait_wdma_event,
+};
+
+static int init_ccorr(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd)
+{
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+
+ /* CCORR enable */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_EN, BIT(0), BIT(0));
+ /* Relay mode */
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_CFG, BIT(0), BIT(0));
+ return 0;
+}
+
+static int config_ccorr_subfrm(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index)
+{
+ const struct img_comp_subfrm *csf = &ctx->param->subfrms[index];
+ phys_addr_t base = ctx->comp->reg_base;
+ u8 subsys_id = ctx->comp->subsys_id;
+ u32 hsize, vsize;
+
+ hsize = csf->in.right - csf->in.left + 1;
+ vsize = csf->in.bottom - csf->in.top + 1;
+ MM_REG_WRITE(cmd, subsys_id, base, MDP_CCORR_SIZE,
+ (hsize << 16) + (vsize << 0), 0x1FFF1FFF);
+ return 0;
+}
+
+static const struct mdp_comp_ops ccorr_ops = {
+ .get_comp_flag = get_comp_flag,
+ .init_comp = init_ccorr,
+ .config_subfrm = config_ccorr_subfrm,
+};
+
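+/* Dispatch table indexed by component type; unlisted types stay NULL */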
+static const struct mdp_comp_ops *mdp_comp_ops[MDP_COMP_TYPE_COUNT] = {
+ [MDP_COMP_TYPE_RDMA] = &rdma_ops,
+ [MDP_COMP_TYPE_RSZ] = &rsz_ops,
+ [MDP_COMP_TYPE_WROT] = &wrot_ops,
+ [MDP_COMP_TYPE_WDMA] = &wdma_ops,
+ [MDP_COMP_TYPE_CCORR] = &ccorr_ops,
+};
+
+struct mdp_comp_match {
+ enum mdp_comp_type type;
+ u32 alias_id;
+};
+
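+/*
+ * Map each component ID to its hardware type and an alias index that
+ * distinguishes multiple instances of the same type (e.g. RSZ0/RSZ1).
+ */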
+static const struct mdp_comp_match mdp_comp_matches[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_WPEI] = { MDP_COMP_TYPE_WPEI, 0 },
+ [MDP_COMP_WPEO] = { MDP_COMP_TYPE_EXTO, 2 },
+ [MDP_COMP_WPEI2] = { MDP_COMP_TYPE_WPEI, 1 },
+ [MDP_COMP_WPEO2] = { MDP_COMP_TYPE_EXTO, 3 },
+ [MDP_COMP_ISP_IMGI] = { MDP_COMP_TYPE_IMGI, 0 },
+ [MDP_COMP_ISP_IMGO] = { MDP_COMP_TYPE_EXTO, 0 },
+ [MDP_COMP_ISP_IMG2O] = { MDP_COMP_TYPE_EXTO, 1 },
+
+ [MDP_COMP_CAMIN] = { MDP_COMP_TYPE_DL_PATH, 0 },
+ [MDP_COMP_CAMIN2] = { MDP_COMP_TYPE_DL_PATH, 1 },
+ [MDP_COMP_RDMA0] = { MDP_COMP_TYPE_RDMA, 0 },
+ [MDP_COMP_CCORR0] = { MDP_COMP_TYPE_CCORR, 0 },
+ [MDP_COMP_RSZ0] = { MDP_COMP_TYPE_RSZ, 0 },
+ [MDP_COMP_RSZ1] = { MDP_COMP_TYPE_RSZ, 1 },
+ [MDP_COMP_PATH0_SOUT] = { MDP_COMP_TYPE_PATH, 0 },
+ [MDP_COMP_PATH1_SOUT] = { MDP_COMP_TYPE_PATH, 1 },
+ [MDP_COMP_WROT0] = { MDP_COMP_TYPE_WROT, 0 },
+ [MDP_COMP_WDMA] = { MDP_COMP_TYPE_WDMA, 0 },
+};
+
+static const struct of_device_id mdp_comp_dt_ids[] = {
+ {
+ .compatible = "mediatek,mt8183-mdp3-rdma",
+ .data = (void *)MDP_COMP_TYPE_RDMA,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-ccorr",
+ .data = (void *)MDP_COMP_TYPE_CCORR,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-rsz",
+ .data = (void *)MDP_COMP_TYPE_RSZ,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-wrot",
+ .data = (void *)MDP_COMP_TYPE_WROT,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-wdma",
+ .data = (void *)MDP_COMP_TYPE_WDMA,
+ },
+ {}
+};
+
+static const struct of_device_id mdp_sub_comp_dt_ids[] = {
+ {
+ .compatible = "mediatek,mt8183-mdp3-wdma",
+ .data = (void *)MDP_COMP_TYPE_PATH,
+ }, {
+ .compatible = "mediatek,mt8183-mdp3-wrot",
+ .data = (void *)MDP_COMP_TYPE_PATH,
+ },
+ {}
+};
+
+/* Used to describe the item order in MDP property */
+struct mdp_comp_info {
+ u32 clk_num;
+ u32 clk_ofst;
+ u32 dts_reg_ofst;
+};
+
+static const struct mdp_comp_info mdp_comp_dt_info[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_RDMA0] = {2, 0, 0},
+ [MDP_COMP_RSZ0] = {1, 0, 0},
+ [MDP_COMP_WROT0] = {1, 0, 0},
+ [MDP_COMP_WDMA] = {1, 0, 0},
+ [MDP_COMP_CCORR0] = {1, 0, 0},
+};
+
+static inline bool is_dma_capable(const enum mdp_comp_type type)
+{
+ return (type == MDP_COMP_TYPE_RDMA ||
+ type == MDP_COMP_TYPE_WROT ||
+ type == MDP_COMP_TYPE_WDMA);
+}
+
+static inline bool is_bypass_gce_event(const enum mdp_comp_type type)
+{
+ /*
+ * Subcomponent PATH only steers the direction of the data flow and
+ * does not need to wait for a GCE event.
+ */
+ return (type == MDP_COMP_TYPE_PATH);
+}
+
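+/* Reverse lookup: find the component ID for a given (type, alias_id) pair */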
+static int mdp_comp_get_id(enum mdp_comp_type type, int alias_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp_comp_matches); i++)
+ if (mdp_comp_matches[i].type == type &&
+ mdp_comp_matches[i].alias_id == alias_id)
+ return i;
+ return -ENODEV;
+}
+
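+/*
+ * Power up a component: take a runtime PM reference on its device (only
+ * set for DMA-capable blocks) and enable every clock obtained from the DT.
+ */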
+int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp)
+{
+ int i, ret;
+
+ if (comp->comp_dev) {
+ ret = pm_runtime_get_sync(comp->comp_dev);
+ if (ret < 0) {
+ dev_err(dev,
+ "Failed to get power, err %d. type:%d id:%d\n",
+ ret, comp->type, comp->id);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
+ if (IS_ERR_OR_NULL(comp->clks[i]))
+ continue;
+ ret = clk_prepare_enable(comp->clks[i]);
+ if (ret) {
+ dev_err(dev,
+ "Failed to enable clk %d. type:%d id:%d\n",
+ i, comp->type, comp->id);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(comp->clks); i++) {
+ if (IS_ERR_OR_NULL(comp->clks[i]))
+ continue;
+ clk_disable_unprepare(comp->clks[i]);
+ }
+
+ if (comp->comp_dev)
+ pm_runtime_put(comp->comp_dev);
+}
+
+int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ if (mdp_comp_clock_on(dev, &comps[i]) != 0)
+ return ++i;
+
+ return 0;
+}
+
+void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++)
+ mdp_comp_clock_off(dev, &comps[i]);
+}
+
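+/* Query the GCE/CMDQ subsys ID of a component from its device node */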
+static int mdp_get_subsys_id(struct device *dev, struct device_node *node,
+ struct mdp_comp *comp)
+{
+ struct platform_device *comp_pdev;
+ struct cmdq_client_reg cmdq_reg;
+ int ret = 0;
+ int index = 0;
+
+ if (!dev || !node || !comp)
+ return -EINVAL;
+
+ comp_pdev = of_find_device_by_node(node);
+
+ if (!comp_pdev) {
+ dev_err(dev, "get comp_pdev fail! comp id=%d type=%d\n",
+ comp->id, comp->type);
+ return -ENODEV;
+ }
+
+ index = mdp_comp_dt_info[comp->id].dts_reg_ofst;
+ ret = cmdq_dev_get_client_reg(&comp_pdev->dev, &cmdq_reg, index);
+ if (ret != 0) {
+ dev_err(&comp_pdev->dev, "cmdq_dev_get_client_reg failed\n");
+ return -EINVAL;
+ }
+
+ comp->subsys_id = cmdq_reg.subsys;
+ dev_dbg(&comp_pdev->dev, "subsys id=%d\n", cmdq_reg.subsys);
+
+ return 0;
+}
+
+static void __mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
+ struct mdp_comp *comp)
+{
+ struct resource res;
+ phys_addr_t base;
+ int index = mdp_comp_dt_info[comp->id].dts_reg_ofst;
+
+ if (of_address_to_resource(node, index, &res) < 0)
+ base = 0L;
+ else
+ base = res.start;
+
+ comp->mdp_dev = mdp;
+ comp->regs = of_iomap(node, 0);
+ comp->reg_base = base;
+}
+
+static int mdp_comp_init(struct mdp_dev *mdp, struct device_node *node,
+ struct mdp_comp *comp, enum mtk_mdp_comp_id id)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int clk_num;
+ int clk_ofst;
+ int i;
+ s32 event;
+
+ if (id < 0 || id >= MDP_MAX_COMP_COUNT) {
+ dev_err(dev, "Invalid component id %d\n", id);
+ return -EINVAL;
+ }
+
+ comp->id = id;
+ comp->type = mdp_comp_matches[id].type;
+ comp->alias_id = mdp_comp_matches[id].alias_id;
+ comp->ops = mdp_comp_ops[comp->type];
+ __mdp_comp_init(mdp, node, comp);
+
+ clk_num = mdp_comp_dt_info[id].clk_num;
+ clk_ofst = mdp_comp_dt_info[id].clk_ofst;
+
+ for (i = 0; i < clk_num; i++) {
+ comp->clks[i] = of_clk_get(node, i + clk_ofst);
+ if (IS_ERR(comp->clks[i]))
+ break;
+ }
+
+ mdp_get_subsys_id(dev, node, comp);
+
+ /* Set GCE SOF event */
+ if (is_bypass_gce_event(comp->type) ||
+ of_property_read_u32_index(node, "mediatek,gce-events",
+ MDP_GCE_EVENT_SOF, &event))
+ event = MDP_GCE_NO_EVENT;
+
+ comp->gce_event[MDP_GCE_EVENT_SOF] = event;
+
+ /* Set GCE EOF event */
+ if (is_dma_capable(comp->type)) {
+ if (of_property_read_u32_index(node, "mediatek,gce-events",
+ MDP_GCE_EVENT_EOF, &event)) {
+ dev_err(dev, "Component id %d has no EOF\n", id);
+ return -EINVAL;
+ }
+ } else {
+ event = MDP_GCE_NO_EVENT;
+ }
+
+ comp->gce_event[MDP_GCE_EVENT_EOF] = event;
+
+ return 0;
+}
+
+static void mdp_comp_deinit(struct mdp_comp *comp)
+{
+ if (!comp)
+ return;
+
+ if (comp->regs)
+ iounmap(comp->regs);
+}
+
+static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp,
+ struct device_node *node,
+ enum mtk_mdp_comp_id id)
+{
+ struct device *dev = &mdp->pdev->dev;
+ struct mdp_comp *comp;
+ int ret;
+
+ if (mdp->comp[id])
+ return ERR_PTR(-EEXIST);
+
+ comp = devm_kzalloc(dev, sizeof(*comp), GFP_KERNEL);
+ if (!comp)
+ return ERR_PTR(-ENOMEM);
+
+ ret = mdp_comp_init(mdp, node, comp, id);
+ if (ret) {
+ devm_kfree(dev, comp); /* comp was allocated with devm_kzalloc() */
+ return ERR_PTR(ret);
+ }
+ mdp->comp[id] = comp;
+ mdp->comp[id]->mdp_dev = mdp;
+
+ dev_dbg(dev, "%s type:%d alias:%d id:%d base:%#x regs:%p\n",
+ dev->of_node->name, comp->type, comp->alias_id, id,
+ (u32)comp->reg_base, comp->regs);
+ return comp;
+}
+
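+/*
+ * Create the PATH sub-components. They share the WDMA/WROT device nodes
+ * (see mdp_sub_comp_dt_ids) and, since they only steer the data flow,
+ * carry no GCE events of their own.
+ */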
+static int mdp_comp_sub_create(struct mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ struct device_node *node, *parent;
+
+ parent = dev->of_node->parent;
+
+ for_each_child_of_node(parent, node) {
+ const struct of_device_id *of_id;
+ enum mdp_comp_type type;
+ int id, alias_id;
+ struct mdp_comp *comp;
+
+ of_id = of_match_node(mdp_sub_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+ if (!of_device_is_available(node)) {
+ dev_dbg(dev, "Skipping disabled sub comp. %pOF\n",
+ node);
+ continue;
+ }
+
+ type = (enum mdp_comp_type)(uintptr_t)of_id->data;
+ alias_id = mdp_comp_alias_id[type];
+ id = mdp_comp_get_id(type, alias_id);
+ if (id < 0) {
+ dev_err(dev,
+ "Fail to get sub comp. id: type %d alias %d\n",
+ type, alias_id);
+ return -EINVAL;
+ }
+ mdp_comp_alias_id[type]++;
+
+ comp = mdp_comp_create(mdp, node, id);
+ if (IS_ERR(comp))
+ return PTR_ERR(comp);
+ }
+
+ return 0;
+}
+
+void mdp_comp_destroy(struct mdp_dev *mdp)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mdp->comp); i++) {
+ if (mdp->comp[i]) {
+ /* comp_dev is only set for DMA-capable components */
+ if (mdp->comp[i]->comp_dev)
+ pm_runtime_disable(mdp->comp[i]->comp_dev);
+ mdp_comp_deinit(mdp->comp[i]);
+ /* allocated with devm_kzalloc(), so no plain kfree() here */
+ devm_kfree(&mdp->pdev->dev, mdp->comp[i]);
+ mdp->comp[i] = NULL;
+ }
+ }
+}
+
+int mdp_comp_config(struct mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ struct device_node *node, *parent;
+ struct platform_device *pdev;
+ int ret;
+
+ memset(mdp_comp_alias_id, 0, sizeof(mdp_comp_alias_id));
+
+ parent = dev->of_node->parent;
+ /* Iterate over sibling MDP function blocks */
+ for_each_child_of_node(parent, node) {
+ const struct of_device_id *of_id;
+ enum mdp_comp_type type;
+ int id, alias_id;
+ struct mdp_comp *comp;
+
+ of_id = of_match_node(mdp_comp_dt_ids, node);
+ if (!of_id)
+ continue;
+
+ if (!of_device_is_available(node)) {
+ dev_dbg(dev, "Skipping disabled component %pOF\n",
+ node);
+ continue;
+ }
+
+ type = (enum mdp_comp_type)(uintptr_t)of_id->data;
+ alias_id = mdp_comp_alias_id[type];
+ id = mdp_comp_get_id(type, alias_id);
+ if (id < 0) {
+ dev_err(dev,
+ "Fail to get component id: type %d alias %d\n",
+ type, alias_id);
+ continue;
+ }
+ mdp_comp_alias_id[type]++;
+
+ comp = mdp_comp_create(mdp, node, id);
+ if (IS_ERR(comp)) {
+ ret = PTR_ERR(comp);
+ goto err_init_comps;
+ }
+
+ /* Only DMA capable components need the pm control */
+ comp->comp_dev = NULL;
+ if (!is_dma_capable(comp->type))
+ continue;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev) {
+ dev_warn(dev, "can't find platform device of node:%s\n",
+ node->name);
+ return -ENODEV;
+ }
+
+ comp->comp_dev = &pdev->dev;
+ pm_runtime_enable(comp->comp_dev);
+ }
+
+ ret = mdp_comp_sub_create(mdp);
+ if (ret)
+ goto err_init_comps;
+
+ return 0;
+
+err_init_comps:
+ mdp_comp_destroy(mdp);
+ return ret;
+}
+
+int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
+ const struct img_compparam *param,
+ const struct img_ipi_frameparam *frame)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int i;
+
+ if (param->type < 0 || param->type >= MDP_MAX_COMP_COUNT) {
+ dev_err(dev, "Invalid component id %d", param->type);
+ return -EINVAL;
+ }
+
+ ctx->comp = mdp->comp[param->type];
+ if (!ctx->comp) {
+ dev_err(dev, "Uninit component id %d", param->type);
+ return -EINVAL;
+ }
+
+ ctx->param = param;
+ ctx->input = &frame->inputs[param->input];
+ for (i = 0; i < param->num_outputs; i++)
+ ctx->outputs[i] = &frame->outputs[param->outputs[i]];
+ return 0;
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
new file mode 100644
index 000000000000..dc48f55ac4f7
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_COMP_H__
+#define __MTK_MDP3_COMP_H__
+
+#include "mtk-mdp3-cmdq.h"
+
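+/*
+ * Thin wrappers around the CMDQ packet helpers. MM_REG_WRITE and
+ * MM_REG_POLL widen the mask to 0xffffffff when the caller's mask already
+ * covers the register's full <ofst>_MASK, presumably so CMDQ can issue a
+ * plain access instead of a masked one.
+ */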
+#define MM_REG_WRITE_MASK(cmd, id, base, ofst, val, mask, ...) \
+ cmdq_pkt_write_mask(&((cmd)->pkt), id, \
+ (base) + (ofst), (val), (mask), ##__VA_ARGS__)
+
+#define MM_REG_WRITE(cmd, id, base, ofst, val, mask, ...) \
+do { \
+ typeof(mask) (m) = (mask); \
+ MM_REG_WRITE_MASK(cmd, id, base, ofst, val, \
+ (((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
+ (0xffffffff) : (m), ##__VA_ARGS__); \
+} while (0)
+
+#define MM_REG_WAIT(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_wfe(&((c)->pkt), (e), true); \
+} while (0)
+
+#define MM_REG_WAIT_NO_CLEAR(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_wfe(&((c)->pkt), (e), false); \
+} while (0)
+
+#define MM_REG_CLEAR(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_clear_event(&((c)->pkt), (e)); \
+} while (0)
+
+#define MM_REG_SET_EVENT(cmd, evt) \
+do { \
+ typeof(cmd) (c) = (cmd); \
+ typeof(evt) (e) = (evt); \
+ cmdq_pkt_set_event(&((c)->pkt), (e)); \
+} while (0)
+
+#define MM_REG_POLL_MASK(cmd, id, base, ofst, val, _mask, ...) \
+do { \
+ typeof(_mask) (_m) = (_mask); \
+ cmdq_pkt_poll_mask(&((cmd)->pkt), id, \
+ (base) + (ofst), (val), (_m), ##__VA_ARGS__); \
+} while (0)
+
+#define MM_REG_POLL(cmd, id, base, ofst, val, mask, ...) \
+do { \
+ typeof(mask) (m) = (mask); \
+ MM_REG_POLL_MASK((cmd), id, base, ofst, val, \
+ (((m) & (ofst##_MASK)) == (ofst##_MASK)) ? \
+ (0xffffffff) : (m), ##__VA_ARGS__); \
+} while (0)
+
+enum mtk_mdp_comp_id {
+ MDP_COMP_NONE = -1, /* Invalid engine */
+
+ /* ISP */
+ MDP_COMP_WPEI = 0,
+ MDP_COMP_WPEO, /* 1 */
+ MDP_COMP_WPEI2, /* 2 */
+ MDP_COMP_WPEO2, /* 3 */
+ MDP_COMP_ISP_IMGI, /* 4 */
+ MDP_COMP_ISP_IMGO, /* 5 */
+ MDP_COMP_ISP_IMG2O, /* 6 */
+
+ /* IPU */
+ MDP_COMP_IPUI, /* 7 */
+ MDP_COMP_IPUO, /* 8 */
+
+ /* MDP */
+ MDP_COMP_CAMIN, /* 9 */
+ MDP_COMP_CAMIN2, /* 10 */
+ MDP_COMP_RDMA0, /* 11 */
+ MDP_COMP_AAL0, /* 12 */
+ MDP_COMP_CCORR0, /* 13 */
+ MDP_COMP_RSZ0, /* 14 */
+ MDP_COMP_RSZ1, /* 15 */
+ MDP_COMP_TDSHP0, /* 16 */
+ MDP_COMP_COLOR0, /* 17 */
+ MDP_COMP_PATH0_SOUT, /* 18 */
+ MDP_COMP_PATH1_SOUT, /* 19 */
+ MDP_COMP_WROT0, /* 20 */
+ MDP_COMP_WDMA, /* 21 */
+
+ /* Dummy Engine */
+ MDP_COMP_RDMA1, /* 22 */
+ MDP_COMP_RSZ2, /* 23 */
+ MDP_COMP_TDSHP1, /* 24 */
+ MDP_COMP_WROT1, /* 25 */
+
+ MDP_MAX_COMP_COUNT /* ALWAYS keep at the end */
+};
+
+enum mdp_comp_type {
+ MDP_COMP_TYPE_INVALID = 0,
+
+ MDP_COMP_TYPE_RDMA,
+ MDP_COMP_TYPE_RSZ,
+ MDP_COMP_TYPE_WROT,
+ MDP_COMP_TYPE_WDMA,
+ MDP_COMP_TYPE_PATH,
+
+ MDP_COMP_TYPE_TDSHP,
+ MDP_COMP_TYPE_COLOR,
+ MDP_COMP_TYPE_DRE,
+ MDP_COMP_TYPE_CCORR,
+ MDP_COMP_TYPE_HDR,
+
+ MDP_COMP_TYPE_IMGI,
+ MDP_COMP_TYPE_WPEI,
+ MDP_COMP_TYPE_EXTO, /* External path */
+ MDP_COMP_TYPE_DL_PATH, /* Direct-link path */
+
+ MDP_COMP_TYPE_COUNT /* ALWAYS keep at the end */
+};
+
+#define MDP_GCE_NO_EVENT (-1)
+enum {
+ MDP_GCE_EVENT_SOF = 0,
+ MDP_GCE_EVENT_EOF = 1,
+ MDP_GCE_EVENT_MAX,
+};
+
+struct mdp_comp_ops;
+
+struct mdp_comp {
+ struct mdp_dev *mdp_dev;
+ void __iomem *regs;
+ phys_addr_t reg_base;
+ u8 subsys_id;
+ struct clk *clks[6];
+ struct device *comp_dev;
+ enum mdp_comp_type type;
+ enum mtk_mdp_comp_id id;
+ u32 alias_id;
+ s32 gce_event[MDP_GCE_EVENT_MAX];
+ const struct mdp_comp_ops *ops;
+};
+
+struct mdp_comp_ctx {
+ struct mdp_comp *comp;
+ const struct img_compparam *param;
+ const struct img_input *input;
+ const struct img_output *outputs[IMG_MAX_HW_OUTPUTS];
+};
+
+struct mdp_comp_ops {
+ s64 (*get_comp_flag)(const struct mdp_comp_ctx *ctx);
+ int (*init_comp)(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd);
+ int (*config_frame)(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd,
+ const struct v4l2_rect *compose);
+ int (*config_subfrm)(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index);
+ int (*wait_comp_event)(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd);
+ int (*advance_subfrm)(struct mdp_comp_ctx *ctx,
+ struct mdp_cmdq_cmd *cmd, u32 index);
+ int (*post_process)(struct mdp_comp_ctx *ctx, struct mdp_cmdq_cmd *cmd);
+};
+
+struct mdp_dev;
+
+int mdp_comp_config(struct mdp_dev *mdp);
+void mdp_comp_destroy(struct mdp_dev *mdp);
+int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp);
+void mdp_comp_clock_off(struct device *dev, struct mdp_comp *comp);
+int mdp_comp_clocks_on(struct device *dev, struct mdp_comp *comps, int num);
+void mdp_comp_clocks_off(struct device *dev, struct mdp_comp *comps, int num);
+int mdp_comp_ctx_config(struct mdp_dev *mdp, struct mdp_comp_ctx *ctx,
+ const struct img_compparam *param,
+ const struct img_ipi_frameparam *frame);
+
+#endif /* __MTK_MDP3_COMP_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
new file mode 100644
index 000000000000..cde59579b7ae
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include <media/videobuf2-dma-contig.h>
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-m2m.h"
+
+static const struct mdp_platform_config mt8183_plat_cfg = {
+ .rdma_support_10bit = true,
+ .rdma_rsz1_sram_sharing = true,
+ .rdma_upsample_repeat_only = true,
+ .rsz_disable_dcm_small_sample = false,
+ .wrot_filter_constraint = false,
+};
+
+static const struct of_device_id mt8183_mdp_probe_infra[MDP_INFRA_MAX] = {
+ [MDP_INFRA_MMSYS] = { .compatible = "mediatek,mt8183-mmsys" },
+ [MDP_INFRA_MUTEX] = { .compatible = "mediatek,mt8183-disp-mutex" },
+ [MDP_INFRA_SCP] = { .compatible = "mediatek,mt8183-scp" }
+};
+
+static const u32 mt8183_mutex_idx[MDP_MAX_COMP_COUNT] = {
+ [MDP_COMP_RDMA0] = MUTEX_MOD_IDX_MDP_RDMA0,
+ [MDP_COMP_RSZ0] = MUTEX_MOD_IDX_MDP_RSZ0,
+ [MDP_COMP_RSZ1] = MUTEX_MOD_IDX_MDP_RSZ1,
+ [MDP_COMP_TDSHP0] = MUTEX_MOD_IDX_MDP_TDSHP0,
+ [MDP_COMP_WROT0] = MUTEX_MOD_IDX_MDP_WROT0,
+ [MDP_COMP_WDMA] = MUTEX_MOD_IDX_MDP_WDMA,
+ [MDP_COMP_AAL0] = MUTEX_MOD_IDX_MDP_AAL0,
+ [MDP_COMP_CCORR0] = MUTEX_MOD_IDX_MDP_CCORR0,
+};
+
+static const struct mtk_mdp_driver_data mt8183_mdp_driver_data = {
+ .mdp_probe_infra = mt8183_mdp_probe_infra,
+ .mdp_cfg = &mt8183_plat_cfg,
+ .mdp_mutex_table_idx = mt8183_mutex_idx,
+};
+
+static const struct of_device_id mdp_of_ids[] = {
+ { .compatible = "mediatek,mt8183-mdp3-rdma",
+ .data = &mt8183_mdp_driver_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, mdp_of_ids);
+
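+/*
+ * Look up one of the infrastructure devices (mmsys, mutex, scp) through
+ * the compatible string recorded in the platform driver data.
+ */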
+static struct platform_device *__get_pdev_by_id(struct platform_device *pdev,
+ enum mdp_infra_id id)
+{
+ struct device_node *node;
+ struct platform_device *mdp_pdev = NULL;
+ const struct mtk_mdp_driver_data *mdp_data;
+ const char *compat;
+
+ if (!pdev)
+ return NULL;
+
+ if (id < MDP_INFRA_MMSYS || id >= MDP_INFRA_MAX) {
+ dev_err(&pdev->dev, "Illegal infra id %d\n", id);
+ return NULL;
+ }
+
+ mdp_data = of_device_get_match_data(&pdev->dev);
+ if (!mdp_data) {
+ dev_err(&pdev->dev, "have no driver data to find node\n");
+ return NULL;
+ }
+ compat = mdp_data->mdp_probe_infra[id].compatible;
+
+ node = of_find_compatible_node(NULL, NULL, compat);
+ if (WARN_ON(!node)) {
+ dev_err(&pdev->dev, "find node from id %d failed\n", id);
+ return NULL;
+ }
+
+ mdp_pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (WARN_ON(!mdp_pdev)) {
+ dev_err(&pdev->dev, "find pdev from id %d failed\n", id);
+ return NULL;
+ }
+
+ return mdp_pdev;
+}
+
+struct platform_device *mdp_get_plat_device(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *mdp_node;
+ struct platform_device *mdp_pdev;
+
+ mdp_node = of_parse_phandle(dev->of_node, MDP_PHANDLE_NAME, 0);
+ if (!mdp_node) {
+ dev_err(dev, "can't get node %s\n", MDP_PHANDLE_NAME);
+ return NULL;
+ }
+
+ mdp_pdev = of_find_device_by_node(mdp_node);
+ of_node_put(mdp_node);
+
+ return mdp_pdev;
+}
+EXPORT_SYMBOL_GPL(mdp_get_plat_device);
+
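+/*
+ * Boot the SCP and initialize the VPU interface on first use; later
+ * callers only bump vpu_count. The _locked suffix indicates the caller is
+ * expected to serialize these calls, as vpu_count is not protected here.
+ */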
+int mdp_vpu_get_locked(struct mdp_dev *mdp)
+{
+ int ret = 0;
+
+ if (mdp->vpu_count++ == 0) {
+ ret = rproc_boot(mdp->rproc_handle);
+ if (ret) {
+ dev_err(&mdp->pdev->dev,
+ "vpu_load_firmware failed %d\n", ret);
+ goto err_load_vpu;
+ }
+ ret = mdp_vpu_register(mdp);
+ if (ret) {
+ dev_err(&mdp->pdev->dev,
+ "mdp_vpu register failed %d\n", ret);
+ goto err_reg_vpu;
+ }
+ ret = mdp_vpu_dev_init(&mdp->vpu, mdp->scp, &mdp->vpu_lock);
+ if (ret) {
+ dev_err(&mdp->pdev->dev,
+ "mdp_vpu device init failed %d\n", ret);
+ goto err_init_vpu;
+ }
+ }
+ return 0;
+
+err_init_vpu:
+ mdp_vpu_unregister(mdp);
+err_reg_vpu:
+err_load_vpu:
+ mdp->vpu_count--;
+ return ret;
+}
+
+void mdp_vpu_put_locked(struct mdp_dev *mdp)
+{
+ if (--mdp->vpu_count == 0) {
+ mdp_vpu_dev_deinit(&mdp->vpu);
+ mdp_vpu_unregister(mdp);
+ }
+}
+
+void mdp_video_device_release(struct video_device *vdev)
+{
+ struct mdp_dev *mdp = (struct mdp_dev *)video_get_drvdata(vdev);
+ int i;
+
+ scp_put(mdp->scp);
+
+ destroy_workqueue(mdp->job_wq);
+ destroy_workqueue(mdp->clock_wq);
+
+ pm_runtime_disable(&mdp->pdev->dev);
+
+ vb2_dma_contig_clear_max_seg_size(&mdp->pdev->dev);
+
+ mdp_comp_destroy(mdp);
+ for (i = 0; i < MDP_PIPE_MAX; i++)
+ mtk_mutex_put(mdp->mdp_mutex[i]);
+
+ mdp_vpu_shared_mem_free(&mdp->vpu);
+ v4l2_m2m_release(mdp->m2m_dev);
+ kfree(mdp);
+}
+
+static int mdp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mdp_dev *mdp;
+ struct platform_device *mm_pdev;
+ int ret, i;
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp) {
+ ret = -ENOMEM;
+ goto err_return;
+ }
+
+ mdp->pdev = pdev;
+ mdp->mdp_data = of_device_get_match_data(&pdev->dev);
+
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MMSYS);
+ if (!mm_pdev) {
+ ret = -ENODEV;
+ goto err_return;
+ }
+ mdp->mdp_mmsys = &mm_pdev->dev;
+
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_MUTEX);
+ if (WARN_ON(!mm_pdev)) {
+ ret = -ENODEV;
+ goto err_return;
+ }
+ for (i = 0; i < MDP_PIPE_MAX; i++) {
+ mdp->mdp_mutex[i] = mtk_mutex_get(&mm_pdev->dev);
+ if (!mdp->mdp_mutex[i]) {
+ ret = -ENODEV;
+ goto err_return;
+ }
+ }
+
+ ret = mdp_comp_config(mdp);
+ if (ret) {
+ dev_err(dev, "Failed to config mdp components\n");
+ goto err_return;
+ }
+
+ mdp->job_wq = alloc_workqueue(MDP_MODULE_NAME, WQ_FREEZABLE, 0);
+ if (!mdp->job_wq) {
+ dev_err(dev, "Unable to create job workqueue\n");
+ ret = -ENOMEM;
+ goto err_deinit_comp;
+ }
+
+ mdp->clock_wq = alloc_workqueue(MDP_MODULE_NAME "-clock", WQ_FREEZABLE,
+ 0);
+ if (!mdp->clock_wq) {
+ dev_err(dev, "Unable to create clock workqueue\n");
+ ret = -ENOMEM;
+ goto err_destroy_job_wq;
+ }
+
+ mm_pdev = __get_pdev_by_id(pdev, MDP_INFRA_SCP);
+ if (WARN_ON(!mm_pdev)) {
+ dev_err(&pdev->dev, "Could not get scp device\n");
+ ret = -ENODEV;
+ goto err_destroy_clock_wq;
+ }
+ mdp->scp = platform_get_drvdata(mm_pdev);
+ mdp->rproc_handle = scp_get_rproc(mdp->scp);
+ dev_dbg(&pdev->dev, "MDP rproc_handle: %pK", mdp->rproc_handle);
+
+ mutex_init(&mdp->vpu_lock);
+ mutex_init(&mdp->m2m_lock);
+
+ mdp->cmdq_clt = cmdq_mbox_create(dev, 0);
+ if (IS_ERR(mdp->cmdq_clt)) {
+ ret = PTR_ERR(mdp->cmdq_clt);
+ goto err_put_scp;
+ }
+
+ init_waitqueue_head(&mdp->callback_wq);
+ ida_init(&mdp->mdp_ida);
+ platform_set_drvdata(pdev, mdp);
+
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ ret = v4l2_device_register(dev, &mdp->v4l2_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register v4l2 device\n");
+ ret = -EINVAL;
+ goto err_mbox_destroy;
+ }
+
+ ret = mdp_m2m_device_register(mdp);
+ if (ret) {
+ v4l2_err(&mdp->v4l2_dev, "Failed to register m2m device\n");
+ goto err_unregister_device;
+ }
+
+ dev_dbg(dev, "mdp-%d registered successfully\n", pdev->id);
+ return 0;
+
+err_unregister_device:
+ v4l2_device_unregister(&mdp->v4l2_dev);
+err_mbox_destroy:
+ cmdq_mbox_destroy(mdp->cmdq_clt);
+err_put_scp:
+ scp_put(mdp->scp);
+err_destroy_clock_wq:
+ destroy_workqueue(mdp->clock_wq);
+err_destroy_job_wq:
+ destroy_workqueue(mdp->job_wq);
+err_deinit_comp:
+ mdp_comp_destroy(mdp);
+err_return:
+ if (mdp) {
+ for (i = 0; i < MDP_PIPE_MAX; i++) {
+ if (mdp->mdp_mutex[i])
+ mtk_mutex_put(mdp->mdp_mutex[i]);
+ }
+ }
+ kfree(mdp);
+ dev_dbg(dev, "Errno %d\n", ret);
+ return ret;
+}
+
+static int mdp_remove(struct platform_device *pdev)
+{
+ struct mdp_dev *mdp = platform_get_drvdata(pdev);
+
+ v4l2_device_unregister(&mdp->v4l2_dev);
+
+ dev_dbg(&pdev->dev, "%s driver unloaded\n", pdev->name);
+ return 0;
+}
+
+static int __maybe_unused mdp_suspend(struct device *dev)
+{
+ struct mdp_dev *mdp = dev_get_drvdata(dev);
+ int ret;
+
+ atomic_set(&mdp->suspended, 1);
+
+ if (atomic_read(&mdp->job_count)) {
+ ret = wait_event_timeout(mdp->callback_wq,
+ !atomic_read(&mdp->job_count),
+ 2 * HZ);
+ if (ret == 0) {
+ dev_err(dev,
+ "%s:flushed cmdq task incomplete, count=%d\n",
+ __func__, atomic_read(&mdp->job_count));
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mdp_resume(struct device *dev)
+{
+ struct mdp_dev *mdp = dev_get_drvdata(dev);
+
+ atomic_set(&mdp->suspended, 0);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mdp_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mdp_suspend, mdp_resume)
+};
+
+static struct platform_driver mdp_driver = {
+ .probe = mdp_probe,
+ .remove = mdp_remove,
+ .driver = {
+ .name = MDP_MODULE_NAME,
+ .pm = &mdp_pm_ops,
+ .of_match_table = of_match_ptr(mdp_of_ids),
+ },
+};
+
+module_platform_driver(mdp_driver);
+
+MODULE_AUTHOR("Ping-Hsun Wu <ping-hsun.wu@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek image processor 3 driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
new file mode 100644
index 000000000000..2ef5fbc4f25a
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_CORE_H__
+#define __MTK_MDP3_CORE_H__
+
+#include <media/v4l2-device.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
+#include <linux/soc/mediatek/mtk-mutex.h>
+#include "mtk-mdp3-comp.h"
+#include "mtk-mdp3-vpu.h"
+
+#define MDP_MODULE_NAME "mtk-mdp3"
+#define MDP_DEVICE_NAME "MediaTek MDP3"
+#define MDP_PHANDLE_NAME "mediatek,mdp3"
+
+enum mdp_infra_id {
+ MDP_INFRA_MMSYS,
+ MDP_INFRA_MUTEX,
+ MDP_INFRA_SCP,
+ MDP_INFRA_MAX
+};
+
+enum mdp_buffer_usage {
+ MDP_BUFFER_USAGE_HW_READ,
+ MDP_BUFFER_USAGE_MDP,
+ MDP_BUFFER_USAGE_MDP2,
+ MDP_BUFFER_USAGE_ISP,
+ MDP_BUFFER_USAGE_WPE,
+};
+
+struct mdp_platform_config {
+ bool rdma_support_10bit;
+ bool rdma_rsz1_sram_sharing;
+ bool rdma_upsample_repeat_only;
+ bool rsz_disable_dcm_small_sample;
+ bool wrot_filter_constraint;
+};
+
+/* Indicates which mutex is used by each pipeline */
+enum mdp_pipe_id {
+ MDP_PIPE_RDMA0,
+ MDP_PIPE_IMGI,
+ MDP_PIPE_WPEI,
+ MDP_PIPE_WPEI2,
+ MDP_PIPE_MAX
+};
+
+struct mtk_mdp_driver_data {
+ const struct of_device_id *mdp_probe_infra;
+ const struct mdp_platform_config *mdp_cfg;
+ const u32 *mdp_mutex_table_idx;
+};
+
+struct mdp_dev {
+ struct platform_device *pdev;
+ struct device *mdp_mmsys;
+ struct mtk_mutex *mdp_mutex[MDP_PIPE_MAX];
+ struct mdp_comp *comp[MDP_MAX_COMP_COUNT];
+ const struct mtk_mdp_driver_data *mdp_data;
+
+ struct workqueue_struct *job_wq;
+ struct workqueue_struct *clock_wq;
+ struct mdp_vpu_dev vpu;
+ struct mtk_scp *scp;
+ struct rproc *rproc_handle;
+ /* protects access to the vpu working buffer info */
+ struct mutex vpu_lock;
+ s32 vpu_count;
+ u32 id_count;
+ struct ida mdp_ida;
+ struct cmdq_client *cmdq_clt;
+ wait_queue_head_t callback_wq;
+
+ struct v4l2_device v4l2_dev;
+ struct video_device *m2m_vdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ /* protects m2m device operations */
+ struct mutex m2m_lock;
+ atomic_t suspended;
+ atomic_t job_count;
+};
+
+int mdp_vpu_get_locked(struct mdp_dev *mdp);
+void mdp_vpu_put_locked(struct mdp_dev *mdp);
+int mdp_vpu_register(struct mdp_dev *mdp);
+void mdp_vpu_unregister(struct mdp_dev *mdp);
+void mdp_video_device_release(struct video_device *vdev);
+
+#endif /* __MTK_MDP3_CORE_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
new file mode 100644
index 000000000000..5f74ea3b7a52
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.c
@@ -0,0 +1,724 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/platform_device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-event.h>
+#include <media/videobuf2-dma-contig.h>
+#include "mtk-mdp3-m2m.h"
+
+static inline struct mdp_m2m_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct mdp_m2m_ctx, fh);
+}
+
+static inline struct mdp_m2m_ctx *ctrl_to_ctx(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mdp_m2m_ctx, ctrl_handler);
+}
+
+static inline struct mdp_frame *ctx_get_frame(struct mdp_m2m_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return &ctx->curr_param.output;
+ else
+ return &ctx->curr_param.captures[0];
+}
+
+static inline void mdp_m2m_ctx_set_state(struct mdp_m2m_ctx *ctx, u32 state)
+{
+ atomic_or(state, &ctx->curr_param.state);
+}
+
+static inline bool mdp_m2m_ctx_is_state_set(struct mdp_m2m_ctx *ctx, u32 mask)
+{
+ return ((atomic_read(&ctx->curr_param.state) & mask) == mask);
+}
+
+static void mdp_m2m_process_done(void *priv, int vb_state)
+{
+ struct mdp_m2m_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
+
+ src_vbuf = (struct vb2_v4l2_buffer *)
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ dst_vbuf = (struct vb2_v4l2_buffer *)
+ v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ ctx->curr_param.frame_no = ctx->frame_count[MDP_M2M_SRC];
+ src_vbuf->sequence = ctx->frame_count[MDP_M2M_SRC]++;
+ dst_vbuf->sequence = ctx->frame_count[MDP_M2M_DST]++;
+ v4l2_m2m_buf_copy_metadata(src_vbuf, dst_vbuf, true);
+
+ v4l2_m2m_buf_done(src_vbuf, vb_state);
+ v4l2_m2m_buf_done(dst_vbuf, vb_state);
+ v4l2_m2m_job_finish(ctx->mdp_dev->m2m_dev, ctx->m2m_ctx);
+}
+
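+/*
+ * Job worker called by the V4L2 mem2mem framework: build the frame
+ * parameters from the next source/destination buffer pair, let the VPU
+ * compute the path configuration, then send the resulting command buffer
+ * through CMDQ. Any failure completes the job with VB2_BUF_STATE_ERROR.
+ */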
+static void mdp_m2m_device_run(void *priv)
+{
+ struct mdp_m2m_ctx *ctx = priv;
+ struct mdp_frame *frame;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ struct img_ipi_frameparam param = {};
+ struct mdp_cmdq_param task = {};
+ enum vb2_buffer_state vb_state = VB2_BUF_STATE_ERROR;
+ int ret;
+
+ if (mdp_m2m_ctx_is_state_set(ctx, MDP_M2M_CTX_ERROR)) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "mdp_m2m_ctx is in error state\n");
+ goto worker_end;
+ }
+
+ param.frame_no = ctx->curr_param.frame_no;
+ param.type = ctx->curr_param.type;
+ param.num_inputs = 1;
+ param.num_outputs = 1;
+
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ mdp_set_src_config(&param.inputs[0], frame, &src_vb->vb2_buf);
+
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ mdp_set_dst_config(&param.outputs[0], frame, &dst_vb->vb2_buf);
+
+ ret = mdp_vpu_process(&ctx->vpu, &param);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "VPU MDP process failed: %d\n", ret);
+ goto worker_end;
+ }
+
+ task.config = ctx->vpu.config;
+ task.param = &param;
+ task.composes[0] = &frame->compose;
+ task.cmdq_cb = NULL;
+ task.cb_data = NULL;
+ task.mdp_ctx = ctx;
+
+ ret = mdp_cmdq_send(ctx->mdp_dev, &task);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "CMDQ sendtask failed: %d\n", ret);
+ goto worker_end;
+ }
+
+ return;
+
+worker_end:
+ mdp_m2m_process_done(ctx, vb_state);
+}
+
+static int mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct mdp_frame *capture;
+ struct vb2_queue *vq;
+ int ret;
+ bool out_streaming, cap_streaming;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->frame_count[MDP_M2M_SRC] = 0;
+
+ if (V4L2_TYPE_IS_CAPTURE(q->type))
+ ctx->frame_count[MDP_M2M_DST] = 0;
+
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ vq = v4l2_m2m_get_src_vq(ctx->m2m_ctx);
+ out_streaming = vb2_is_streaming(vq);
+ vq = v4l2_m2m_get_dst_vq(ctx->m2m_ctx);
+ cap_streaming = vb2_is_streaming(vq);
+
+ /* Check to see if scaling ratio is within supported range */
+ if ((V4L2_TYPE_IS_OUTPUT(q->type) && cap_streaming) ||
+ (V4L2_TYPE_IS_CAPTURE(q->type) && out_streaming)) {
+ ret = mdp_check_scaling_ratio(&capture->crop.c,
+ &capture->compose,
+ capture->rotation,
+ ctx->curr_param.limit);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Out of scaling range\n");
+ return ret;
+ }
+ }
+
+ if (!mdp_m2m_ctx_is_state_set(ctx, MDP_VPU_INIT)) {
+ ret = mdp_vpu_get_locked(ctx->mdp_dev);
+ if (ret)
+ return ret;
+
+ ret = mdp_vpu_ctx_init(&ctx->vpu, &ctx->mdp_dev->vpu,
+ MDP_DEV_M2M);
+ if (ret) {
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "VPU init failed %d\n", ret);
+ return -EINVAL;
+ }
+ mdp_m2m_ctx_set_state(ctx, MDP_VPU_INIT);
+ }
+
+ return 0;
+}
+
+static struct vb2_v4l2_buffer *mdp_m2m_buf_remove(struct mdp_m2m_ctx *ctx,
+ unsigned int type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return (struct vb2_v4l2_buffer *)
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ else
+ return (struct vb2_v4l2_buffer *)
+ v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+}
+
+static void mdp_m2m_stop_streaming(struct vb2_queue *q)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vb;
+
+ vb = mdp_m2m_buf_remove(ctx, q->type);
+ while (vb) {
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
+ vb = mdp_m2m_buf_remove(ctx, q->type);
+ }
+}
+
+static int mdp_m2m_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(q);
+ struct v4l2_pix_format_mplane *pix_mp;
+ u32 i;
+
+ pix_mp = &ctx_get_frame(ctx, q->type)->format.fmt.pix_mp;
+
+ /* from VIDIOC_CREATE_BUFS */
+ if (*num_planes) {
+ if (*num_planes != pix_mp->num_planes)
+ return -EINVAL;
+ for (i = 0; i < pix_mp->num_planes; ++i)
+ if (sizes[i] < pix_mp->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ } else {/* from VIDIOC_REQBUFS */
+ *num_planes = pix_mp->num_planes;
+ for (i = 0; i < pix_mp->num_planes; ++i)
+ sizes[i] = pix_mp->plane_fmt[i].sizeimage;
+ }
+
+ return 0;
+}
+
+static int mdp_m2m_buf_prepare(struct vb2_buffer *vb)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct v4l2_pix_format_mplane *pix_mp;
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+ u32 i;
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ if (V4L2_TYPE_IS_CAPTURE(vb->type)) {
+ pix_mp = &ctx_get_frame(ctx, vb->type)->format.fmt.pix_mp;
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ vb2_set_plane_payload(vb, i,
+ pix_mp->plane_fmt[i].sizeimage);
+ }
+ }
+ return 0;
+}
+
+static int mdp_m2m_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ return 0;
+}
+
+static void mdp_m2m_buf_queue(struct vb2_buffer *vb)
+{
+ struct mdp_m2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
+
+ v4l2_buf->field = V4L2_FIELD_NONE;
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+}
+
+static const struct vb2_ops mdp_m2m_qops = {
+ .queue_setup = mdp_m2m_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .buf_prepare = mdp_m2m_buf_prepare,
+ .start_streaming = mdp_m2m_start_streaming,
+ .stop_streaming = mdp_m2m_stop_streaming,
+ .buf_queue = mdp_m2m_buf_queue,
+ .buf_out_validate = mdp_m2m_buf_out_validate,
+};
+
+static int mdp_m2m_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, MDP_MODULE_NAME, sizeof(cap->driver));
+ strscpy(cap->card, MDP_DEVICE_NAME, sizeof(cap->card));
+
+ return 0;
+}
+
+static int mdp_m2m_enum_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f)
+{
+ return mdp_enum_fmt_mplane(f);
+}
+
+static int mdp_m2m_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame;
+ struct v4l2_pix_format_mplane *pix_mp;
+
+ frame = ctx_get_frame(ctx, f->type);
+ *f = frame->format;
+ pix_mp = &f->fmt.pix_mp;
+ pix_mp->colorspace = ctx->curr_param.colorspace;
+ pix_mp->xfer_func = ctx->curr_param.xfer_func;
+ pix_mp->ycbcr_enc = ctx->curr_param.ycbcr_enc;
+ pix_mp->quantization = ctx->curr_param.quant;
+
+ return 0;
+}
+
+static int mdp_m2m_s_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame = ctx_get_frame(ctx, f->type);
+ struct mdp_frame *capture;
+ const struct mdp_format *fmt;
+ struct vb2_queue *vq;
+
+ fmt = mdp_try_fmt_mplane(f, &ctx->curr_param, ctx->id);
+ if (!fmt)
+ return -EINVAL;
+
+ vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ frame->format = *f;
+ frame->mdp_fmt = fmt;
+ frame->ycbcr_prof = mdp_map_ycbcr_prof_mplane(f, fmt->mdp_color);
+ frame->usage = V4L2_TYPE_IS_OUTPUT(f->type) ?
+ MDP_BUFFER_USAGE_HW_READ : MDP_BUFFER_USAGE_MDP;
+
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (V4L2_TYPE_IS_OUTPUT(f->type)) {
+ capture->crop.c.left = 0;
+ capture->crop.c.top = 0;
+ capture->crop.c.width = f->fmt.pix_mp.width;
+ capture->crop.c.height = f->fmt.pix_mp.height;
+ ctx->curr_param.colorspace = f->fmt.pix_mp.colorspace;
+ ctx->curr_param.ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
+ ctx->curr_param.quant = f->fmt.pix_mp.quantization;
+ ctx->curr_param.xfer_func = f->fmt.pix_mp.xfer_func;
+ } else {
+ capture->compose.left = 0;
+ capture->compose.top = 0;
+ capture->compose.width = f->fmt.pix_mp.width;
+ capture->compose.height = f->fmt.pix_mp.height;
+ }
+
+ return 0;
+}
+
+static int mdp_m2m_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+
+ if (!mdp_try_fmt_mplane(f, &ctx->curr_param, ctx->id))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int mdp_m2m_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame;
+ bool valid = false;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ valid = mdp_target_is_crop(s->target);
+ else if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ valid = mdp_target_is_compose(s->target);
+
+ if (!valid)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_CROP:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ s->r = frame->crop.c;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ s->r = frame->compose;
+ return 0;
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, s->type);
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->format.fmt.pix_mp.width;
+ s->r.height = frame->format.fmt.pix_mp.height;
+ return 0;
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ return -EINVAL;
+ frame = ctx_get_frame(ctx, s->type);
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = frame->format.fmt.pix_mp.width;
+ s->r.height = frame->format.fmt.pix_mp.height;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int mdp_m2m_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(fh);
+ struct mdp_frame *frame = ctx_get_frame(ctx, s->type);
+ struct mdp_frame *capture;
+ struct v4l2_rect r;
+ struct device *dev = &ctx->mdp_dev->pdev->dev;
+ bool valid = false;
+ int ret;
+
+ if (s->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ valid = (s->target == V4L2_SEL_TGT_CROP);
+ else if (s->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ valid = (s->target == V4L2_SEL_TGT_COMPOSE);
+
+ if (!valid) {
+ dev_dbg(dev, "[%s:%d] invalid type:%u target:%u", __func__,
+ ctx->id, s->type, s->target);
+ return -EINVAL;
+ }
+
+ ret = mdp_try_crop(ctx, &r, s, frame);
+ if (ret)
+ return ret;
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ if (mdp_target_is_crop(s->target))
+ capture->crop.c = r;
+ else
+ capture->compose = r;
+
+ s->r = r;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mdp_m2m_ioctl_ops = {
+ .vidioc_querycap = mdp_m2m_querycap,
+ .vidioc_enum_fmt_vid_cap = mdp_m2m_enum_fmt_mplane,
+ .vidioc_enum_fmt_vid_out = mdp_m2m_enum_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = mdp_m2m_g_fmt_mplane,
+ .vidioc_g_fmt_vid_out_mplane = mdp_m2m_g_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mdp_m2m_s_fmt_mplane,
+ .vidioc_s_fmt_vid_out_mplane = mdp_m2m_s_fmt_mplane,
+ .vidioc_try_fmt_vid_cap_mplane = mdp_m2m_try_fmt_mplane,
+ .vidioc_try_fmt_vid_out_mplane = mdp_m2m_try_fmt_mplane,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+ .vidioc_g_selection = mdp_m2m_g_selection,
+ .vidioc_s_selection = mdp_m2m_s_selection,
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static int mdp_m2m_queue_init(void *priv,
+ struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mdp_m2m_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->ops = &mdp_m2m_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->dev = &ctx->mdp_dev->pdev->dev;
+ src_vq->lock = &ctx->ctx_lock;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->ops = &mdp_m2m_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->dev = &ctx->mdp_dev->pdev->dev;
+ dst_vq->lock = &ctx->ctx_lock;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int mdp_m2m_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mdp_m2m_ctx *ctx = ctrl_to_ctx(ctrl);
+ struct mdp_frame *capture;
+
+ capture = ctx_get_frame(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ capture->hflip = ctrl->val;
+ break;
+ case V4L2_CID_VFLIP:
+ capture->vflip = ctrl->val;
+ break;
+ case V4L2_CID_ROTATE:
+ capture->rotation = ctrl->val;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops mdp_m2m_ctrl_ops = {
+ .s_ctrl = mdp_m2m_s_ctrl,
+};
+
+static int mdp_m2m_ctrls_create(struct mdp_m2m_ctx *ctx)
+{
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, MDP_MAX_CTRLS);
+ ctx->ctrls.hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mdp_m2m_ctrl_ops, V4L2_CID_HFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mdp_m2m_ctrl_ops, V4L2_CID_VFLIP,
+ 0, 1, 1, 0);
+ ctx->ctrls.rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler,
+ &mdp_m2m_ctrl_ops,
+ V4L2_CID_ROTATE, 0, 270, 90, 0);
+
+ if (ctx->ctrl_handler.error) {
+ int err = ctx->ctrl_handler.error;
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ dev_err(&ctx->mdp_dev->pdev->dev,
+ "Failed to register controls\n");
+ return err;
+ }
+ return 0;
+}
+
+static int mdp_m2m_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mdp_dev *mdp = video_get_drvdata(vdev);
+ struct mdp_m2m_ctx *ctx;
+ struct device *dev = &mdp->pdev->dev;
+ int ret;
+ struct v4l2_format default_format = {};
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (mutex_lock_interruptible(&mdp->m2m_lock)) {
+ ret = -ERESTARTSYS;
+ goto err_free_ctx;
+ }
+
+ ctx->id = ida_alloc(&mdp->mdp_ida, GFP_KERNEL);
+ ctx->mdp_dev = mdp;
+
+ v4l2_fh_init(&ctx->fh, vdev);
+ file->private_data = &ctx->fh;
+ ret = mdp_m2m_ctrls_create(ctx);
+ if (ret)
+ goto err_exit_fh;
+
+ /* Use separate control handler per file handle */
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+ v4l2_fh_add(&ctx->fh);
+
+ mutex_init(&ctx->ctx_lock);
+ ctx->m2m_ctx = v4l2_m2m_ctx_init(mdp->m2m_dev, ctx, mdp_m2m_queue_init);
+ if (IS_ERR(ctx->m2m_ctx)) {
+ dev_err(dev, "Failed to initialize m2m context\n");
+ ret = PTR_ERR(ctx->m2m_ctx);
+ goto err_release_handler;
+ }
+ ctx->fh.m2m_ctx = ctx->m2m_ctx;
+
+ ctx->curr_param.ctx = ctx;
+ ret = mdp_frameparam_init(&ctx->curr_param);
+ if (ret) {
+ dev_err(dev, "Failed to initialize mdp parameter\n");
+ goto err_release_m2m_ctx;
+ }
+
+ mutex_unlock(&mdp->m2m_lock);
+
+ /* Default format */
+ default_format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ default_format.fmt.pix_mp.width = 32;
+ default_format.fmt.pix_mp.height = 32;
+ default_format.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_YUV420M;
+ mdp_m2m_s_fmt_mplane(file, &ctx->fh, &default_format);
+ default_format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ mdp_m2m_s_fmt_mplane(file, &ctx->fh, &default_format);
+
+ dev_dbg(dev, "%s:[%d]", __func__, ctx->id);
+
+ return 0;
+
+err_release_m2m_ctx:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_release_handler:
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+err_exit_fh:
+ v4l2_fh_exit(&ctx->fh);
+ mutex_unlock(&mdp->m2m_lock);
+err_free_ctx:
+ kfree(ctx);
+
+ return ret;
+}
+
+static int mdp_m2m_release(struct file *file)
+{
+ struct mdp_m2m_ctx *ctx = fh_to_ctx(file->private_data);
+ struct mdp_dev *mdp = video_drvdata(file);
+ struct device *dev = &mdp->pdev->dev;
+
+ mutex_lock(&mdp->m2m_lock);
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ if (mdp_m2m_ctx_is_state_set(ctx, MDP_VPU_INIT)) {
+ mdp_vpu_ctx_deinit(&ctx->vpu);
+ mdp_vpu_put_locked(mdp);
+ }
+
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ ida_free(&mdp->mdp_ida, ctx->id);
+ mutex_unlock(&mdp->m2m_lock);
+
+ dev_dbg(dev, "%s:[%d]", __func__, ctx->id);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations mdp_m2m_fops = {
+ .owner = THIS_MODULE,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+ .open = mdp_m2m_open,
+ .release = mdp_m2m_release,
+};
+
+static const struct v4l2_m2m_ops mdp_m2m_ops = {
+ .device_run = mdp_m2m_device_run,
+};
+
+int mdp_m2m_device_register(struct mdp_dev *mdp)
+{
+ struct device *dev = &mdp->pdev->dev;
+ int ret = 0;
+
+ mdp->m2m_vdev = video_device_alloc();
+ if (!mdp->m2m_vdev) {
+ dev_err(dev, "Failed to allocate video device\n");
+ ret = -ENOMEM;
+ goto err_video_alloc;
+ }
+ mdp->m2m_vdev->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE |
+ V4L2_CAP_STREAMING;
+ mdp->m2m_vdev->fops = &mdp_m2m_fops;
+ mdp->m2m_vdev->ioctl_ops = &mdp_m2m_ioctl_ops;
+ mdp->m2m_vdev->release = mdp_video_device_release;
+ mdp->m2m_vdev->lock = &mdp->m2m_lock;
+ mdp->m2m_vdev->vfl_dir = VFL_DIR_M2M;
+ mdp->m2m_vdev->v4l2_dev = &mdp->v4l2_dev;
+ snprintf(mdp->m2m_vdev->name, sizeof(mdp->m2m_vdev->name), "%s:m2m",
+ MDP_MODULE_NAME);
+ video_set_drvdata(mdp->m2m_vdev, mdp);
+
+ mdp->m2m_dev = v4l2_m2m_init(&mdp_m2m_ops);
+ if (IS_ERR(mdp->m2m_dev)) {
+ dev_err(dev, "Failed to initialize v4l2-m2m device\n");
+ ret = PTR_ERR(mdp->m2m_dev);
+ goto err_m2m_init;
+ }
+
+ ret = video_register_device(mdp->m2m_vdev, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(dev, "Failed to register video device\n");
+ goto err_video_register;
+ }
+
+ v4l2_info(&mdp->v4l2_dev, "Driver registered as /dev/video%d",
+ mdp->m2m_vdev->num);
+ return 0;
+
+err_video_register:
+ v4l2_m2m_release(mdp->m2m_dev);
+err_m2m_init:
+ video_device_release(mdp->m2m_vdev);
+err_video_alloc:
+
+ return ret;
+}
+
+void mdp_m2m_device_unregister(struct mdp_dev *mdp)
+{
+ video_unregister_device(mdp->m2m_vdev);
+}
+
+void mdp_m2m_job_finish(struct mdp_m2m_ctx *ctx)
+{
+ enum vb2_buffer_state vb_state = VB2_BUF_STATE_DONE;
+
+ mdp_m2m_process_done(ctx, vb_state);
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h
new file mode 100644
index 000000000000..61ddbaf1bf13
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-m2m.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_M2M_H__
+#define __MTK_MDP3_M2M_H__
+
+#include <media/v4l2-ctrls.h>
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-vpu.h"
+#include "mtk-mdp3-regs.h"
+
+#define MDP_MAX_CTRLS 10
+
+enum {
+ MDP_M2M_SRC = 0,
+ MDP_M2M_DST = 1,
+ MDP_M2M_MAX,
+};
+
+struct mdp_m2m_ctrls {
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *rotate;
+};
+
+struct mdp_m2m_ctx {
+ u32 id;
+ struct mdp_dev *mdp_dev;
+ struct v4l2_fh fh;
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct mdp_m2m_ctrls ctrls;
+ struct v4l2_m2m_ctx *m2m_ctx;
+ struct mdp_vpu_ctx vpu;
+ u32 frame_count[MDP_M2M_MAX];
+
+ struct mdp_frameparam curr_param;
+	/* lock protecting this mdp m2m context */
+ struct mutex ctx_lock;
+};
+
+int mdp_m2m_device_register(struct mdp_dev *mdp);
+void mdp_m2m_device_unregister(struct mdp_dev *mdp);
+void mdp_m2m_job_finish(struct mdp_m2m_ctx *ctx);
+
+#endif /* __MTK_MDP3_M2M_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
new file mode 100644
index 000000000000..4e84a37ecdfc
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.c
@@ -0,0 +1,735 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <media/v4l2-common.h>
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include "mtk-mdp3-core.h"
+#include "mtk-mdp3-regs.h"
+#include "mtk-mdp3-m2m.h"
+
+/*
+ * 10-bit formats are not included in this basic format list;
+ * add the corresponding format settings before using them.
+ */
+static const struct mdp_format mdp_formats[] = {
+ {
+ .pixelformat = V4L2_PIX_FMT_GREY,
+ .mdp_color = MDP_COLOR_GREY,
+ .depth = { 8 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565X,
+ .mdp_color = MDP_COLOR_BGR565,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB565,
+ .mdp_color = MDP_COLOR_RGB565,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_RGB24,
+ .mdp_color = MDP_COLOR_RGB888,
+ .depth = { 24 },
+ .row_depth = { 24 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_BGR24,
+ .mdp_color = MDP_COLOR_BGR888,
+ .depth = { 24 },
+ .row_depth = { 24 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_ABGR32,
+ .mdp_color = MDP_COLOR_BGRA8888,
+ .depth = { 32 },
+ .row_depth = { 32 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_ARGB32,
+ .mdp_color = MDP_COLOR_ARGB8888,
+ .depth = { 32 },
+ .row_depth = { 32 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mdp_color = MDP_COLOR_UYVY,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_VYUY,
+ .mdp_color = MDP_COLOR_VYUY,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUYV,
+ .mdp_color = MDP_COLOR_YUYV,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVYU,
+ .mdp_color = MDP_COLOR_YVYU,
+ .depth = { 16 },
+ .row_depth = { 16 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .mdp_color = MDP_COLOR_I420,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .mdp_color = MDP_COLOR_YV12,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12,
+ .mdp_color = MDP_COLOR_NV12,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21,
+ .mdp_color = MDP_COLOR_NV21,
+ .depth = { 12 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16,
+ .mdp_color = MDP_COLOR_NV16,
+ .depth = { 16 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV61,
+ .mdp_color = MDP_COLOR_NV61,
+ .depth = { 16 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV24,
+ .mdp_color = MDP_COLOR_NV24,
+ .depth = { 24 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV42,
+ .mdp_color = MDP_COLOR_NV42,
+ .depth = { 24 },
+ .row_depth = { 8 },
+ .num_planes = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_MT21C,
+ .mdp_color = MDP_COLOR_420_BLK_UFO,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 4,
+ .halign = 5,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_MM21,
+ .mdp_color = MDP_COLOR_420_BLK,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 4,
+ .halign = 5,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV12M,
+ .mdp_color = MDP_COLOR_NV12,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV21M,
+ .mdp_color = MDP_COLOR_NV21,
+ .depth = { 8, 4 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV16M,
+ .mdp_color = MDP_COLOR_NV16,
+ .depth = { 8, 8 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_NV61M,
+ .mdp_color = MDP_COLOR_NV61,
+ .depth = { 8, 8 },
+ .row_depth = { 8, 8 },
+ .num_planes = 2,
+ .walign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YUV420M,
+ .mdp_color = MDP_COLOR_I420,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }, {
+ .pixelformat = V4L2_PIX_FMT_YVU420M,
+ .mdp_color = MDP_COLOR_YV12,
+ .depth = { 8, 2, 2 },
+ .row_depth = { 8, 4, 4 },
+ .num_planes = 3,
+ .walign = 1,
+ .halign = 1,
+ .flags = MDP_FMT_FLAG_OUTPUT | MDP_FMT_FLAG_CAPTURE,
+ }
+};
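+
+/*
+ * Note on the table above: row_depth[] is the bits per pixel within one
+ * line of a plane (used below to derive bytesperline), while depth[] is
+ * the total bits per pixel the plane contributes (used to derive
+ * sizeimage). For example, the single-plane V4L2_PIX_FMT_YUV420 entry has
+ * row_depth 8 and depth 12, so its sizeimage works out to
+ * bytesperline * height * 12 / 8.
+ */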
+
+static const struct mdp_limit mdp_def_limit = {
+ .out_limit = {
+ .wmin = 16,
+ .hmin = 16,
+ .wmax = 8176,
+ .hmax = 8176,
+ },
+ .cap_limit = {
+ .wmin = 2,
+ .hmin = 2,
+ .wmax = 8176,
+ .hmax = 8176,
+ },
+ .h_scale_up_max = 32,
+ .v_scale_up_max = 32,
+ .h_scale_down_max = 20,
+ .v_scale_down_max = 128,
+};
+
+static const struct mdp_format *mdp_find_fmt(u32 pixelformat, u32 type)
+{
+ u32 i, flag;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MDP_FMT_FLAG_OUTPUT :
+ MDP_FMT_FLAG_CAPTURE;
+ for (i = 0; i < ARRAY_SIZE(mdp_formats); ++i) {
+ if (!(mdp_formats[i].flags & flag))
+ continue;
+ if (mdp_formats[i].pixelformat == pixelformat)
+ return &mdp_formats[i];
+ }
+ return NULL;
+}
+
+static const struct mdp_format *mdp_find_fmt_by_index(u32 index, u32 type)
+{
+ u32 i, flag, num = 0;
+
+ flag = V4L2_TYPE_IS_OUTPUT(type) ? MDP_FMT_FLAG_OUTPUT :
+ MDP_FMT_FLAG_CAPTURE;
+ for (i = 0; i < ARRAY_SIZE(mdp_formats); ++i) {
+ if (!(mdp_formats[i].flags & flag))
+ continue;
+ if (index == num)
+ return &mdp_formats[i];
+ num++;
+ }
+ return NULL;
+}
+
+enum mdp_ycbcr_profile mdp_map_ycbcr_prof_mplane(struct v4l2_format *f,
+ u32 mdp_color)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+
+ if (MDP_COLOR_IS_RGB(mdp_color))
+ return MDP_YCBCR_PROFILE_FULL_BT601;
+
+ switch (pix_mp->colorspace) {
+ case V4L2_COLORSPACE_JPEG:
+ return MDP_YCBCR_PROFILE_JPEG;
+ case V4L2_COLORSPACE_REC709:
+ case V4L2_COLORSPACE_DCI_P3:
+ if (pix_mp->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ return MDP_YCBCR_PROFILE_FULL_BT709;
+ return MDP_YCBCR_PROFILE_BT709;
+ case V4L2_COLORSPACE_BT2020:
+ if (pix_mp->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ return MDP_YCBCR_PROFILE_FULL_BT2020;
+ return MDP_YCBCR_PROFILE_BT2020;
+ default:
+ if (pix_mp->quantization == V4L2_QUANTIZATION_FULL_RANGE)
+ return MDP_YCBCR_PROFILE_FULL_BT601;
+ return MDP_YCBCR_PROFILE_BT601;
+ }
+}
+
+static void mdp_bound_align_image(u32 *w, u32 *h,
+ struct v4l2_frmsize_stepwise *s,
+ unsigned int salign)
+{
+ unsigned int org_w, org_h;
+
+ org_w = *w;
+ org_h = *h;
+ v4l_bound_align_image(w, s->min_width, s->max_width, s->step_width,
+ h, s->min_height, s->max_height, s->step_height,
+ salign);
+
+ s->min_width = org_w;
+ s->min_height = org_h;
+ v4l2_apply_frmsize_constraints(w, h, s);
+}
+
+static int mdp_clamp_align(s32 *x, int min, int max, unsigned int align)
+{
+ unsigned int mask;
+
+ if (min < 0 || max < 0)
+ return -ERANGE;
+
+	/* In an aligned value, the low 'align' bits must be zero */
+	mask = ~((1 << align) - 1);
+
+	/* Round min up and max down to the alignment */
+	min = (min + ~mask) & mask;
+	max = max & mask;
+ if ((unsigned int)min > (unsigned int)max)
+ return -ERANGE;
+
+ /* Clamp to aligned min and max */
+ *x = clamp(*x, min, max);
+
+ /* Round to nearest aligned value */
+ if (align)
+ *x = (*x + (1 << (align - 1))) & mask;
+ return 0;
+}
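+
+/*
+ * Worked example for mdp_clamp_align(): with align = 1 (2-pixel alignment)
+ * mask is ~1, so min = 3 rounds up to 4, max = 9 rounds down to 8, and
+ * *x = 5 is clamped to [4, 8] and then rounded to the nearest aligned
+ * value, 6.
+ */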
+
+int mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f)
+{
+ const struct mdp_format *fmt;
+
+ fmt = mdp_find_fmt_by_index(f->index, f->type);
+ if (!fmt)
+ return -EINVAL;
+
+ f->pixelformat = fmt->pixelformat;
+ return 0;
+}
+
+const struct mdp_format *mdp_try_fmt_mplane(struct v4l2_format *f,
+ struct mdp_frameparam *param,
+ u32 ctx_id)
+{
+ struct device *dev = &param->ctx->mdp_dev->pdev->dev;
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ const struct mdp_format *fmt;
+ const struct mdp_pix_limit *pix_limit;
+ struct v4l2_frmsize_stepwise s;
+ u32 org_w, org_h;
+ unsigned int i;
+
+ fmt = mdp_find_fmt(pix_mp->pixelformat, f->type);
+ if (!fmt) {
+ fmt = mdp_find_fmt_by_index(0, f->type);
+ if (!fmt) {
+ dev_dbg(dev, "%d: pixelformat %c%c%c%c invalid", ctx_id,
+ (pix_mp->pixelformat & 0xff),
+ (pix_mp->pixelformat >> 8) & 0xff,
+ (pix_mp->pixelformat >> 16) & 0xff,
+ (pix_mp->pixelformat >> 24) & 0xff);
+ return NULL;
+ }
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+ pix_mp->flags = 0;
+ pix_mp->pixelformat = fmt->pixelformat;
+ if (V4L2_TYPE_IS_CAPTURE(f->type)) {
+ pix_mp->colorspace = param->colorspace;
+ pix_mp->xfer_func = param->xfer_func;
+ pix_mp->ycbcr_enc = param->ycbcr_enc;
+ pix_mp->quantization = param->quant;
+ }
+
+ pix_limit = V4L2_TYPE_IS_OUTPUT(f->type) ? &param->limit->out_limit :
+ &param->limit->cap_limit;
+ s.min_width = pix_limit->wmin;
+ s.max_width = pix_limit->wmax;
+ s.step_width = fmt->walign;
+ s.min_height = pix_limit->hmin;
+ s.max_height = pix_limit->hmax;
+ s.step_height = fmt->halign;
+ org_w = pix_mp->width;
+ org_h = pix_mp->height;
+
+ mdp_bound_align_image(&pix_mp->width, &pix_mp->height, &s, fmt->salign);
+ if (org_w != pix_mp->width || org_h != pix_mp->height)
+ dev_dbg(dev, "%d: size change: %ux%u to %ux%u", ctx_id,
+ org_w, org_h, pix_mp->width, pix_mp->height);
+
+ if (pix_mp->num_planes && pix_mp->num_planes != fmt->num_planes)
+ dev_dbg(dev, "%d num of planes change: %u to %u", ctx_id,
+ pix_mp->num_planes, fmt->num_planes);
+ pix_mp->num_planes = fmt->num_planes;
+
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ u32 min_bpl = (pix_mp->width * fmt->row_depth[i]) >> 3;
+ u32 max_bpl = (pix_limit->wmax * fmt->row_depth[i]) >> 3;
+ u32 bpl = pix_mp->plane_fmt[i].bytesperline;
+ u32 min_si, max_si;
+ u32 si = pix_mp->plane_fmt[i].sizeimage;
+
+ bpl = clamp(bpl, min_bpl, max_bpl);
+ pix_mp->plane_fmt[i].bytesperline = bpl;
+
+ min_si = (bpl * pix_mp->height * fmt->depth[i]) /
+ fmt->row_depth[i];
+ max_si = (bpl * s.max_height * fmt->depth[i]) /
+ fmt->row_depth[i];
+
+ si = clamp(si, min_si, max_si);
+ pix_mp->plane_fmt[i].sizeimage = si;
+
+ dev_dbg(dev, "%d: p%u, bpl:%u [%u, %u], sizeimage:%u [%u, %u]",
+ ctx_id, i, bpl, min_bpl, max_bpl, si, min_si, max_si);
+ }
+
+ return fmt;
+}
+
+static int mdp_clamp_start(s32 *x, int min, int max, unsigned int align,
+ u32 flags)
+{
+ if (flags & V4L2_SEL_FLAG_GE)
+ max = *x;
+ if (flags & V4L2_SEL_FLAG_LE)
+ min = *x;
+ return mdp_clamp_align(x, min, max, align);
+}
+
+static int mdp_clamp_end(s32 *x, int min, int max, unsigned int align,
+ u32 flags)
+{
+ if (flags & V4L2_SEL_FLAG_GE)
+ min = *x;
+ if (flags & V4L2_SEL_FLAG_LE)
+ max = *x;
+ return mdp_clamp_align(x, min, max, align);
+}
+
+int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
+ const struct v4l2_selection *s, struct mdp_frame *frame)
+{
+ struct device *dev = &ctx->mdp_dev->pdev->dev;
+ s32 left, top, right, bottom;
+ u32 framew, frameh, walign, halign;
+ int ret;
+
+ dev_dbg(dev, "%d target:%d, set:(%d,%d) %ux%u", ctx->id,
+ s->target, s->r.left, s->r.top, s->r.width, s->r.height);
+
+ left = s->r.left;
+ top = s->r.top;
+ right = s->r.left + s->r.width;
+ bottom = s->r.top + s->r.height;
+ framew = frame->format.fmt.pix_mp.width;
+ frameh = frame->format.fmt.pix_mp.height;
+
+ if (mdp_target_is_crop(s->target)) {
+ walign = 1;
+ halign = 1;
+ } else {
+ walign = frame->mdp_fmt->walign;
+ halign = frame->mdp_fmt->halign;
+ }
+
+ dev_dbg(dev, "%d align:%u,%u, bound:%ux%u", ctx->id,
+ walign, halign, framew, frameh);
+
+ ret = mdp_clamp_start(&left, 0, right, walign, s->flags);
+ if (ret)
+ return ret;
+ ret = mdp_clamp_start(&top, 0, bottom, halign, s->flags);
+ if (ret)
+ return ret;
+ ret = mdp_clamp_end(&right, left, framew, walign, s->flags);
+ if (ret)
+ return ret;
+ ret = mdp_clamp_end(&bottom, top, frameh, halign, s->flags);
+ if (ret)
+ return ret;
+
+ r->left = left;
+ r->top = top;
+ r->width = right - left;
+ r->height = bottom - top;
+
+ dev_dbg(dev, "%d crop:(%d,%d) %ux%u", ctx->id,
+ r->left, r->top, r->width, r->height);
+ return 0;
+}
+
+int mdp_check_scaling_ratio(const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose, s32 rotation,
+ const struct mdp_limit *limit)
+{
+ u32 crop_w, crop_h, comp_w, comp_h;
+
+ crop_w = crop->width;
+ crop_h = crop->height;
+	if (rotation == 90 || rotation == 270) {
+ comp_w = compose->height;
+ comp_h = compose->width;
+ } else {
+ comp_w = compose->width;
+ comp_h = compose->height;
+ }
+
+ if ((crop_w / comp_w) > limit->h_scale_down_max ||
+ (crop_h / comp_h) > limit->v_scale_down_max ||
+ (comp_w / crop_w) > limit->h_scale_up_max ||
+ (comp_h / crop_h) > limit->v_scale_up_max)
+ return -ERANGE;
+ return 0;
+}
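+
+/*
+ * Illustrative example with mdp_def_limit: shrinking a 4096-pixel-wide
+ * crop to a 256-pixel-wide compose (ratio 16) passes, while 128 pixels
+ * (ratio 32 > h_scale_down_max of 20) returns -ERANGE; a 90- or 270-degree
+ * rotation swaps the compose width and height before the check.
+ */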
+
+/* Stride that is accepted by MDP HW */
+static u32 mdp_fmt_get_stride(const struct mdp_format *fmt,
+ u32 bytesperline, unsigned int plane)
+{
+ enum mdp_color c = fmt->mdp_color;
+ u32 stride;
+
+ stride = (bytesperline * MDP_COLOR_BITS_PER_PIXEL(c))
+ / fmt->row_depth[0];
+ if (plane == 0)
+ return stride;
+ if (plane < MDP_COLOR_GET_PLANE_COUNT(c)) {
+ if (MDP_COLOR_IS_BLOCK_MODE(c))
+ stride = stride / 2;
+ return stride;
+ }
+ return 0;
+}
+
+/* Stride that is accepted by MDP HW of format with contiguous planes */
+static u32 mdp_fmt_get_stride_contig(const struct mdp_format *fmt,
+ u32 pix_stride, unsigned int plane)
+{
+ enum mdp_color c = fmt->mdp_color;
+ u32 stride = pix_stride;
+
+ if (plane == 0)
+ return stride;
+ if (plane < MDP_COLOR_GET_PLANE_COUNT(c)) {
+ stride = stride >> MDP_COLOR_GET_H_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_UV_COPLANE(c) && !MDP_COLOR_IS_BLOCK_MODE(c))
+ stride = stride * 2;
+ return stride;
+ }
+ return 0;
+}
+
+/* Plane size that is accepted by MDP HW */
+static u32 mdp_fmt_get_plane_size(const struct mdp_format *fmt,
+ u32 stride, u32 height, unsigned int plane)
+{
+ enum mdp_color c = fmt->mdp_color;
+ u32 bytesperline;
+
+ bytesperline = (stride * fmt->row_depth[0])
+ / MDP_COLOR_BITS_PER_PIXEL(c);
+ if (plane == 0)
+ return bytesperline * height;
+ if (plane < MDP_COLOR_GET_PLANE_COUNT(c)) {
+ height = height >> MDP_COLOR_GET_V_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_BLOCK_MODE(c))
+ bytesperline = bytesperline * 2;
+ return bytesperline * height;
+ }
+ return 0;
+}
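+
+/*
+ * Illustrative example: for V4L2_PIX_FMT_NV12M (MDP_COLOR_NV12) at 640x480
+ * with bytesperline = 640 on both planes, mdp_fmt_get_stride() keeps the
+ * 640-byte stride and mdp_fmt_get_plane_size() yields 640 * 480 bytes for
+ * the Y plane and 640 * 240 bytes for the half-height CbCr plane.
+ */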
+
+static void mdp_prepare_buffer(struct img_image_buffer *b,
+ struct mdp_frame *frame, struct vb2_buffer *vb)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &frame->format.fmt.pix_mp;
+ unsigned int i;
+
+ b->format.colorformat = frame->mdp_fmt->mdp_color;
+ b->format.ycbcr_prof = frame->ycbcr_prof;
+ for (i = 0; i < pix_mp->num_planes; ++i) {
+ u32 stride = mdp_fmt_get_stride(frame->mdp_fmt,
+ pix_mp->plane_fmt[i].bytesperline, i);
+
+ b->format.plane_fmt[i].stride = stride;
+ b->format.plane_fmt[i].size =
+ mdp_fmt_get_plane_size(frame->mdp_fmt, stride,
+ pix_mp->height, i);
+ b->iova[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ }
+ for (; i < MDP_COLOR_GET_PLANE_COUNT(b->format.colorformat); ++i) {
+ u32 stride = mdp_fmt_get_stride_contig(frame->mdp_fmt,
+ b->format.plane_fmt[0].stride, i);
+
+ b->format.plane_fmt[i].stride = stride;
+ b->format.plane_fmt[i].size =
+ mdp_fmt_get_plane_size(frame->mdp_fmt, stride,
+ pix_mp->height, i);
+ b->iova[i] = b->iova[i - 1] + b->format.plane_fmt[i - 1].size;
+ }
+ b->usage = frame->usage;
+}
+
+void mdp_set_src_config(struct img_input *in,
+ struct mdp_frame *frame, struct vb2_buffer *vb)
+{
+ in->buffer.format.width = frame->format.fmt.pix_mp.width;
+ in->buffer.format.height = frame->format.fmt.pix_mp.height;
+ mdp_prepare_buffer(&in->buffer, frame, vb);
+}
+
+static u32 mdp_to_fixed(u32 *r, struct v4l2_fract *f)
+{
+ u32 q;
+
+ if (f->denominator == 0) {
+ *r = 0;
+ return 0;
+ }
+
+ q = f->numerator / f->denominator;
+ *r = div_u64(((u64)f->numerator - q * f->denominator) <<
+ IMG_SUBPIXEL_SHIFT, f->denominator);
+ return q;
+}
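+
+/*
+ * Example: for f = 5/2, mdp_to_fixed() returns the integer part 2 and
+ * stores the remaining 1/2 pixel, scaled by 2^IMG_SUBPIXEL_SHIFT, in *r;
+ * a zero denominator yields 0 for both.
+ */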
+
+static void mdp_set_src_crop(struct img_crop *c, struct mdp_crop *crop)
+{
+ c->left = crop->c.left
+ + mdp_to_fixed(&c->left_subpix, &crop->left_subpix);
+ c->top = crop->c.top
+ + mdp_to_fixed(&c->top_subpix, &crop->top_subpix);
+ c->width = crop->c.width
+ + mdp_to_fixed(&c->width_subpix, &crop->width_subpix);
+ c->height = crop->c.height
+ + mdp_to_fixed(&c->height_subpix, &crop->height_subpix);
+}
+
+static void mdp_set_orientation(struct img_output *out,
+ s32 rotation, bool hflip, bool vflip)
+{
+ u8 flip = 0;
+
+ if (hflip)
+ flip ^= 1;
+ if (vflip) {
+ /*
+ * A vertical flip is equivalent to
+ * a 180-degree rotation with a horizontal flip
+ */
+ rotation += 180;
+ flip ^= 1;
+ }
+
+ out->rotation = rotation % 360;
+ if (flip != 0)
+ out->flags |= IMG_CTRL_FLAG_HFLIP;
+ else
+ out->flags &= ~IMG_CTRL_FLAG_HFLIP;
+}
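+
+/*
+ * Example: rotation = 90 with both hflip and vflip set becomes a plain
+ * 270-degree rotation (90 + 180) with IMG_CTRL_FLAG_HFLIP cleared, since
+ * flipping both axes is the same as rotating by 180 degrees.
+ */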
+
+void mdp_set_dst_config(struct img_output *out,
+ struct mdp_frame *frame, struct vb2_buffer *vb)
+{
+ out->buffer.format.width = frame->compose.width;
+ out->buffer.format.height = frame->compose.height;
+ mdp_prepare_buffer(&out->buffer, frame, vb);
+ mdp_set_src_crop(&out->crop, &frame->crop);
+ mdp_set_orientation(out, frame->rotation, frame->hflip, frame->vflip);
+}
+
+int mdp_frameparam_init(struct mdp_frameparam *param)
+{
+ struct mdp_frame *frame;
+
+ if (!param)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&param->list);
+ param->limit = &mdp_def_limit;
+ param->type = MDP_STREAM_TYPE_BITBLT;
+
+ frame = &param->output;
+ frame->format.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ frame->mdp_fmt = mdp_try_fmt_mplane(&frame->format, param, 0);
+ frame->ycbcr_prof =
+ mdp_map_ycbcr_prof_mplane(&frame->format,
+ frame->mdp_fmt->mdp_color);
+ frame->usage = MDP_BUFFER_USAGE_HW_READ;
+
+ param->num_captures = 1;
+ frame = &param->captures[0];
+ frame->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ frame->mdp_fmt = mdp_try_fmt_mplane(&frame->format, param, 0);
+ frame->ycbcr_prof =
+ mdp_map_ycbcr_prof_mplane(&frame->format,
+ frame->mdp_fmt->mdp_color);
+ frame->usage = MDP_BUFFER_USAGE_MDP;
+ frame->crop.c.width = param->output.format.fmt.pix_mp.width;
+ frame->crop.c.height = param->output.format.fmt.pix_mp.height;
+ frame->compose.width = frame->format.fmt.pix_mp.width;
+ frame->compose.height = frame->format.fmt.pix_mp.height;
+
+ return 0;
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
new file mode 100644
index 000000000000..f995e536d45f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-regs.h
@@ -0,0 +1,373 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_REGS_H__
+#define __MTK_MDP3_REGS_H__
+
+#include <linux/videodev2.h>
+#include <media/videobuf2-core.h>
+#include "mtk-img-ipi.h"
+
+/*
+ * MDP native color code
+ * Plane count: 1, 2, 3
+ * H-subsample: 0, 1, 2
+ * V-subsample: 0, 1
+ * Color group: 0-RGB, 1-YUV, 2-raw
+ */
+#define MDP_COLOR(PACKED, LOOSE, VIDEO, PLANE, HF, VF, BITS, GROUP, SWAP, ID)\
+ (((PACKED) << 27) | ((LOOSE) << 26) | ((VIDEO) << 23) |\
+ ((PLANE) << 21) | ((HF) << 19) | ((VF) << 18) | ((BITS) << 8) |\
+ ((GROUP) << 6) | ((SWAP) << 5) | ((ID) << 0))
+
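+/*
+ * For example, MDP_COLOR_NV12 below expands from
+ * MDP_COLOR(0, 0, 0, 2, 1, 1, 8, 1, 0, 12): two planes, 2x2 chroma
+ * subsampling (HF = VF = 1), a BITS field of 8, color group 1 (YUV),
+ * no component swap and hardware format ID 12.
+ */
+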
+#define MDP_COLOR_IS_10BIT_PACKED(c) ((0x08000000 & (c)) >> 27)
+#define MDP_COLOR_IS_10BIT_LOOSE(c) (((0x0c000000 & (c)) >> 26) == 1)
+#define MDP_COLOR_IS_10BIT_TILE(c) (((0x0c000000 & (c)) >> 26) == 3)
+#define MDP_COLOR_IS_UFP(c) ((0x02000000 & (c)) >> 25)
+#define MDP_COLOR_IS_INTERLACED(c) ((0x01000000 & (c)) >> 24)
+#define MDP_COLOR_IS_BLOCK_MODE(c) ((0x00800000 & (c)) >> 23)
+#define MDP_COLOR_GET_PLANE_COUNT(c) ((0x00600000 & (c)) >> 21)
+#define MDP_COLOR_GET_H_SUBSAMPLE(c) ((0x00180000 & (c)) >> 19)
+#define MDP_COLOR_GET_V_SUBSAMPLE(c) ((0x00040000 & (c)) >> 18)
+#define MDP_COLOR_BITS_PER_PIXEL(c) ((0x0003ff00 & (c)) >> 8)
+#define MDP_COLOR_GET_GROUP(c) ((0x000000c0 & (c)) >> 6)
+#define MDP_COLOR_IS_SWAPPED(c) ((0x00000020 & (c)) >> 5)
+#define MDP_COLOR_GET_UNIQUE_ID(c) ((0x0000001f & (c)) >> 0)
+#define MDP_COLOR_GET_HW_FORMAT(c) ((0x0000001f & (c)) >> 0)
+
+#define MDP_COLOR_IS_RGB(c) (MDP_COLOR_GET_GROUP(c) == 0)
+#define MDP_COLOR_IS_YUV(c) (MDP_COLOR_GET_GROUP(c) == 1)
+
+enum mdp_color {
+ MDP_COLOR_UNKNOWN = 0,
+
+ //MDP_COLOR_FULLG8,
+ MDP_COLOR_FULLG8_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 8, 2, 0, 21),
+ MDP_COLOR_FULLG8 = MDP_COLOR_FULLG8_BGGR,
+
+ //MDP_COLOR_FULLG10,
+ MDP_COLOR_FULLG10_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 10, 2, 0, 21),
+ MDP_COLOR_FULLG10 = MDP_COLOR_FULLG10_BGGR,
+
+ //MDP_COLOR_FULLG12,
+ MDP_COLOR_FULLG12_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 12, 2, 0, 21),
+ MDP_COLOR_FULLG12 = MDP_COLOR_FULLG12_BGGR,
+
+ //MDP_COLOR_FULLG14,
+ MDP_COLOR_FULLG14_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 14, 2, 0, 21),
+ MDP_COLOR_FULLG14 = MDP_COLOR_FULLG14_BGGR,
+
+ MDP_COLOR_UFO10 = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 24),
+
+ //MDP_COLOR_BAYER8,
+ MDP_COLOR_BAYER8_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 8, 2, 0, 20),
+ MDP_COLOR_BAYER8 = MDP_COLOR_BAYER8_BGGR,
+
+ //MDP_COLOR_BAYER10,
+ MDP_COLOR_BAYER10_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 10, 2, 0, 20),
+ MDP_COLOR_BAYER10 = MDP_COLOR_BAYER10_BGGR,
+
+ //MDP_COLOR_BAYER12,
+ MDP_COLOR_BAYER12_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 12, 2, 0, 20),
+ MDP_COLOR_BAYER12 = MDP_COLOR_BAYER12_BGGR,
+
+ //MDP_COLOR_BAYER14,
+ MDP_COLOR_BAYER14_RGGB = MDP_COLOR(0, 0, 0, 1, 0, 0, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14_GRBG = MDP_COLOR(0, 0, 0, 1, 0, 1, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14_GBRG = MDP_COLOR(0, 0, 0, 1, 1, 0, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14_BGGR = MDP_COLOR(0, 0, 0, 1, 1, 1, 14, 2, 0, 20),
+ MDP_COLOR_BAYER14 = MDP_COLOR_BAYER14_BGGR,
+
+ MDP_COLOR_RGB48 = MDP_COLOR(0, 0, 0, 1, 0, 0, 48, 0, 0, 23),
+ /* For bayer+mono raw-16 */
+ MDP_COLOR_RGB565_RAW = MDP_COLOR(0, 0, 0, 1, 0, 0, 16, 2, 0, 0),
+
+ MDP_COLOR_BAYER8_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 2, 0, 22),
+ MDP_COLOR_BAYER10_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 10, 2, 0, 22),
+ MDP_COLOR_BAYER12_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 12, 2, 0, 22),
+ MDP_COLOR_BAYER14_UNPAK = MDP_COLOR(0, 0, 0, 1, 0, 0, 14, 2, 0, 22),
+
+ /* Unified formats */
+ MDP_COLOR_GREY = MDP_COLOR(0, 0, 0, 1, 0, 0, 8, 1, 0, 7),
+
+ MDP_COLOR_RGB565 = MDP_COLOR(0, 0, 0, 1, 0, 0, 16, 0, 0, 0),
+ MDP_COLOR_BGR565 = MDP_COLOR(0, 0, 0, 1, 0, 0, 16, 0, 1, 0),
+ MDP_COLOR_RGB888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 0, 1, 1),
+ MDP_COLOR_BGR888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 0, 0, 1),
+ MDP_COLOR_RGBA8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 1, 2),
+ MDP_COLOR_BGRA8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 0, 2),
+ MDP_COLOR_ARGB8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 1, 3),
+ MDP_COLOR_ABGR8888 = MDP_COLOR(0, 0, 0, 1, 0, 0, 32, 0, 0, 3),
+
+ MDP_COLOR_UYVY = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 0, 4),
+ MDP_COLOR_VYUY = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 1, 4),
+ MDP_COLOR_YUYV = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 0, 5),
+ MDP_COLOR_YVYU = MDP_COLOR(0, 0, 0, 1, 1, 0, 16, 1, 1, 5),
+
+ MDP_COLOR_I420 = MDP_COLOR(0, 0, 0, 3, 1, 1, 8, 1, 0, 8),
+ MDP_COLOR_YV12 = MDP_COLOR(0, 0, 0, 3, 1, 1, 8, 1, 1, 8),
+ MDP_COLOR_I422 = MDP_COLOR(0, 0, 0, 3, 1, 0, 8, 1, 0, 9),
+ MDP_COLOR_YV16 = MDP_COLOR(0, 0, 0, 3, 1, 0, 8, 1, 1, 9),
+ MDP_COLOR_I444 = MDP_COLOR(0, 0, 0, 3, 0, 0, 8, 1, 0, 10),
+ MDP_COLOR_YV24 = MDP_COLOR(0, 0, 0, 3, 0, 0, 8, 1, 1, 10),
+
+ MDP_COLOR_NV12 = MDP_COLOR(0, 0, 0, 2, 1, 1, 8, 1, 0, 12),
+ MDP_COLOR_NV21 = MDP_COLOR(0, 0, 0, 2, 1, 1, 8, 1, 1, 12),
+ MDP_COLOR_NV16 = MDP_COLOR(0, 0, 0, 2, 1, 0, 8, 1, 0, 13),
+ MDP_COLOR_NV61 = MDP_COLOR(0, 0, 0, 2, 1, 0, 8, 1, 1, 13),
+ MDP_COLOR_NV24 = MDP_COLOR(0, 0, 0, 2, 0, 0, 8, 1, 0, 14),
+ MDP_COLOR_NV42 = MDP_COLOR(0, 0, 0, 2, 0, 0, 8, 1, 1, 14),
+
+ /* MediaTek proprietary formats */
+ /* UFO encoded block mode */
+ MDP_COLOR_420_BLK_UFO = MDP_COLOR(0, 0, 5, 2, 1, 1, 256, 1, 0, 12),
+ /* Block mode */
+ MDP_COLOR_420_BLK = MDP_COLOR(0, 0, 1, 2, 1, 1, 256, 1, 0, 12),
+ /* Block mode + field mode */
+ MDP_COLOR_420_BLKI = MDP_COLOR(0, 0, 3, 2, 1, 1, 256, 1, 0, 12),
+ /* Block mode */
+ MDP_COLOR_422_BLK = MDP_COLOR(0, 0, 1, 1, 1, 0, 512, 1, 0, 4),
+
+ MDP_COLOR_IYU2 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 1, 0, 25),
+ MDP_COLOR_YUV444 = MDP_COLOR(0, 0, 0, 1, 0, 0, 24, 1, 0, 30),
+
+ /* Packed 10-bit formats */
+ MDP_COLOR_RGBA1010102 = MDP_COLOR(1, 0, 0, 1, 0, 0, 32, 0, 1, 2),
+ MDP_COLOR_BGRA1010102 = MDP_COLOR(1, 0, 0, 1, 0, 0, 32, 0, 0, 2),
+ /* Packed 10-bit UYVY */
+ MDP_COLOR_UYVY_10P = MDP_COLOR(1, 0, 0, 1, 1, 0, 20, 1, 0, 4),
+ /* Packed 10-bit NV21 */
+ MDP_COLOR_NV21_10P = MDP_COLOR(1, 0, 0, 2, 1, 1, 10, 1, 1, 12),
+ /* 10-bit block mode */
+ MDP_COLOR_420_BLK_10_H = MDP_COLOR(1, 0, 1, 2, 1, 1, 320, 1, 0, 12),
+ /* 10-bit HEVC tile mode */
+ MDP_COLOR_420_BLK_10_V = MDP_COLOR(1, 1, 1, 2, 1, 1, 320, 1, 0, 12),
+ /* UFO encoded 10-bit block mode */
+ MDP_COLOR_420_BLK_U10_H = MDP_COLOR(1, 0, 5, 2, 1, 1, 320, 1, 0, 12),
+ /* UFO encoded 10-bit HEVC tile mode */
+ MDP_COLOR_420_BLK_U10_V = MDP_COLOR(1, 1, 5, 2, 1, 1, 320, 1, 0, 12),
+
+ /* Loose 10-bit formats */
+ MDP_COLOR_UYVY_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 0, 4),
+ MDP_COLOR_VYUY_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 1, 4),
+ MDP_COLOR_YUYV_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 0, 5),
+ MDP_COLOR_YVYU_10L = MDP_COLOR(0, 1, 0, 1, 1, 0, 20, 1, 1, 5),
+ MDP_COLOR_NV12_10L = MDP_COLOR(0, 1, 0, 2, 1, 1, 10, 1, 0, 12),
+ MDP_COLOR_NV21_10L = MDP_COLOR(0, 1, 0, 2, 1, 1, 10, 1, 1, 12),
+ MDP_COLOR_NV16_10L = MDP_COLOR(0, 1, 0, 2, 1, 0, 10, 1, 0, 13),
+ MDP_COLOR_NV61_10L = MDP_COLOR(0, 1, 0, 2, 1, 0, 10, 1, 1, 13),
+ MDP_COLOR_YV12_10L = MDP_COLOR(0, 1, 0, 3, 1, 1, 10, 1, 1, 8),
+ MDP_COLOR_I420_10L = MDP_COLOR(0, 1, 0, 3, 1, 1, 10, 1, 0, 8),
+};
+
+static inline bool MDP_COLOR_IS_UV_COPLANE(enum mdp_color c)
+{
+ return (MDP_COLOR_GET_PLANE_COUNT(c) == 2 && MDP_COLOR_IS_YUV(c));
+}
+
+/* Minimum Y stride that is accepted by MDP HW */
+static inline u32 mdp_color_get_min_y_stride(enum mdp_color c, u32 width)
+{
+ return ((MDP_COLOR_BITS_PER_PIXEL(c) * width) + 4) >> 3;
+}
+
+/* Minimum UV stride that is accepted by MDP HW */
+static inline u32 mdp_color_get_min_uv_stride(enum mdp_color c, u32 width)
+{
+ u32 min_stride;
+
+ if (MDP_COLOR_GET_PLANE_COUNT(c) == 1)
+ return 0;
+ min_stride = mdp_color_get_min_y_stride(c, width)
+ >> MDP_COLOR_GET_H_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_UV_COPLANE(c) && !MDP_COLOR_IS_BLOCK_MODE(c))
+ min_stride = min_stride * 2;
+ return min_stride;
+}
+
+/* Minimum Y plane size that is necessary in buffer */
+static inline u32 mdp_color_get_min_y_size(enum mdp_color c,
+ u32 width, u32 height)
+{
+ if (MDP_COLOR_IS_BLOCK_MODE(c))
+ return ((MDP_COLOR_BITS_PER_PIXEL(c) * width) >> 8) * height;
+ return mdp_color_get_min_y_stride(c, width) * height;
+}
+
+/* Minimum UV plane size that is necessary in buffer */
+static inline u32 mdp_color_get_min_uv_size(enum mdp_color c,
+ u32 width, u32 height)
+{
+ height = height >> MDP_COLOR_GET_V_SUBSAMPLE(c);
+ if (MDP_COLOR_IS_BLOCK_MODE(c) && (MDP_COLOR_GET_PLANE_COUNT(c) > 1))
+ return ((MDP_COLOR_BITS_PER_PIXEL(c) * width) >> 8) * height;
+ return mdp_color_get_min_uv_stride(c, width) * height;
+}
+
+/* Combine colorspace, xfer_func, ycbcr_encoding, and quantization */
+enum mdp_ycbcr_profile {
+ /* V4L2_YCBCR_ENC_601 and V4L2_QUANTIZATION_LIM_RANGE */
+ MDP_YCBCR_PROFILE_BT601,
+ /* V4L2_YCBCR_ENC_709 and V4L2_QUANTIZATION_LIM_RANGE */
+ MDP_YCBCR_PROFILE_BT709,
+ /* V4L2_YCBCR_ENC_601 and V4L2_QUANTIZATION_FULL_RANGE */
+ MDP_YCBCR_PROFILE_JPEG,
+ MDP_YCBCR_PROFILE_FULL_BT601 = MDP_YCBCR_PROFILE_JPEG,
+
+	/* Colorspaces not supported for capture */
+ /* V4L2_YCBCR_ENC_BT2020 and V4L2_QUANTIZATION_LIM_RANGE */
+ MDP_YCBCR_PROFILE_BT2020,
+ /* V4L2_YCBCR_ENC_709 and V4L2_QUANTIZATION_FULL_RANGE */
+ MDP_YCBCR_PROFILE_FULL_BT709,
+ /* V4L2_YCBCR_ENC_BT2020 and V4L2_QUANTIZATION_FULL_RANGE */
+ MDP_YCBCR_PROFILE_FULL_BT2020,
+};
+
+#define MDP_FMT_FLAG_OUTPUT BIT(0)
+#define MDP_FMT_FLAG_CAPTURE BIT(1)
+
+struct mdp_format {
+ u32 pixelformat;
+ u32 mdp_color;
+ u8 depth[VIDEO_MAX_PLANES];
+ u8 row_depth[VIDEO_MAX_PLANES];
+ u8 num_planes;
+ u8 walign;
+ u8 halign;
+ u8 salign;
+ u32 flags;
+};
+
+struct mdp_pix_limit {
+ u32 wmin;
+ u32 hmin;
+ u32 wmax;
+ u32 hmax;
+};
+
+struct mdp_limit {
+ struct mdp_pix_limit out_limit;
+ struct mdp_pix_limit cap_limit;
+ u32 h_scale_up_max;
+ u32 v_scale_up_max;
+ u32 h_scale_down_max;
+ u32 v_scale_down_max;
+};
+
+enum mdp_stream_type {
+ MDP_STREAM_TYPE_UNKNOWN,
+ MDP_STREAM_TYPE_BITBLT,
+ MDP_STREAM_TYPE_GPU_BITBLT,
+ MDP_STREAM_TYPE_DUAL_BITBLT,
+ MDP_STREAM_TYPE_2ND_BITBLT,
+ MDP_STREAM_TYPE_ISP_IC,
+ MDP_STREAM_TYPE_ISP_VR,
+ MDP_STREAM_TYPE_ISP_ZSD,
+ MDP_STREAM_TYPE_ISP_IP,
+ MDP_STREAM_TYPE_ISP_VSS,
+ MDP_STREAM_TYPE_ISP_ZSD_SLOW,
+ MDP_STREAM_TYPE_WPE,
+ MDP_STREAM_TYPE_WPE2,
+};
+
+struct mdp_crop {
+ struct v4l2_rect c;
+ struct v4l2_fract left_subpix;
+ struct v4l2_fract top_subpix;
+ struct v4l2_fract width_subpix;
+ struct v4l2_fract height_subpix;
+};
+
+struct mdp_frame {
+ struct v4l2_format format;
+ const struct mdp_format *mdp_fmt;
+ u32 ycbcr_prof; /* enum mdp_ycbcr_profile */
+ u32 usage; /* enum mdp_buffer_usage */
+ struct mdp_crop crop;
+ struct v4l2_rect compose;
+ s32 rotation;
+ u32 hflip:1;
+ u32 vflip:1;
+ u32 hdr:1;
+ u32 dre:1;
+ u32 sharpness:1;
+ u32 dither:1;
+};
+
+static inline bool mdp_target_is_crop(u32 target)
+{
+ return (target == V4L2_SEL_TGT_CROP) ||
+ (target == V4L2_SEL_TGT_CROP_DEFAULT) ||
+ (target == V4L2_SEL_TGT_CROP_BOUNDS);
+}
+
+static inline bool mdp_target_is_compose(u32 target)
+{
+ return (target == V4L2_SEL_TGT_COMPOSE) ||
+ (target == V4L2_SEL_TGT_COMPOSE_DEFAULT) ||
+ (target == V4L2_SEL_TGT_COMPOSE_BOUNDS);
+}
+
+#define MDP_MAX_CAPTURES IMG_MAX_HW_OUTPUTS
+
+#define MDP_VPU_INIT BIT(0)
+#define MDP_M2M_CTX_ERROR BIT(1)
+
+struct mdp_frameparam {
+ struct list_head list;
+ struct mdp_m2m_ctx *ctx;
+ atomic_t state;
+ const struct mdp_limit *limit;
+ u32 type; /* enum mdp_stream_type */
+ u32 frame_no;
+ struct mdp_frame output;
+ struct mdp_frame captures[MDP_MAX_CAPTURES];
+ u32 num_captures;
+ enum v4l2_colorspace colorspace;
+ enum v4l2_ycbcr_encoding ycbcr_enc;
+ enum v4l2_xfer_func xfer_func;
+ enum v4l2_quantization quant;
+};
+
+int mdp_enum_fmt_mplane(struct v4l2_fmtdesc *f);
+const struct mdp_format *mdp_try_fmt_mplane(struct v4l2_format *f,
+ struct mdp_frameparam *param,
+ u32 ctx_id);
+enum mdp_ycbcr_profile mdp_map_ycbcr_prof_mplane(struct v4l2_format *f,
+ u32 mdp_color);
+int mdp_try_crop(struct mdp_m2m_ctx *ctx, struct v4l2_rect *r,
+ const struct v4l2_selection *s, struct mdp_frame *frame);
+int mdp_check_scaling_ratio(const struct v4l2_rect *crop,
+ const struct v4l2_rect *compose, s32 rotation,
+ const struct mdp_limit *limit);
+void mdp_set_src_config(struct img_input *in,
+ struct mdp_frame *frame, struct vb2_buffer *vb);
+void mdp_set_dst_config(struct img_output *out,
+ struct mdp_frame *frame, struct vb2_buffer *vb);
+int mdp_frameparam_init(struct mdp_frameparam *param);
+
+#endif /* __MTK_MDP3_REGS_H__ */
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
new file mode 100644
index 000000000000..9f5844385c8f
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#include <linux/remoteproc.h>
+#include <linux/remoteproc/mtk_scp.h>
+#include "mtk-mdp3-vpu.h"
+#include "mtk-mdp3-core.h"
+
+#define MDP_VPU_MESSAGE_TIMEOUT 500U
+#define vpu_alloc_size 0x600000
+
+static inline struct mdp_dev *vpu_to_mdp(struct mdp_vpu_dev *vpu)
+{
+ return container_of(vpu, struct mdp_dev, vpu);
+}
+
+static int mdp_vpu_shared_mem_alloc(struct mdp_vpu_dev *vpu)
+{
+ if (vpu->work && vpu->work_addr)
+ return 0;
+
+ vpu->work = dma_alloc_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
+ &vpu->work_addr, GFP_KERNEL);
+
+	if (!vpu->work)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mdp_vpu_shared_mem_free(struct mdp_vpu_dev *vpu)
+{
+ if (vpu->work && vpu->work_addr)
+ dma_free_coherent(scp_get_device(vpu->scp), vpu_alloc_size,
+ vpu->work, vpu->work_addr);
+}
+
+static void mdp_vpu_ipi_handle_init_ack(void *data, unsigned int len,
+ void *priv)
+{
+ struct mdp_ipi_init_msg *msg = (struct mdp_ipi_init_msg *)data;
+ struct mdp_vpu_dev *vpu =
+ (struct mdp_vpu_dev *)(unsigned long)msg->drv_data;
+
+ if (!vpu->work_size)
+ vpu->work_size = msg->work_size;
+
+ vpu->status = msg->status;
+ complete(&vpu->ipi_acked);
+}
+
+static void mdp_vpu_ipi_handle_deinit_ack(void *data, unsigned int len,
+ void *priv)
+{
+ struct mdp_ipi_deinit_msg *msg = (struct mdp_ipi_deinit_msg *)data;
+ struct mdp_vpu_dev *vpu =
+ (struct mdp_vpu_dev *)(unsigned long)msg->drv_data;
+
+ vpu->status = msg->status;
+ complete(&vpu->ipi_acked);
+}
+
+static void mdp_vpu_ipi_handle_frame_ack(void *data, unsigned int len,
+ void *priv)
+{
+ struct img_sw_addr *addr = (struct img_sw_addr *)data;
+ struct img_ipi_frameparam *param =
+ (struct img_ipi_frameparam *)(unsigned long)addr->va;
+ struct mdp_vpu_ctx *ctx =
+ (struct mdp_vpu_ctx *)(unsigned long)param->drv_data;
+
+ if (param->state) {
+ struct mdp_dev *mdp = vpu_to_mdp(ctx->vpu_dev);
+
+ dev_err(&mdp->pdev->dev, "VPU MDP failure:%d\n", param->state);
+ }
+ ctx->vpu_dev->status = param->state;
+ complete(&ctx->vpu_dev->ipi_acked);
+}
+
+int mdp_vpu_register(struct mdp_dev *mdp)
+{
+ int err;
+ struct mtk_scp *scp = mdp->scp;
+ struct device *dev = &mdp->pdev->dev;
+
+ err = scp_ipi_register(scp, SCP_IPI_MDP_INIT,
+ mdp_vpu_ipi_handle_init_ack, NULL);
+ if (err) {
+ dev_err(dev, "scp_ipi_register failed %d\n", err);
+ goto err_ipi_init;
+ }
+ err = scp_ipi_register(scp, SCP_IPI_MDP_DEINIT,
+ mdp_vpu_ipi_handle_deinit_ack, NULL);
+ if (err) {
+ dev_err(dev, "scp_ipi_register failed %d\n", err);
+ goto err_ipi_deinit;
+ }
+ err = scp_ipi_register(scp, SCP_IPI_MDP_FRAME,
+ mdp_vpu_ipi_handle_frame_ack, NULL);
+ if (err) {
+ dev_err(dev, "scp_ipi_register failed %d\n", err);
+ goto err_ipi_frame;
+ }
+ return 0;
+
+err_ipi_frame:
+ scp_ipi_unregister(scp, SCP_IPI_MDP_DEINIT);
+err_ipi_deinit:
+ scp_ipi_unregister(scp, SCP_IPI_MDP_INIT);
+err_ipi_init:
+
+ return err;
+}
+
+void mdp_vpu_unregister(struct mdp_dev *mdp)
+{
+ scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_INIT);
+ scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_DEINIT);
+ scp_ipi_unregister(mdp->scp, SCP_IPI_MDP_FRAME);
+}
+
+static int mdp_vpu_sendmsg(struct mdp_vpu_dev *vpu, enum scp_ipi_id id,
+ void *buf, unsigned int len)
+{
+ struct mdp_dev *mdp = vpu_to_mdp(vpu);
+ unsigned int t = MDP_VPU_MESSAGE_TIMEOUT;
+ int ret;
+
+ if (!vpu->scp) {
+ dev_dbg(&mdp->pdev->dev, "vpu scp is NULL");
+ return -EINVAL;
+ }
+ ret = scp_ipi_send(vpu->scp, id, buf, len, 2000);
+
+ if (ret) {
+ dev_err(&mdp->pdev->dev, "scp_ipi_send failed %d\n", ret);
+ return -EPERM;
+ }
+ ret = wait_for_completion_timeout(&vpu->ipi_acked,
+ msecs_to_jiffies(t));
+ if (!ret)
+ ret = -ETIME;
+ else if (vpu->status)
+ ret = -EINVAL;
+ else
+ ret = 0;
+ return ret;
+}
+
+int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
+ struct mutex *lock)
+{
+ struct mdp_ipi_init_msg msg = {
+ .drv_data = (unsigned long)vpu,
+ };
+ size_t mem_size;
+ phys_addr_t pool;
+ const size_t pool_size = sizeof(struct mdp_config_pool);
+ struct mdp_dev *mdp = vpu_to_mdp(vpu);
+ int err;
+
+ init_completion(&vpu->ipi_acked);
+ vpu->scp = scp;
+ vpu->lock = lock;
+ vpu->work_size = 0;
+ err = mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_INIT, &msg, sizeof(msg));
+ if (err)
+ goto err_work_size;
+ /* vpu work_size was set in mdp_vpu_ipi_handle_init_ack */
+
+ mem_size = vpu_alloc_size;
+ if (mdp_vpu_shared_mem_alloc(vpu)) {
+ dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
+ goto err_mem_alloc;
+ }
+
+ pool = ALIGN((uintptr_t)vpu->work + vpu->work_size, 8);
+ if (pool + pool_size - (uintptr_t)vpu->work > mem_size) {
+ dev_err(&mdp->pdev->dev,
+ "VPU memory insufficient: %zx + %zx > %zx",
+ vpu->work_size, pool_size, mem_size);
+ err = -ENOMEM;
+ goto err_mem_size;
+ }
+
+ dev_dbg(&mdp->pdev->dev,
+ "VPU work:%pK pa:%pad sz:%zx pool:%pa sz:%zx (mem sz:%zx)",
+ vpu->work, &vpu->work_addr, vpu->work_size,
+ &pool, pool_size, mem_size);
+ vpu->pool = (struct mdp_config_pool *)(uintptr_t)pool;
+ msg.work_addr = vpu->work_addr;
+ msg.work_size = vpu->work_size;
+ err = mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_INIT, &msg, sizeof(msg));
+ if (err)
+ goto err_work_size;
+
+ memset(vpu->pool, 0, sizeof(*vpu->pool));
+ return 0;
+
+err_work_size:
+ switch (vpu->status) {
+ case -MDP_IPI_EBUSY:
+ err = -EBUSY;
+ break;
+ case -MDP_IPI_ENOMEM:
+ err = -ENOSPC; /* -ENOMEM */
+ break;
+ }
+ return err;
+err_mem_size:
+err_mem_alloc:
+ return err;
+}
+
+int mdp_vpu_dev_deinit(struct mdp_vpu_dev *vpu)
+{
+ struct mdp_ipi_deinit_msg msg = {
+ .drv_data = (unsigned long)vpu,
+ .work_addr = vpu->work_addr,
+ };
+
+ return mdp_vpu_sendmsg(vpu, SCP_IPI_MDP_DEINIT, &msg, sizeof(msg));
+}
+
+static struct img_config *mdp_config_get(struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id, uint32_t *addr)
+{
+ struct img_config *config;
+
+ if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(vpu->lock);
+ vpu->pool->cfg_count[id]++;
+ config = &vpu->pool->configs[id];
+ *addr = vpu->work_addr + ((uintptr_t)config - (uintptr_t)vpu->work);
+ mutex_unlock(vpu->lock);
+
+ return config;
+}
+
+static int mdp_config_put(struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id,
+ const struct img_config *config)
+{
+ int err = 0;
+
+ if (id < 0 || id >= MDP_CONFIG_POOL_SIZE)
+ return -EINVAL;
+ if (vpu->lock)
+ mutex_lock(vpu->lock);
+ if (!vpu->pool->cfg_count[id] || config != &vpu->pool->configs[id])
+ err = -EINVAL;
+ else
+ vpu->pool->cfg_count[id]--;
+ if (vpu->lock)
+ mutex_unlock(vpu->lock);
+ return err;
+}
+
+int mdp_vpu_ctx_init(struct mdp_vpu_ctx *ctx, struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id)
+{
+ ctx->config = mdp_config_get(vpu, id, &ctx->inst_addr);
+ if (IS_ERR(ctx->config)) {
+ int err = PTR_ERR(ctx->config);
+
+ ctx->config = NULL;
+ return err;
+ }
+ ctx->config_id = id;
+ ctx->vpu_dev = vpu;
+ return 0;
+}
+
+int mdp_vpu_ctx_deinit(struct mdp_vpu_ctx *ctx)
+{
+ int err = mdp_config_put(ctx->vpu_dev, ctx->config_id, ctx->config);
+
+ ctx->config_id = 0;
+ ctx->config = NULL;
+ ctx->inst_addr = 0;
+ return err;
+}
+
+int mdp_vpu_process(struct mdp_vpu_ctx *ctx, struct img_ipi_frameparam *param)
+{
+ struct mdp_vpu_dev *vpu = ctx->vpu_dev;
+ struct mdp_dev *mdp = vpu_to_mdp(vpu);
+ struct img_sw_addr addr;
+
+ if (!ctx->vpu_dev->work || !ctx->vpu_dev->work_addr) {
+ if (mdp_vpu_shared_mem_alloc(vpu)) {
+ dev_err(&mdp->pdev->dev, "VPU memory alloc fail!");
+ return -ENOMEM;
+ }
+ }
+ memset((void *)ctx->vpu_dev->work, 0, ctx->vpu_dev->work_size);
+ memset(ctx->config, 0, sizeof(*ctx->config));
+ param->config_data.va = (unsigned long)ctx->config;
+ param->config_data.pa = ctx->inst_addr;
+ param->drv_data = (unsigned long)ctx;
+
+ memcpy((void *)ctx->vpu_dev->work, param, sizeof(*param));
+ addr.pa = ctx->vpu_dev->work_addr;
+ addr.va = (uintptr_t)ctx->vpu_dev->work;
+ return mdp_vpu_sendmsg(ctx->vpu_dev, SCP_IPI_MDP_FRAME,
+ &addr, sizeof(addr));
+}
diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h
new file mode 100644
index 000000000000..244b3a32d689
--- /dev/null
+++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
+ */
+
+#ifndef __MTK_MDP3_VPU_H__
+#define __MTK_MDP3_VPU_H__
+
+#include <linux/platform_device.h>
+#include "mtk-img-ipi.h"
+
+enum mdp_ipi_result {
+ MDP_IPI_SUCCESS = 0,
+ MDP_IPI_ENOMEM = 12,
+ MDP_IPI_EBUSY = 16,
+ MDP_IPI_EINVAL = 22,
+ MDP_IPI_EMINST = 24,
+ MDP_IPI_ERANGE = 34,
+ MDP_IPI_NR_ERRNO,
+
+ MDP_IPI_EOTHER = MDP_IPI_NR_ERRNO,
+ MDP_IPI_PATH_CANT_MERGE,
+ MDP_IPI_OP_FAIL,
+};
+
+struct mdp_ipi_init_msg {
+ u32 status;
+ u64 drv_data;
+ u32 work_addr; /* [in] working buffer address */
+ u32 work_size; /* [in] working buffer size */
+} __packed;
+
+struct mdp_ipi_deinit_msg {
+ u32 status;
+ u64 drv_data;
+ u32 work_addr;
+} __packed;
+
+enum mdp_config_id {
+ MDP_DEV_M2M = 0,
+ MDP_CONFIG_POOL_SIZE /* ALWAYS keep at the end */
+};
+
+struct mdp_config_pool {
+ u64 cfg_count[MDP_CONFIG_POOL_SIZE];
+ struct img_config configs[MDP_CONFIG_POOL_SIZE];
+};
+
+struct mdp_vpu_dev {
+	/* lock protecting access to the vpu working buffer info */
+ struct mutex *lock;
+ struct mtk_scp *scp;
+ struct completion ipi_acked;
+ void *work;
+ dma_addr_t work_addr;
+ size_t work_size;
+ struct mdp_config_pool *pool;
+ u32 status;
+};
+
+struct mdp_vpu_ctx {
+ struct mdp_vpu_dev *vpu_dev;
+ u32 config_id;
+ struct img_config *config;
+ u32 inst_addr;
+};
+
+void mdp_vpu_shared_mem_free(struct mdp_vpu_dev *vpu);
+int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp,
+ struct mutex *lock /* for sync */);
+int mdp_vpu_dev_deinit(struct mdp_vpu_dev *vpu);
+int mdp_vpu_ctx_init(struct mdp_vpu_ctx *ctx, struct mdp_vpu_dev *vpu,
+ enum mdp_config_id id);
+int mdp_vpu_ctx_deinit(struct mdp_vpu_ctx *ctx);
+int mdp_vpu_process(struct mdp_vpu_ctx *ctx, struct img_ipi_frameparam *param);
+
+#endif /* __MTK_MDP3_VPU_H__ */
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
index 7d194a476713..641f533c417f 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec.c
@@ -227,6 +227,8 @@ static int mtk_vcodec_dec_get_chip_name(void *priv)
return 8195;
else if (of_device_is_compatible(dev->of_node, "mediatek,mt8186-vcodec-dec"))
return 8186;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8188-vcodec-dec"))
+ return 8188;
else
return 8173;
}
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
index e0b6ae9d6caa..174a6eec2f54 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_dec_drv.c
@@ -478,6 +478,10 @@ static const struct of_device_id mtk_vcodec_match[] = {
.compatible = "mediatek,mt8195-vcodec-dec",
.data = &mtk_lat_sig_core_pdata,
},
+ {
+ .compatible = "mediatek,mt8188-vcodec-dec",
+ .data = &mtk_lat_sig_core_pdata,
+ },
{},
};
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
index ef4584a46417..9acab54fd650 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_drv.h
@@ -278,6 +278,7 @@ struct vdec_pic_info {
* @hw_id: hardware index used to identify different hardware.
*
* @msg_queue: msg queue used to store lat buffer information.
+ * @q_mutex: vb2_queue mutex.
*/
struct mtk_vcodec_ctx {
enum mtk_instance_type type;
@@ -324,6 +325,8 @@ struct mtk_vcodec_ctx {
int hw_id;
struct vdec_msg_queue msg_queue;
+
+ struct mutex q_mutex;
};
/*
@@ -401,6 +404,7 @@ struct mtk_vcodec_dec_pdata {
* @output_formats: array of supported output formats
* @num_output_formats: number of entries in output_formats
* @core_id: stand for h264 or vp8 encode index
+ * @uses_34bit: whether the encoder uses 34-bit iova
*/
struct mtk_vcodec_enc_pdata {
bool uses_ext;
@@ -411,9 +415,11 @@ struct mtk_vcodec_enc_pdata {
const struct mtk_video_fmt *output_formats;
size_t num_output_formats;
int core_id;
+ bool uses_34bit;
};
#define MTK_ENC_CTX_IS_EXT(ctx) ((ctx)->dev->venc_pdata->uses_ext)
+#define MTK_ENC_IOVA_IS_34BIT(ctx) ((ctx)->dev->venc_pdata->uses_34bit)
/**
* struct mtk_vcodec_dev - driver data
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
index 25e816863597..d810a78dde51 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc.c
@@ -225,6 +225,8 @@ static int mtk_vcodec_enc_get_chip_name(void *priv)
return 8192;
else if (of_device_is_compatible(dev->of_node, "mediatek,mt8195-vcodec-enc"))
return 8195;
+ else if (of_device_is_compatible(dev->of_node, "mediatek,mt8188-vcodec-enc"))
+ return 8188;
else
return 8173;
}
@@ -503,13 +505,13 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
f->fmt.pix.pixelformat = fmt->fourcc;
}
- ret = vidioc_try_fmt_out(ctx, f, fmt);
+ q_data->visible_width = f->fmt.pix_mp.width;
+ q_data->visible_height = f->fmt.pix_mp.height;
+ q_data->fmt = fmt;
+ ret = vidioc_try_fmt_out(ctx, f, q_data->fmt);
if (ret)
return ret;
- q_data->fmt = fmt;
- q_data->visible_width = f->fmt.pix_mp.width;
- q_data->visible_height = f->fmt.pix_mp.height;
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
@@ -1300,7 +1302,7 @@ void mtk_vcodec_enc_set_default_params(struct mtk_vcodec_ctx *ctx)
{
struct mtk_q_data *q_data;
- ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
+ ctx->m2m_ctx->q_lock = &ctx->q_mutex;
ctx->fh.m2m_ctx = ctx->m2m_ctx;
ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
INIT_WORK(&ctx->encode_work, mtk_venc_worker);
@@ -1403,7 +1405,8 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
V4L2_MPEG_VIDEO_VP8_PROFILE_0, 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
V4L2_MPEG_VIDEO_BITRATE_MODE_CBR,
- 0, V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
+ ~(1 << V4L2_MPEG_VIDEO_BITRATE_MODE_CBR),
+ V4L2_MPEG_VIDEO_BITRATE_MODE_CBR);
if (handler->error) {
@@ -1435,7 +1438,7 @@ int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->ops = &mtk_venc_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- src_vq->lock = &ctx->dev->dev_mutex;
+ src_vq->lock = &ctx->q_mutex;
src_vq->dev = &ctx->dev->plat_dev->dev;
ret = vb2_queue_init(src_vq);
@@ -1449,7 +1452,7 @@ int mtk_vcodec_enc_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->ops = &mtk_venc_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
- dst_vq->lock = &ctx->dev->dev_mutex;
+ dst_vq->lock = &ctx->q_mutex;
dst_vq->dev = &ctx->dev->plat_dev->dev;
return vb2_queue_init(dst_vq);
diff --git a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
index 95e8c29ccc65..9095186d5495 100644
--- a/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mediatek/vcodec/mtk_vcodec_enc_drv.c
@@ -130,6 +130,7 @@ static int fops_vcodec_open(struct file *file)
INIT_LIST_HEAD(&ctx->list);
ctx->dev = dev;
init_waitqueue_head(&ctx->queue[0]);
+ mutex_init(&ctx->q_mutex);
ctx->type = MTK_INST_ENCODER;
ret = mtk_vcodec_enc_ctrls_setup(ctx);
@@ -228,7 +229,6 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
{
struct mtk_vcodec_dev *dev;
struct video_device *vfd_enc;
- struct resource *res;
phandle rproc_phandle;
enum mtk_vcodec_fw_type fw_type;
int ret;
@@ -272,14 +272,12 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_res;
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get irq resource");
- ret = -ENOENT;
+ dev->enc_irq = platform_get_irq(pdev, 0);
+ if (dev->enc_irq < 0) {
+ ret = dev->enc_irq;
goto err_res;
}
- dev->enc_irq = platform_get_irq(pdev, 0);
irq_set_status_flags(dev->enc_irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&pdev->dev, dev->enc_irq,
mtk_vcodec_enc_irq_handler,
@@ -406,6 +404,18 @@ static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
.core_id = VENC_SYS,
};
+static const struct mtk_vcodec_enc_pdata mt8188_pdata = {
+ .uses_ext = true,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 50000000,
+ .core_id = VENC_SYS,
+ .uses_34bit = true,
+};
+
static const struct mtk_vcodec_enc_pdata mt8192_pdata = {
.uses_ext = true,
.capture_formats = mtk_video_formats_capture_h264,
@@ -434,6 +444,7 @@ static const struct of_device_id mtk_vcodec_enc_match[] = {
{.compatible = "mediatek,mt8173-vcodec-enc-vp8",
.data = &mt8173_vp8_pdata},
{.compatible = "mediatek,mt8183-vcodec-enc", .data = &mt8183_pdata},
+ {.compatible = "mediatek,mt8188-vcodec-enc", .data = &mt8188_pdata},
{.compatible = "mediatek,mt8192-vcodec-enc", .data = &mt8192_pdata},
{.compatible = "mediatek,mt8195-vcodec-enc", .data = &mt8195_pdata},
{},
diff --git a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
index 4d9b8798dffe..13c4f860fa69 100644
--- a/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc/venc_h264_if.c
@@ -127,6 +127,72 @@ struct venc_h264_vsi {
struct venc_h264_vpu_buf work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
};
+/**
+ * struct venc_h264_vpu_config_ext - Structure for h264 encoder configuration
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @input_fourcc: input fourcc
+ * @bitrate: target bitrate (in bps)
+ * @pic_w: picture width. The picture size is the visible stream resolution, in
+ *	   pixels, to be used for display purposes; it must be smaller than or
+ *	   equal to the buffer size.
+ * @pic_h: picture height
+ * @buf_w: buffer width. The buffer size is the stream resolution, in pixels,
+ *	   aligned to hardware requirements.
+ * @buf_h: buffer height
+ * @gop_size: group of pictures (GOP) size (IDR frame period)
+ * @intra_period: intra frame period
+ * @framerate: frame rate in fps
+ * @profile: as specified in standard
+ * @level: as specified in standard
+ * @wfd: WFD mode 1:on, 0:off
+ * @max_qp: max quant parameter
+ * @min_qp: min quant parameter
+ * @reserved: reserved configs
+ */
+struct venc_h264_vpu_config_ext {
+ u32 input_fourcc;
+ u32 bitrate;
+ u32 pic_w;
+ u32 pic_h;
+ u32 buf_w;
+ u32 buf_h;
+ u32 gop_size;
+ u32 intra_period;
+ u32 framerate;
+ u32 profile;
+ u32 level;
+ u32 wfd;
+ u32 max_qp;
+ u32 min_qp;
+ u32 reserved[8];
+};
+
+/**
+ * struct venc_h264_vpu_buf_34 - Structure for 34-bit buffer information
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is write/reader on this item
+ * @iova: 34-bit IO virtual address
+ * @vpua: VPU side memory addr which is used by RC_CODE
+ * @size: buffer size (in bytes)
+ */
+struct venc_h264_vpu_buf_34 {
+ u64 iova;
+ u32 vpua;
+ u32 size;
+};
+
+/**
+ * struct venc_h264_vsi_34 - Structure for VPU driver control and info share
+ * Used for 34-bit iova sharing
+ * @config: h264 encoder configuration
+ * @work_bufs: working buffer information in VPU side
+ */
+struct venc_h264_vsi_34 {
+ struct venc_h264_vpu_config_ext config;
+ struct venc_h264_vpu_buf_34 work_bufs[VENC_H264_VPU_WORK_BUF_MAX];
+};
+
/*
* struct venc_h264_inst - h264 encoder AP driver instance
* @hw_base: h264 encoder hardware register base
@@ -140,6 +206,8 @@ struct venc_h264_vsi {
* @vpu_inst: VPU instance to exchange information between AP and VPU
* @vsi: driver structure allocated by VPU side and shared to AP side for
* control and info share
+ * @vsi_34: driver structure allocated by VPU side and shared to AP side for
+ * control and info share, used for 34-bit iova sharing.
* @ctx: context for v4l2 layer integration
*/
struct venc_h264_inst {
@@ -152,6 +220,7 @@ struct venc_h264_inst {
unsigned int prepend_hdr;
struct venc_vpu_inst vpu_inst;
struct venc_h264_vsi *vsi;
+ struct venc_h264_vsi_34 *vsi_34;
struct mtk_vcodec_ctx *ctx;
};
@@ -244,14 +313,21 @@ static void h264_enc_free_work_buf(struct venc_h264_inst *inst)
mtk_vcodec_debug_leave(inst);
}
-static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
+static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst, bool is_34bit)
{
+ struct venc_h264_vpu_buf *wb = NULL;
+ struct venc_h264_vpu_buf_34 *wb_34 = NULL;
int i;
+ u32 vpua, wb_size;
int ret = 0;
- struct venc_h264_vpu_buf *wb = inst->vsi->work_bufs;
mtk_vcodec_debug_enter(inst);
+ if (is_34bit)
+ wb_34 = inst->vsi_34->work_bufs;
+ else
+ wb = inst->vsi->work_bufs;
+
for (i = 0; i < VENC_H264_VPU_WORK_BUF_MAX; i++) {
/*
* This 'wb' structure is set by VPU side and shared to AP for
@@ -269,13 +345,22 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
* address and do some memcpy access to move to bitstream buffer
* assigned by v4l2 layer.
*/
- inst->work_bufs[i].size = wb[i].size;
+ if (is_34bit) {
+ inst->work_bufs[i].size = wb_34[i].size;
+ vpua = wb_34[i].vpua;
+ wb_size = wb_34[i].size;
+ } else {
+ inst->work_bufs[i].size = wb[i].size;
+ vpua = wb[i].vpua;
+ wb_size = wb[i].size;
+ }
+
if (i == VENC_H264_VPU_WORK_BUF_SKIP_FRAME) {
struct mtk_vcodec_fw *handler;
handler = inst->vpu_inst.ctx->dev->fw_handler;
inst->work_bufs[i].va =
- mtk_vcodec_fw_map_dm_addr(handler, wb[i].vpua);
+ mtk_vcodec_fw_map_dm_addr(handler, vpua);
inst->work_bufs[i].dma_addr = 0;
} else {
ret = mtk_vcodec_mem_alloc(inst->ctx,
@@ -297,12 +382,14 @@ static int h264_enc_alloc_work_buf(struct venc_h264_inst *inst)
handler = inst->vpu_inst.ctx->dev->fw_handler;
tmp_va = mtk_vcodec_fw_map_dm_addr(handler,
- wb[i].vpua);
- memcpy(inst->work_bufs[i].va, tmp_va,
- wb[i].size);
+ vpua);
+ memcpy(inst->work_bufs[i].va, tmp_va, wb_size);
}
}
- wb[i].iova = inst->work_bufs[i].dma_addr;
+ if (is_34bit)
+ wb_34[i].iova = inst->work_bufs[i].dma_addr;
+ else
+ wb[i].iova = inst->work_bufs[i].dma_addr;
mtk_vcodec_debug(inst,
"work_buf[%d] va=0x%p iova=%pad size=%zu",
@@ -342,22 +429,22 @@ static unsigned int h264_enc_wait_venc_done(struct venc_h264_inst *inst)
return irq_status;
}
-static int h264_frame_type(struct venc_h264_inst *inst)
+static int h264_frame_type(unsigned int frm_cnt, unsigned int gop_size,
+ unsigned int intra_period)
{
- if ((inst->vsi->config.gop_size != 0 &&
- (inst->frm_cnt % inst->vsi->config.gop_size) == 0) ||
- (inst->frm_cnt == 0 && inst->vsi->config.gop_size == 0)) {
+ if ((gop_size != 0 && (frm_cnt % gop_size) == 0) ||
+ (frm_cnt == 0 && gop_size == 0)) {
/* IDR frame */
return VENC_H264_IDR_FRM;
- } else if ((inst->vsi->config.intra_period != 0 &&
- (inst->frm_cnt % inst->vsi->config.intra_period) == 0) ||
- (inst->frm_cnt == 0 && inst->vsi->config.intra_period == 0)) {
+ } else if ((intra_period != 0 && (frm_cnt % intra_period) == 0) ||
+ (frm_cnt == 0 && intra_period == 0)) {
/* I frame */
return VENC_H264_I_FRM;
} else {
return VENC_H264_P_FRM; /* Note: B frames are not supported */
}
}
+
static int h264_encode_sps(struct venc_h264_inst *inst,
struct mtk_vcodec_mem *bs_buf,
unsigned int *bs_size)
@@ -438,18 +525,32 @@ static int h264_encode_frame(struct venc_h264_inst *inst,
unsigned int *bs_size)
{
int ret = 0;
+ unsigned int gop_size;
+ unsigned int intra_period;
unsigned int irq_status;
struct venc_frame_info frame_info;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
mtk_vcodec_debug_enter(inst);
mtk_vcodec_debug(inst, "frm_cnt = %d\n ", inst->frm_cnt);
+
+ if (MTK_ENC_IOVA_IS_34BIT(ctx)) {
+ gop_size = inst->vsi_34->config.gop_size;
+ intra_period = inst->vsi_34->config.intra_period;
+ } else {
+ gop_size = inst->vsi->config.gop_size;
+ intra_period = inst->vsi->config.intra_period;
+ }
frame_info.frm_count = inst->frm_cnt;
frame_info.skip_frm_count = inst->skip_frm_cnt;
- frame_info.frm_type = h264_frame_type(inst);
+ frame_info.frm_type = h264_frame_type(inst->frm_cnt, gop_size,
+ intra_period);
mtk_vcodec_debug(inst, "frm_count = %d,skip_frm_count =%d,frm_type=%d.\n",
frame_info.frm_count, frame_info.skip_frm_count,
frame_info.frm_type);
- ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME, frm_buf, bs_buf, &frame_info);
+
+ ret = vpu_enc_encode(&inst->vpu_inst, H264_BS_MODE_FRAME,
+ frm_buf, bs_buf, &frame_info);
if (ret)
return ret;
@@ -517,7 +618,10 @@ static int h264_enc_init(struct mtk_vcodec_ctx *ctx)
ret = vpu_enc_init(&inst->vpu_inst);
- inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi;
+ if (MTK_ENC_IOVA_IS_34BIT(ctx))
+ inst->vsi_34 = (struct venc_h264_vsi_34 *)inst->vpu_inst.vsi;
+ else
+ inst->vsi = (struct venc_h264_vsi *)inst->vpu_inst.vsi;
mtk_vcodec_debug_leave(inst);
@@ -624,31 +728,61 @@ encode_err:
return ret;
}
+static void h264_enc_set_vsi_configs(struct venc_h264_inst *inst,
+ struct venc_enc_param *enc_prm)
+{
+ inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi->config.bitrate = enc_prm->bitrate;
+ inst->vsi->config.pic_w = enc_prm->width;
+ inst->vsi->config.pic_h = enc_prm->height;
+ inst->vsi->config.buf_w = enc_prm->buf_width;
+ inst->vsi->config.buf_h = enc_prm->buf_height;
+ inst->vsi->config.gop_size = enc_prm->gop_size;
+ inst->vsi->config.framerate = enc_prm->frm_rate;
+ inst->vsi->config.intra_period = enc_prm->intra_period;
+ inst->vsi->config.profile =
+ h264_get_profile(inst, enc_prm->h264_profile);
+ inst->vsi->config.level =
+ h264_get_level(inst, enc_prm->h264_level);
+ inst->vsi->config.wfd = 0;
+}
+
+static void h264_enc_set_vsi_34_configs(struct venc_h264_inst *inst,
+ struct venc_enc_param *enc_prm)
+{
+ inst->vsi_34->config.input_fourcc = enc_prm->input_yuv_fmt;
+ inst->vsi_34->config.bitrate = enc_prm->bitrate;
+ inst->vsi_34->config.pic_w = enc_prm->width;
+ inst->vsi_34->config.pic_h = enc_prm->height;
+ inst->vsi_34->config.buf_w = enc_prm->buf_width;
+ inst->vsi_34->config.buf_h = enc_prm->buf_height;
+ inst->vsi_34->config.gop_size = enc_prm->gop_size;
+ inst->vsi_34->config.framerate = enc_prm->frm_rate;
+ inst->vsi_34->config.intra_period = enc_prm->intra_period;
+ inst->vsi_34->config.profile =
+ h264_get_profile(inst, enc_prm->h264_profile);
+ inst->vsi_34->config.level =
+ h264_get_level(inst, enc_prm->h264_level);
+ inst->vsi_34->config.wfd = 0;
+}
+
static int h264_enc_set_param(void *handle,
enum venc_set_param_type type,
struct venc_enc_param *enc_prm)
{
int ret = 0;
struct venc_h264_inst *inst = (struct venc_h264_inst *)handle;
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+ const bool is_34bit = MTK_ENC_IOVA_IS_34BIT(ctx);
mtk_vcodec_debug(inst, "->type=%d", type);
switch (type) {
case VENC_SET_PARAM_ENC:
- inst->vsi->config.input_fourcc = enc_prm->input_yuv_fmt;
- inst->vsi->config.bitrate = enc_prm->bitrate;
- inst->vsi->config.pic_w = enc_prm->width;
- inst->vsi->config.pic_h = enc_prm->height;
- inst->vsi->config.buf_w = enc_prm->buf_width;
- inst->vsi->config.buf_h = enc_prm->buf_height;
- inst->vsi->config.gop_size = enc_prm->gop_size;
- inst->vsi->config.framerate = enc_prm->frm_rate;
- inst->vsi->config.intra_period = enc_prm->intra_period;
- inst->vsi->config.profile =
- h264_get_profile(inst, enc_prm->h264_profile);
- inst->vsi->config.level =
- h264_get_level(inst, enc_prm->h264_level);
- inst->vsi->config.wfd = 0;
+ if (is_34bit)
+ h264_enc_set_vsi_34_configs(inst, enc_prm);
+ else
+ h264_enc_set_vsi_configs(inst, enc_prm);
ret = vpu_enc_set_param(&inst->vpu_inst, type, enc_prm);
if (ret)
break;
@@ -656,7 +790,7 @@ static int h264_enc_set_param(void *handle,
h264_enc_free_work_buf(inst);
inst->work_buf_allocated = false;
}
- ret = h264_enc_alloc_work_buf(inst);
+ ret = h264_enc_alloc_work_buf(inst, is_34bit);
if (ret)
break;
inst->work_buf_allocated = true;
diff --git a/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h b/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
index 587a2cf15b76..bb16d96a7f57 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
+++ b/drivers/media/platform/mediatek/vcodec/venc_ipi_msg.h
@@ -101,6 +101,30 @@ struct venc_ap_ipi_msg_enc_ext {
};
/**
+ * struct venc_ap_ipi_msg_enc_ext_34 - AP to SCP extended enc cmd structure
+ * @msg_id: message id (AP_IPIMSG_XXX_ENC_ENCODE)
+ * @vpu_inst_addr: VPU encoder instance addr
+ * @bs_mode: bitstream mode for h264
+ * @reserved: for struct padding
+ * @input_addr: input frame buffer 34-bit address
+ * @bs_addr: output bitstream buffer 34-bit address
+ * @bs_size: bitstream buffer size
+ * @data_item: number of items in the data array
+ * @data: data array to store the set parameters
+ */
+struct venc_ap_ipi_msg_enc_ext_34 {
+ u32 msg_id;
+ u32 vpu_inst_addr;
+ u32 bs_mode;
+ u32 reserved;
+ u64 input_addr[3];
+ u64 bs_addr;
+ u32 bs_size;
+ u32 data_item;
+ u32 data[32];
+};
+
+/**
* struct venc_ap_ipi_msg_deinit - AP to VPU deinit cmd structure
* @msg_id: message id (AP_IPIMSG_XXX_ENC_DEINIT)
* @vpu_inst_addr: VPU encoder instance addr
diff --git a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
index d3570c4c177d..09e7eaa25aab 100644
--- a/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
+++ b/drivers/media/platform/mediatek/vcodec/venc_vpu_if.c
@@ -222,10 +222,11 @@ int vpu_enc_set_param(struct venc_vpu_inst *vpu,
return 0;
}
-int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
- struct venc_frm_buf *frm_buf,
- struct mtk_vcodec_mem *bs_buf,
- struct venc_frame_info *frame_info)
+static int vpu_enc_encode_32bits(struct venc_vpu_inst *vpu,
+ unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_frame_info *frame_info)
{
const bool is_ext = MTK_ENC_CTX_IS_EXT(vpu->ctx);
size_t msg_size = is_ext ?
@@ -267,6 +268,73 @@ int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
return -EINVAL;
}
+ return 0;
+}
+
+static int vpu_enc_encode_34bits(struct venc_vpu_inst *vpu,
+ unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_frame_info *frame_info)
+{
+ struct venc_ap_ipi_msg_enc_ext_34 out;
+ size_t msg_size = sizeof(struct venc_ap_ipi_msg_enc_ext_34);
+
+ mtk_vcodec_debug(vpu, "bs_mode %d ->", bs_mode);
+
+ memset(&out, 0, sizeof(out));
+ out.msg_id = AP_IPIMSG_ENC_ENCODE;
+ out.vpu_inst_addr = vpu->inst_addr;
+ out.bs_mode = bs_mode;
+
+ if (frm_buf) {
+ if ((frm_buf->fb_addr[0].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[1].dma_addr % 16 == 0) &&
+ (frm_buf->fb_addr[2].dma_addr % 16 == 0)) {
+ out.input_addr[0] = frm_buf->fb_addr[0].dma_addr;
+ out.input_addr[1] = frm_buf->fb_addr[1].dma_addr;
+ out.input_addr[2] = frm_buf->fb_addr[2].dma_addr;
+ } else {
+			mtk_vcodec_err(vpu, "dma_addr not aligned to 16");
+ return -EINVAL;
+ }
+ }
+ if (bs_buf) {
+ out.bs_addr = bs_buf->dma_addr;
+ out.bs_size = bs_buf->size;
+ }
+ if (frame_info) {
+ out.data_item = 3;
+ out.data[0] = frame_info->frm_count;
+ out.data[1] = frame_info->skip_frm_count;
+ out.data[2] = frame_info->frm_type;
+ }
+ if (vpu_enc_send_msg(vpu, &out, msg_size)) {
+ mtk_vcodec_err(vpu, "AP_IPIMSG_ENC_ENCODE %d fail",
+ bs_mode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int vpu_enc_encode(struct venc_vpu_inst *vpu, unsigned int bs_mode,
+ struct venc_frm_buf *frm_buf,
+ struct mtk_vcodec_mem *bs_buf,
+ struct venc_frame_info *frame_info)
+{
+ int ret;
+
+ if (MTK_ENC_IOVA_IS_34BIT(vpu->ctx))
+ ret = vpu_enc_encode_34bits(vpu, bs_mode,
+ frm_buf, bs_buf, frame_info);
+ else
+ ret = vpu_enc_encode_32bits(vpu, bs_mode,
+ frm_buf, bs_buf, frame_info);
+
+ if (ret)
+ return ret;
+
mtk_vcodec_debug(vpu, "bs_mode %d state %d size %d key_frm %d <-",
bs_mode, vpu->state, vpu->bs_size, vpu->is_key_frm);
diff --git a/drivers/media/platform/nxp/Kconfig b/drivers/media/platform/nxp/Kconfig
index 1ac0a6e91111..5917634889b5 100644
--- a/drivers/media/platform/nxp/Kconfig
+++ b/drivers/media/platform/nxp/Kconfig
@@ -15,18 +15,6 @@ config VIDEO_IMX_MIPI_CSIS
Video4Linux2 sub-device driver for the MIPI CSI-2 CSIS receiver
v3.3/v3.6.3 found on some i.MX7 and i.MX8 SoCs.
-config VIDEO_VIU
- tristate "NXP VIU Video Driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV && (PPC_MPC512x || COMPILE_TEST) && I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Support for Freescale VIU video driver. This device captures
- video data, or overlays video on DIU frame buffer.
-
- Say Y here if you want to enable VIU device on MPC5121e Rev2+.
- In doubt, say N.
-
# mem2mem drivers
config VIDEO_IMX_PXP
@@ -51,4 +39,5 @@ config VIDEO_MX2_EMMAPRP
memory to memory. Operations include resizing and format
conversion.
+source "drivers/media/platform/nxp/dw100/Kconfig"
source "drivers/media/platform/nxp/imx-jpeg/Kconfig"
diff --git a/drivers/media/platform/nxp/Makefile b/drivers/media/platform/nxp/Makefile
index efc38c6578ce..81ab304ef31c 100644
--- a/drivers/media/platform/nxp/Makefile
+++ b/drivers/media/platform/nxp/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-y += dw100/
obj-y += imx-jpeg/
obj-$(CONFIG_VIDEO_IMX_MIPI_CSIS) += imx-mipi-csis.o
obj-$(CONFIG_VIDEO_IMX_PXP) += imx-pxp.o
obj-$(CONFIG_VIDEO_MX2_EMMAPRP) += mx2_emmaprp.o
-obj-$(CONFIG_VIDEO_VIU) += fsl-viu.o
diff --git a/drivers/media/platform/nxp/dw100/Kconfig b/drivers/media/platform/nxp/dw100/Kconfig
new file mode 100644
index 000000000000..cd4531bb3110
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/Kconfig
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config VIDEO_DW100
+ tristate "NXP i.MX DW100 dewarper"
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ depends on ARCH_MXC || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select V4L2_MEM2MEM_DEV
+ select VIDEOBUF2_DMA_CONTIG
+ help
+ DW100 is a memory-to-memory engine performing geometrical
+ transformation on source images through a programmable dewarping map.
+
+ To compile this driver as a module, choose M here: the module
+ will be called dw100.
diff --git a/drivers/media/platform/nxp/dw100/Makefile b/drivers/media/platform/nxp/dw100/Makefile
new file mode 100644
index 000000000000..49db80589e9a
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0+
+
+obj-$(CONFIG_VIDEO_DW100) += dw100.o
diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c
new file mode 100644
index 000000000000..b3b057798ab6
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/dw100.c
@@ -0,0 +1,1707 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DW100 Hardware dewarper
+ *
+ * Copyright 2022 NXP
+ * Author: Xavier Roumegue (xavier.roumegue@oss.nxp.com)
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include <uapi/linux/dw100.h>
+
+#include "dw100_regs.h"
+
+#define DRV_NAME "dw100"
+
+#define DW100_MIN_W 176u
+#define DW100_MIN_H 144u
+#define DW100_MAX_W 4096u
+#define DW100_MAX_H 3072u
+#define DW100_ALIGN_W 3
+#define DW100_ALIGN_H 3
+
+#define DW100_BLOCK_SIZE 16
+
+#define DW100_DEF_W 640u
+#define DW100_DEF_H 480u
+#define DW100_DEF_LUT_W (DIV_ROUND_UP(DW100_DEF_W, DW100_BLOCK_SIZE) + 1)
+#define DW100_DEF_LUT_H (DIV_ROUND_UP(DW100_DEF_H, DW100_BLOCK_SIZE) + 1)
+
+/*
+ * 16 controls have been reserved for this driver for future extension, but
+ * let's limit the related driver allocation to the number of controls actually
+ * in use.
+ */
+#define DW100_MAX_CTRLS 1
+#define DW100_CTRL_DEWARPING_MAP 0
+
+enum {
+ DW100_QUEUE_SRC = 0,
+ DW100_QUEUE_DST = 1,
+};
+
+enum {
+ DW100_FMT_CAPTURE = BIT(0),
+ DW100_FMT_OUTPUT = BIT(1),
+};
+
+struct dw100_device {
+ struct platform_device *pdev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_device v4l2_dev;
+ struct video_device vfd;
+ struct media_device mdev;
+ /* Video device lock */
+ struct mutex vfd_mutex;
+ void __iomem *mmio;
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct dentry *debugfs_root;
+};
+
+struct dw100_q_data {
+ struct v4l2_pix_format_mplane pix_fmt;
+ unsigned int sequence;
+ const struct dw100_fmt *fmt;
+ struct v4l2_rect crop;
+};
+
+struct dw100_ctx {
+ struct v4l2_fh fh;
+ struct dw100_device *dw_dev;
+ struct v4l2_ctrl_handler hdl;
+ struct v4l2_ctrl *ctrls[DW100_MAX_CTRLS];
+ /* per context m2m queue lock */
+ struct mutex vq_mutex;
+
+ /* Look Up Table for pixel remapping */
+ unsigned int *map;
+ dma_addr_t map_dma;
+ size_t map_size;
+ unsigned int map_width;
+ unsigned int map_height;
+ bool user_map_is_set;
+
+ /* Source and destination queue data */
+ struct dw100_q_data q_data[2];
+};
+
+static const struct v4l2_frmsize_stepwise dw100_frmsize_stepwise = {
+ .min_width = DW100_MIN_W,
+ .min_height = DW100_MIN_H,
+ .max_width = DW100_MAX_W,
+ .max_height = DW100_MAX_H,
+ .step_width = 1UL << DW100_ALIGN_W,
+ .step_height = 1UL << DW100_ALIGN_H,
+};
+
+static const struct dw100_fmt {
+ u32 fourcc;
+ u32 types;
+ u32 reg_format;
+ bool reg_swap_uv;
+} formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV16M,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV61M,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_SP,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .types = DW100_FMT_OUTPUT | DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = false,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = true,
+ }, {
+ .fourcc = V4L2_PIX_FMT_NV21M,
+ .types = DW100_FMT_CAPTURE,
+ .reg_format = DW100_DEWARP_CTRL_FORMAT_YUV420_SP,
+ .reg_swap_uv = true,
+ },
+};
+
+static inline int to_dw100_fmt_type(enum v4l2_buf_type type)
+{
+ if (V4L2_TYPE_IS_OUTPUT(type))
+ return DW100_FMT_OUTPUT;
+ else
+ return DW100_FMT_CAPTURE;
+}
+
+static const struct dw100_fmt *dw100_find_pixel_format(u32 pixel_format,
+ int fmt_type)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct dw100_fmt *fmt = &formats[i];
+
+ if (fmt->fourcc == pixel_format && fmt->types & fmt_type)
+ return fmt;
+ }
+
+ return NULL;
+}
+
+static const struct dw100_fmt *dw100_find_format(struct v4l2_format *f)
+{
+ return dw100_find_pixel_format(f->fmt.pix_mp.pixelformat,
+ to_dw100_fmt_type(f->type));
+}
+
+static inline u32 dw100_read(struct dw100_device *dw_dev, u32 reg)
+{
+ return readl(dw_dev->mmio + reg);
+}
+
+static inline void dw100_write(struct dw100_device *dw_dev, u32 reg, u32 val)
+{
+ writel(val, dw_dev->mmio + reg);
+}
+
+static inline int dw100_dump_regs(struct seq_file *m)
+{
+ struct dw100_device *dw_dev = m->private;
+#define __DECLARE_REG(x) { #x, x }
+ unsigned int i;
+ static const struct reg_desc {
+ const char * const name;
+ unsigned int addr;
+ } dw100_regs[] = {
+ __DECLARE_REG(DW100_DEWARP_ID),
+ __DECLARE_REG(DW100_DEWARP_CTRL),
+ __DECLARE_REG(DW100_MAP_LUT_ADDR),
+ __DECLARE_REG(DW100_MAP_LUT_SIZE),
+ __DECLARE_REG(DW100_MAP_LUT_ADDR2),
+ __DECLARE_REG(DW100_MAP_LUT_SIZE2),
+ __DECLARE_REG(DW100_SRC_IMG_Y_BASE),
+ __DECLARE_REG(DW100_SRC_IMG_UV_BASE),
+ __DECLARE_REG(DW100_SRC_IMG_SIZE),
+ __DECLARE_REG(DW100_SRC_IMG_STRIDE),
+ __DECLARE_REG(DW100_DST_IMG_Y_BASE),
+ __DECLARE_REG(DW100_DST_IMG_UV_BASE),
+ __DECLARE_REG(DW100_DST_IMG_SIZE),
+ __DECLARE_REG(DW100_DST_IMG_STRIDE),
+ __DECLARE_REG(DW100_DST_IMG_Y_SIZE1),
+ __DECLARE_REG(DW100_DST_IMG_UV_SIZE1),
+ __DECLARE_REG(DW100_SRC_IMG_Y_BASE2),
+ __DECLARE_REG(DW100_SRC_IMG_UV_BASE2),
+ __DECLARE_REG(DW100_SRC_IMG_SIZE2),
+ __DECLARE_REG(DW100_SRC_IMG_STRIDE2),
+ __DECLARE_REG(DW100_DST_IMG_Y_BASE2),
+ __DECLARE_REG(DW100_DST_IMG_UV_BASE2),
+ __DECLARE_REG(DW100_DST_IMG_SIZE2),
+ __DECLARE_REG(DW100_DST_IMG_STRIDE2),
+ __DECLARE_REG(DW100_DST_IMG_Y_SIZE2),
+ __DECLARE_REG(DW100_DST_IMG_UV_SIZE2),
+ __DECLARE_REG(DW100_SWAP_CONTROL),
+ __DECLARE_REG(DW100_VERTICAL_SPLIT_LINE),
+ __DECLARE_REG(DW100_HORIZON_SPLIT_LINE),
+ __DECLARE_REG(DW100_SCALE_FACTOR),
+ __DECLARE_REG(DW100_ROI_START),
+ __DECLARE_REG(DW100_BOUNDARY_PIXEL),
+ __DECLARE_REG(DW100_INTERRUPT_STATUS),
+ __DECLARE_REG(DW100_BUS_CTRL),
+ __DECLARE_REG(DW100_BUS_CTRL1),
+ __DECLARE_REG(DW100_BUS_TIME_OUT_CYCLE),
+ };
+
+ for (i = 0; i < ARRAY_SIZE(dw100_regs); i++)
+ seq_printf(m, "%s: %#x\n", dw100_regs[i].name,
+ dw100_read(dw_dev, dw100_regs[i].addr));
+
+ return 0;
+}
+
+static inline struct dw100_ctx *dw100_file2ctx(struct file *file)
+{
+ return container_of(file->private_data, struct dw100_ctx, fh);
+}
+
+static struct dw100_q_data *dw100_get_q_data(struct dw100_ctx *ctx,
+ enum v4l2_buf_type type)
+{
+ if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return &ctx->q_data[DW100_QUEUE_SRC];
+ else
+ return &ctx->q_data[DW100_QUEUE_DST];
+}
+
+static u32 dw100_get_n_vertices_from_length(u32 length)
+{
+ return DIV_ROUND_UP(length, DW100_BLOCK_SIZE) + 1;
+}
+
+static u16 dw100_map_convert_to_uq12_4(u32 a)
+{
+ return (u16)((a & 0xfff) << 4);
+}
+
+static u32 dw100_map_format_coordinates(u16 xq, u16 yq)
+{
+ return (u32)((yq << 16) | xq);
+}
+
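+/*
+ * Worked example for the two helpers above (illustrative only; the values
+ * follow directly from the code, not from the hardware documentation):
+ * dw100_map_convert_to_uq12_4(640) = 640 << 4 = 10240 and
+ * dw100_map_convert_to_uq12_4(480) = 7680, so
+ * dw100_map_format_coordinates(10240, 7680) = (7680 << 16) | 10240 packs the
+ * source coordinate (640, 480) in UQ12.4 format.
+ */
+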
+static u32 *dw100_get_user_map(struct dw100_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl = ctx->ctrls[DW100_CTRL_DEWARPING_MAP];
+
+ return ctrl->p_cur.p_u32;
+}
+
+/*
+ * Create the dewarp map used by the hardware from the V4L2 control values,
+ * which are either initialized to an identity map or set by the application.
+ */
+static int dw100_create_mapping(struct dw100_ctx *ctx)
+{
+ u32 *user_map;
+
+ if (ctx->map)
+ dma_free_coherent(&ctx->dw_dev->pdev->dev, ctx->map_size,
+ ctx->map, ctx->map_dma);
+
+ ctx->map = dma_alloc_coherent(&ctx->dw_dev->pdev->dev, ctx->map_size,
+ &ctx->map_dma, GFP_KERNEL);
+
+ if (!ctx->map)
+ return -ENOMEM;
+
+ user_map = dw100_get_user_map(ctx);
+ memcpy(ctx->map, user_map, ctx->map_size);
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "%ux%u %s mapping created (d:%pad-c:%p) for stream %ux%u->%ux%u\n",
+ ctx->map_width, ctx->map_height,
+ ctx->user_map_is_set ? "user" : "identity",
+ &ctx->map_dma, ctx->map,
+		ctx->q_data[DW100_QUEUE_SRC].pix_fmt.width,
+		ctx->q_data[DW100_QUEUE_SRC].pix_fmt.height,
+		ctx->q_data[DW100_QUEUE_DST].pix_fmt.width,
+		ctx->q_data[DW100_QUEUE_DST].pix_fmt.height);
+
+ return 0;
+}
+
+static void dw100_destroy_mapping(struct dw100_ctx *ctx)
+{
+ if (ctx->map) {
+ dma_free_coherent(&ctx->dw_dev->pdev->dev, ctx->map_size,
+ ctx->map, ctx->map_dma);
+ ctx->map = NULL;
+ }
+}
+
+static int dw100_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct dw100_ctx *ctx =
+ container_of(ctrl->handler, struct dw100_ctx, hdl);
+
+ switch (ctrl->id) {
+ case V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP:
+ ctx->user_map_is_set = true;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops dw100_ctrl_ops = {
+ .s_ctrl = dw100_s_ctrl,
+};
+
+/*
+ * Initialize the dewarping map with an identity mapping.
+ *
+ * A grid with a 16-pixel cell size is mapped onto the destination image.
+ * The width/height of the last cells may be less than 16 if the destination
+ * image width/height is not divisible by 16. This dewarping grid map
+ * specifies the source image pixel location (x, y) at each grid intersection
+ * point. Bilinear interpolation is used to compute the locations of inner
+ * cell points.
+ *
+ * The coordinates are saved in UQ12.4 fixed point format.
+ */
+static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl,
+ u32 from_idx, u32 elems,
+ union v4l2_ctrl_ptr ptr)
+{
+ struct dw100_ctx *ctx =
+ container_of(ctrl->handler, struct dw100_ctx, hdl);
+
+ u32 sw, sh, mw, mh, idx;
+ u16 qx, qy, qdx, qdy, qsh, qsw;
+ u32 *map = ctrl->p_cur.p_u32;
+
+ sw = ctx->q_data[DW100_QUEUE_SRC].pix_fmt.width;
+ sh = ctx->q_data[DW100_QUEUE_SRC].pix_fmt.height;
+
+ mw = ctrl->dims[0];
+ mh = ctrl->dims[1];
+
+ qsw = dw100_map_convert_to_uq12_4(sw);
+ qsh = dw100_map_convert_to_uq12_4(sh);
+ qdx = qsw / (mw - 1);
+ qdy = qsh / (mh - 1);
+
+ ctx->map_width = mw;
+ ctx->map_height = mh;
+ ctx->map_size = mh * mw * sizeof(u32);
+
+ for (idx = from_idx; idx < elems; idx++) {
+ qy = min_t(u32, (idx / mw) * qdy, qsh);
+ qx = min_t(u32, (idx % mw) * qdx, qsw);
+ map[idx] = dw100_map_format_coordinates(qx, qy);
+ }
+
+ ctx->user_map_is_set = false;
+}
+
+static const struct v4l2_ctrl_type_ops dw100_ctrl_type_ops = {
+ .init = dw100_ctrl_dewarping_map_init,
+ .validate = v4l2_ctrl_type_op_validate,
+ .log = v4l2_ctrl_type_op_log,
+ .equal = v4l2_ctrl_type_op_equal,
+};
+
+static const struct v4l2_ctrl_config controls[] = {
+ [DW100_CTRL_DEWARPING_MAP] = {
+ .ops = &dw100_ctrl_ops,
+ .type_ops = &dw100_ctrl_type_ops,
+ .id = V4L2_CID_DW100_DEWARPING_16x16_VERTEX_MAP,
+ .name = "Dewarping Vertex Map",
+ .type = V4L2_CTRL_TYPE_U32,
+ .min = 0x00000000,
+ .max = 0xffffffff,
+ .step = 1,
+ .def = 0,
+ .dims = { DW100_DEF_LUT_W, DW100_DEF_LUT_H },
+ },
+};
+
+static int dw100_queue_setup(struct vb2_queue *vq,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(vq);
+ const struct v4l2_pix_format_mplane *format;
+ unsigned int i;
+
+ format = &dw100_get_q_data(ctx, vq->type)->pix_fmt;
+
+ if (*nplanes) {
+ if (*nplanes != format->num_planes)
+ return -EINVAL;
+
+ for (i = 0; i < *nplanes; ++i) {
+ if (sizes[i] < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ *nplanes = format->num_planes;
+
+ for (i = 0; i < format->num_planes; ++i)
+ sizes[i] = format->plane_fmt[i].sizeimage;
+
+ return 0;
+}
+
+static int dw100_buf_prepare(struct vb2_buffer *vb)
+{
+ unsigned int i;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dw100_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct dw100_device *dw_dev = ctx->dw_dev;
+ const struct v4l2_pix_format_mplane *pix_fmt =
+ &dw100_get_q_data(ctx, vb->vb2_queue->type)->pix_fmt;
+
+ if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
+ if (vbuf->field != V4L2_FIELD_NONE) {
+ dev_dbg(&dw_dev->pdev->dev, "%x field isn't supported\n",
+ vbuf->field);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < pix_fmt->num_planes; i++) {
+ unsigned long size = pix_fmt->plane_fmt[i].sizeimage;
+
+ if (vb2_plane_size(vb, i) < size) {
+ dev_dbg(&dw_dev->pdev->dev,
+ "User buffer too small (%lu < %lu)\n",
+ vb2_plane_size(vb, i), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb, i, size);
+ }
+
+ return 0;
+}
+
+static void dw100_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct dw100_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static void dw100_return_all_buffers(struct vb2_queue *q,
+ enum vb2_buffer_state state)
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(q);
+ struct vb2_v4l2_buffer *vbuf;
+
+ for (;;) {
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
+ else
+ vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ return;
+ v4l2_m2m_buf_done(vbuf, state);
+ }
+}
+
+static int dw100_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(q);
+ struct dw100_q_data *q_data = dw100_get_q_data(ctx, q->type);
+ int ret;
+
+ q_data->sequence = 0;
+
+ ret = dw100_create_mapping(ctx);
+ if (ret)
+ goto err;
+
+ ret = pm_runtime_resume_and_get(&ctx->dw_dev->pdev->dev);
+ if (ret) {
+ dw100_destroy_mapping(ctx);
+ goto err;
+ }
+
+ return 0;
+err:
+ dw100_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
+ return ret;
+}
+
+static void dw100_stop_streaming(struct vb2_queue *q)
+{
+ struct dw100_ctx *ctx = vb2_get_drv_priv(q);
+
+ dw100_return_all_buffers(q, VB2_BUF_STATE_ERROR);
+
+ pm_runtime_put_sync(&ctx->dw_dev->pdev->dev);
+
+ dw100_destroy_mapping(ctx);
+}
+
+static const struct vb2_ops dw100_qops = {
+ .queue_setup = dw100_queue_setup,
+ .buf_prepare = dw100_buf_prepare,
+ .buf_queue = dw100_buf_queue,
+ .start_streaming = dw100_start_streaming,
+ .stop_streaming = dw100_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int dw100_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct dw100_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->ops = &dw100_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->vq_mutex;
+ src_vq->dev = ctx->dw_dev->v4l2_dev.dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ dst_vq->ops = &dw100_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->vq_mutex;
+ dst_vq->dev = ctx->dw_dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int dw100_open(struct file *file)
+{
+ struct dw100_device *dw_dev = video_drvdata(file);
+ struct dw100_ctx *ctx;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_pix_format_mplane *pix_fmt;
+ int ret, i;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mutex_init(&ctx->vq_mutex);
+ v4l2_fh_init(&ctx->fh, video_devdata(file));
+ file->private_data = &ctx->fh;
+ ctx->dw_dev = dw_dev;
+
+ ctx->q_data[DW100_QUEUE_SRC].fmt = &formats[0];
+
+ pix_fmt = &ctx->q_data[DW100_QUEUE_SRC].pix_fmt;
+ pix_fmt->field = V4L2_FIELD_NONE;
+ pix_fmt->colorspace = V4L2_COLORSPACE_REC709;
+ pix_fmt->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix_fmt->colorspace);
+ pix_fmt->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix_fmt->colorspace);
+ pix_fmt->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false, pix_fmt->colorspace,
+ pix_fmt->ycbcr_enc);
+
+ v4l2_fill_pixfmt_mp(pix_fmt, formats[0].fourcc, DW100_DEF_W, DW100_DEF_H);
+
+ ctx->q_data[DW100_QUEUE_SRC].crop.top = 0;
+ ctx->q_data[DW100_QUEUE_SRC].crop.left = 0;
+ ctx->q_data[DW100_QUEUE_SRC].crop.width = DW100_DEF_W;
+ ctx->q_data[DW100_QUEUE_SRC].crop.height = DW100_DEF_H;
+
+ ctx->q_data[DW100_QUEUE_DST] = ctx->q_data[DW100_QUEUE_SRC];
+
+ hdl = &ctx->hdl;
+ v4l2_ctrl_handler_init(hdl, ARRAY_SIZE(controls));
+ for (i = 0; i < ARRAY_SIZE(controls); i++) {
+ ctx->ctrls[i] = v4l2_ctrl_new_custom(hdl, &controls[i], NULL);
+ if (hdl->error) {
+ dev_err(&ctx->dw_dev->pdev->dev,
+ "Adding control (%d) failed\n", i);
+ ret = hdl->error;
+ goto err;
+ }
+ }
+ ctx->fh.ctrl_handler = hdl;
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dw_dev->m2m_dev,
+ ctx, &dw100_m2m_queue_init);
+
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err;
+ }
+
+ v4l2_fh_add(&ctx->fh);
+
+ return 0;
+
+err:
+ v4l2_ctrl_handler_free(hdl);
+ v4l2_fh_exit(&ctx->fh);
+ mutex_destroy(&ctx->vq_mutex);
+ kfree(ctx);
+
+ return ret;
+}
+
+static int dw100_release(struct file *file)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->hdl);
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ mutex_destroy(&ctx->vq_mutex);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations dw100_fops = {
+ .owner = THIS_MODULE,
+ .open = dw100_open,
+ .release = dw100_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int dw100_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ strscpy(cap->driver, DRV_NAME, sizeof(cap->driver));
+ strscpy(cap->card, "DW100 dewarper", sizeof(cap->card));
+
+ return 0;
+}
+
+static int dw100_enum_fmt_vid(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ int i, num = 0;
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].types & to_dw100_fmt_type(f->type)) {
+ if (num == f->index) {
+ f->pixelformat = formats[i].fourcc;
+ return 0;
+ }
+ ++num;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int dw100_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ const struct dw100_fmt *fmt;
+
+ if (fsize->index)
+ return -EINVAL;
+
+ fmt = dw100_find_pixel_format(fsize->pixel_format,
+ DW100_FMT_OUTPUT | DW100_FMT_CAPTURE);
+ if (!fmt)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = dw100_frmsize_stepwise;
+
+ return 0;
+}
+
+static int dw100_g_fmt_vid(struct file *file, void *priv, struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct vb2_queue *vq;
+ struct dw100_q_data *q_data;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = dw100_get_q_data(ctx, f->type);
+
+ f->fmt.pix_mp = q_data->pix_fmt;
+
+ return 0;
+}
+
+static int dw100_try_fmt(struct file *file, struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ const struct dw100_fmt *fmt;
+
+ fmt = dw100_find_format(f);
+ if (!fmt) {
+ fmt = &formats[0];
+ pix->pixelformat = fmt->fourcc;
+ }
+
+ v4l2_apply_frmsize_constraints(&pix->width, &pix->height,
+ &dw100_frmsize_stepwise);
+
+ v4l2_fill_pixfmt_mp(pix, fmt->fourcc, pix->width, pix->height);
+
+ pix->field = V4L2_FIELD_NONE;
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
+ pix->colorspace = V4L2_COLORSPACE_REC709;
+ if (pix->xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ pix->xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(pix->colorspace);
+ if (pix->ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
+ pix->ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(pix->colorspace);
+ if (pix->quantization == V4L2_QUANTIZATION_DEFAULT)
+ pix->quantization =
+ V4L2_MAP_QUANTIZATION_DEFAULT(false,
+ pix->colorspace,
+ pix->ycbcr_enc);
+ } else {
+ /*
+		 * The DW100 can't perform colorspace conversion, so the
+		 * colorspace on the capture queue must be identical to that of
+		 * the output queue.
+ */
+ const struct dw100_q_data *q_data =
+ dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ pix->colorspace = q_data->pix_fmt.colorspace;
+ pix->xfer_func = q_data->pix_fmt.xfer_func;
+ pix->ycbcr_enc = q_data->pix_fmt.ycbcr_enc;
+ pix->quantization = q_data->pix_fmt.quantization;
+ }
+
+ return 0;
+}
+
+static int dw100_s_fmt(struct dw100_ctx *ctx, struct v4l2_format *f)
+{
+ struct dw100_q_data *q_data;
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ q_data = dw100_get_q_data(ctx, f->type);
+ if (!q_data)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ dev_dbg(&ctx->dw_dev->pdev->dev, "%s queue busy\n", __func__);
+ return -EBUSY;
+ }
+
+ q_data->fmt = dw100_find_format(f);
+ q_data->pix_fmt = f->fmt.pix_mp;
+ q_data->crop.top = 0;
+ q_data->crop.left = 0;
+ q_data->crop.width = f->fmt.pix_mp.width;
+ q_data->crop.height = f->fmt.pix_mp.height;
+
+	/* Propagate the colorspace information to the capture queue */
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct dw100_q_data *dst_q_data =
+ dw100_get_q_data(ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
+ dst_q_data->pix_fmt.colorspace = q_data->pix_fmt.colorspace;
+ dst_q_data->pix_fmt.ycbcr_enc = q_data->pix_fmt.ycbcr_enc;
+ dst_q_data->pix_fmt.quantization = q_data->pix_fmt.quantization;
+ dst_q_data->pix_fmt.xfer_func = q_data->pix_fmt.xfer_func;
+ }
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "Setting format for type %u, wxh: %ux%u, fmt: %p4cc\n",
+ f->type, q_data->pix_fmt.width, q_data->pix_fmt.height,
+ &q_data->pix_fmt.pixelformat);
+
+ if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ int ret;
+ u32 dims[V4L2_CTRL_MAX_DIMS] = {};
+ struct v4l2_ctrl *ctrl = ctx->ctrls[DW100_CTRL_DEWARPING_MAP];
+
+ dims[0] = dw100_get_n_vertices_from_length(q_data->pix_fmt.width);
+ dims[1] = dw100_get_n_vertices_from_length(q_data->pix_fmt.height);
+
+ ret = v4l2_ctrl_modify_dimensions(ctrl, dims);
+
+ if (ret) {
+ dev_err(&ctx->dw_dev->pdev->dev,
+ "Modifying LUT dimensions failed with error %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int dw100_try_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ return dw100_try_fmt(file, f);
+}
+
+static int dw100_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ int ret;
+
+ ret = dw100_try_fmt_vid_cap(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = dw100_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dw100_try_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ return dw100_try_fmt(file, f);
+}
+
+static int dw100_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ int ret;
+
+ ret = dw100_try_fmt_vid_out(file, priv, f);
+ if (ret)
+ return ret;
+
+ ret = dw100_s_fmt(ctx, f);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int dw100_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct dw100_q_data *src_q_data;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ src_q_data = dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = src_q_data->pix_fmt.width;
+ sel->r.height = src_q_data->pix_fmt.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r.top = src_q_data->crop.top;
+ sel->r.left = src_q_data->crop.left;
+ sel->r.width = src_q_data->crop.width;
+ sel->r.height = src_q_data->crop.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int dw100_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *sel)
+{
+ struct dw100_ctx *ctx = dw100_file2ctx(file);
+ struct dw100_q_data *src_q_data;
+ u32 qscalex, qscaley, qscale;
+ int x, y, w, h;
+ unsigned int wframe, hframe;
+
+ if (sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ src_q_data = dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ ">>> Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ sel->type, sel->target,
+ sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ wframe = src_q_data->pix_fmt.width;
+ hframe = src_q_data->pix_fmt.height;
+
+ sel->r.top = clamp_t(int, sel->r.top, 0, hframe - DW100_MIN_H);
+ sel->r.left = clamp_t(int, sel->r.left, 0, wframe - DW100_MIN_W);
+ sel->r.height =
+ clamp(sel->r.height, DW100_MIN_H, hframe - sel->r.top);
+ sel->r.width =
+ clamp(sel->r.width, DW100_MIN_W, wframe - sel->r.left);
+
+ /* UQ16.16 for float operations */
+ qscalex = (sel->r.width << 16) / wframe;
+ qscaley = (sel->r.height << 16) / hframe;
+ y = sel->r.top;
+ x = sel->r.left;
+ if (qscalex == qscaley) {
+ qscale = qscalex;
+ } else {
+ switch (sel->flags) {
+ case 0:
+ qscale = (qscalex + qscaley) / 2;
+ break;
+ case V4L2_SEL_FLAG_GE:
+ qscale = max(qscaley, qscalex);
+ break;
+ case V4L2_SEL_FLAG_LE:
+ qscale = min(qscaley, qscalex);
+ break;
+ case V4L2_SEL_FLAG_LE | V4L2_SEL_FLAG_GE:
+ return -ERANGE;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ w = (u32)((((u64)wframe << 16) * qscale) >> 32);
+ h = (u32)((((u64)hframe << 16) * qscale) >> 32);
+ x = x + (sel->r.width - w) / 2;
+ y = y + (sel->r.height - h) / 2;
+ x = min(wframe - w, (unsigned int)max(0, x));
+ y = min(hframe - h, (unsigned int)max(0, y));
+
+ sel->r.top = y;
+ sel->r.left = x;
+ sel->r.width = w;
+ sel->r.height = h;
+
+ src_q_data->crop.top = sel->r.top;
+ src_q_data->crop.left = sel->r.left;
+ src_q_data->crop.width = sel->r.width;
+ src_q_data->crop.height = sel->r.height;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "<<< Buffer Type: %u Target: %u Rect: %ux%u@%d.%d\n",
+ sel->type, sel->target,
+ sel->r.width, sel->r.height, sel->r.left, sel->r.top);
+
+ return 0;
+}
+
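+/*
+ * Illustrative numbers for the UQ16.16 math in dw100_s_selection() above
+ * (derived from the code, not a normative requirement): with wframe = 1920
+ * and a requested crop width of 960, qscalex = (960 << 16) / 1920 = 32768,
+ * i.e. 0.5 in UQ16.16, and the recomputed width is
+ * w = ((1920ULL << 16) * 32768) >> 32 = 960.
+ */
+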
+static const struct v4l2_ioctl_ops dw100_ioctl_ops = {
+ .vidioc_querycap = dw100_querycap,
+
+ .vidioc_enum_fmt_vid_cap = dw100_enum_fmt_vid,
+ .vidioc_enum_framesizes = dw100_enum_framesizes,
+ .vidioc_g_fmt_vid_cap_mplane = dw100_g_fmt_vid,
+ .vidioc_try_fmt_vid_cap_mplane = dw100_try_fmt_vid_cap,
+ .vidioc_s_fmt_vid_cap_mplane = dw100_s_fmt_vid_cap,
+
+ .vidioc_enum_fmt_vid_out = dw100_enum_fmt_vid,
+ .vidioc_g_fmt_vid_out_mplane = dw100_g_fmt_vid,
+ .vidioc_try_fmt_vid_out_mplane = dw100_try_fmt_vid_out,
+ .vidioc_s_fmt_vid_out_mplane = dw100_s_fmt_vid_out,
+
+ .vidioc_g_selection = dw100_g_selection,
+ .vidioc_s_selection = dw100_s_selection,
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+};
+
+static void dw100_job_finish(struct dw100_device *dw_dev, bool with_error)
+{
+ struct dw100_ctx *curr_ctx;
+ struct vb2_v4l2_buffer *src_vb, *dst_vb;
+ enum vb2_buffer_state buf_state;
+
+ curr_ctx = v4l2_m2m_get_curr_priv(dw_dev->m2m_dev);
+
+ if (!curr_ctx) {
+ dev_err(&dw_dev->pdev->dev,
+ "Instance released before the end of transaction\n");
+ return;
+ }
+
+ src_vb = v4l2_m2m_src_buf_remove(curr_ctx->fh.m2m_ctx);
+ dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->fh.m2m_ctx);
+
+ if (likely(!with_error))
+ buf_state = VB2_BUF_STATE_DONE;
+ else
+ buf_state = VB2_BUF_STATE_ERROR;
+
+ v4l2_m2m_buf_done(src_vb, buf_state);
+ v4l2_m2m_buf_done(dst_vb, buf_state);
+
+ dev_dbg(&dw_dev->pdev->dev, "Finishing transaction with%s error(s)\n",
+ with_error ? "" : "out");
+
+ v4l2_m2m_job_finish(dw_dev->m2m_dev, curr_ctx->fh.m2m_ctx);
+}
+
+static void dw100_hw_reset(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+ val |= DW100_DEWARP_CTRL_ENABLE;
+ val |= DW100_DEWARP_CTRL_SOFT_RESET;
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+ val &= ~DW100_DEWARP_CTRL_SOFT_RESET;
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+}
+
+static void _dw100_hw_set_master_bus_enable(struct dw100_device *dw_dev,
+ unsigned int enable)
+{
+ u32 val;
+
+ dev_dbg(&dw_dev->pdev->dev, "%sable master bus\n",
+ enable ? "En" : "Dis");
+
+ val = dw100_read(dw_dev, DW100_BUS_CTRL);
+
+ if (enable)
+ val |= DW100_BUS_CTRL_AXI_MASTER_ENABLE;
+ else
+ val &= ~DW100_BUS_CTRL_AXI_MASTER_ENABLE;
+
+ dw100_write(dw_dev, DW100_BUS_CTRL, val);
+}
+
+static void dw100_hw_master_bus_enable(struct dw100_device *dw_dev)
+{
+ _dw100_hw_set_master_bus_enable(dw_dev, 1);
+}
+
+static void dw100_hw_master_bus_disable(struct dw100_device *dw_dev)
+{
+ _dw100_hw_set_master_bus_enable(dw_dev, 0);
+}
+
+static void dw100_hw_dewarp_start(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+
+ dev_dbg(&dw_dev->pdev->dev, "Starting Hardware CTRL:0x%08x\n", val);
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val | DW100_DEWARP_CTRL_START);
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+}
+
+static void dw100_hw_init_ctrl(struct dw100_device *dw_dev)
+{
+ u32 val;
+ /*
+ * Input format YUV422_SP
+ * Output format YUV422_SP
+ * No hardware handshake (SW)
+ * No automatic double src buffering (Single)
+ * No automatic double dst buffering (Single)
+ * No Black Line
+ * Prefetch image pixel traversal
+ */
+
+ val = DW100_DEWARP_CTRL_ENABLE
+		/* Valid only for auto prefetch mode */
+ | DW100_DEWARP_CTRL_PREFETCH_THRESHOLD(32);
+
+ /*
+	 * Calculation mode is required to support any scaling factor, but it
+	 * is four times slower than traversal mode.
+ *
+ * DW100_DEWARP_CTRL_PREFETCH_MODE_TRAVERSAL
+ * DW100_DEWARP_CTRL_PREFETCH_MODE_CALCULATION
+ * DW100_DEWARP_CTRL_PREFETCH_MODE_AUTO
+ *
+ * TODO: Find heuristics requiring calculation mode
+ */
+ val |= DW100_DEWARP_CTRL_PREFETCH_MODE_CALCULATION;
+
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+}
+
+static void dw100_hw_set_pixel_boundary(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = DW100_BOUNDARY_PIXEL_V(128)
+ | DW100_BOUNDARY_PIXEL_U(128)
+ | DW100_BOUNDARY_PIXEL_Y(0);
+
+ dw100_write(dw_dev, DW100_BOUNDARY_PIXEL, val);
+}
+
+static void dw100_hw_set_scale(struct dw100_device *dw_dev, u8 scale)
+{
+ dev_dbg(&dw_dev->pdev->dev, "Setting scale factor to %u\n", scale);
+
+ dw100_write(dw_dev, DW100_SCALE_FACTOR, scale);
+}
+
+static void dw100_hw_set_roi(struct dw100_device *dw_dev, u32 x, u32 y)
+{
+ u32 val;
+
+ dev_dbg(&dw_dev->pdev->dev, "Setting ROI region to %u.%u\n", x, y);
+
+ val = DW100_ROI_START_X(x) | DW100_ROI_START_Y(y);
+
+ dw100_write(dw_dev, DW100_ROI_START, val);
+}
+
+static void dw100_hw_set_src_crop(struct dw100_device *dw_dev,
+ const struct dw100_q_data *src_q_data,
+ const struct dw100_q_data *dst_q_data)
+{
+ const struct v4l2_rect *rect = &src_q_data->crop;
+ u32 src_scale, qscale, left_scale, top_scale;
+
+ /* HW Scale is UQ1.7 encoded */
+ src_scale = (rect->width << 7) / src_q_data->pix_fmt.width;
+ dw100_hw_set_scale(dw_dev, src_scale);
+
+ qscale = (dst_q_data->pix_fmt.width << 7) / src_q_data->pix_fmt.width;
+
+ left_scale = ((rect->left << 7) * qscale) >> 14;
+ top_scale = ((rect->top << 7) * qscale) >> 14;
+
+ dw100_hw_set_roi(dw_dev, left_scale, top_scale);
+}
+
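+/*
+ * Illustrative example of the UQ1.7 scaling above (values follow from the
+ * code, not from the hardware manual): cropping a 1920-pixel-wide source to a
+ * 960-pixel-wide rectangle gives src_scale = (960 << 7) / 1920 = 64, i.e. 0.5
+ * in UQ1.7; with a 960-pixel-wide destination, qscale is also 64, and a crop
+ * left offset of 480 yields left_scale = ((480 << 7) * 64) >> 14 = 240.
+ */
+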
+static void dw100_hw_set_source(struct dw100_device *dw_dev,
+ const struct dw100_q_data *q_data,
+ struct vb2_buffer *buffer)
+{
+ u32 width, height, stride, fourcc, val;
+ const struct dw100_fmt *fmt = q_data->fmt;
+ dma_addr_t addr_y = vb2_dma_contig_plane_dma_addr(buffer, 0);
+ dma_addr_t addr_uv;
+
+ width = q_data->pix_fmt.width;
+ height = q_data->pix_fmt.height;
+ stride = q_data->pix_fmt.plane_fmt[0].bytesperline;
+ fourcc = q_data->fmt->fourcc;
+
+ if (q_data->pix_fmt.num_planes == 2)
+ addr_uv = vb2_dma_contig_plane_dma_addr(buffer, 1);
+ else
+ addr_uv = addr_y + (stride * height);
+
+ dev_dbg(&dw_dev->pdev->dev,
+ "Set HW source registers for %ux%u - stride %u, pixfmt: %p4cc, dma:%pad\n",
+ width, height, stride, &fourcc, &addr_y);
+
+ /* Pixel Format */
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+
+ val &= ~DW100_DEWARP_CTRL_INPUT_FORMAT_MASK;
+ val |= DW100_DEWARP_CTRL_INPUT_FORMAT(fmt->reg_format);
+
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+
+ /* Swap */
+ val = dw100_read(dw_dev, DW100_SWAP_CONTROL);
+
+ val &= ~DW100_SWAP_CONTROL_SRC_MASK;
+ /*
+	 * Data swapping is performed only on the Y plane of the source image.
+ */
+ if (fmt->reg_swap_uv &&
+ fmt->reg_format == DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED)
+ val |= DW100_SWAP_CONTROL_SRC(DW100_SWAP_CONTROL_Y
+ (DW100_SWAP_CONTROL_BYTE));
+
+ dw100_write(dw_dev, DW100_SWAP_CONTROL, val);
+
+ /* Image resolution */
+ dw100_write(dw_dev, DW100_SRC_IMG_SIZE,
+ DW100_IMG_SIZE_WIDTH(width) | DW100_IMG_SIZE_HEIGHT(height));
+
+ dw100_write(dw_dev, DW100_SRC_IMG_STRIDE, stride);
+
+ /* Buffers */
+ dw100_write(dw_dev, DW100_SRC_IMG_Y_BASE, DW100_IMG_Y_BASE(addr_y));
+ dw100_write(dw_dev, DW100_SRC_IMG_UV_BASE, DW100_IMG_UV_BASE(addr_uv));
+}
+
+static void dw100_hw_set_destination(struct dw100_device *dw_dev,
+ const struct dw100_q_data *q_data,
+ const struct dw100_fmt *ifmt,
+ struct vb2_buffer *buffer)
+{
+ u32 width, height, stride, fourcc, val, size_y, size_uv;
+ const struct dw100_fmt *fmt = q_data->fmt;
+ dma_addr_t addr_y, addr_uv;
+
+ width = q_data->pix_fmt.width;
+ height = q_data->pix_fmt.height;
+ stride = q_data->pix_fmt.plane_fmt[0].bytesperline;
+ fourcc = fmt->fourcc;
+
+ addr_y = vb2_dma_contig_plane_dma_addr(buffer, 0);
+ size_y = q_data->pix_fmt.plane_fmt[0].sizeimage;
+
+ if (q_data->pix_fmt.num_planes == 2) {
+ addr_uv = vb2_dma_contig_plane_dma_addr(buffer, 1);
+ size_uv = q_data->pix_fmt.plane_fmt[1].sizeimage;
+ } else {
+ addr_uv = addr_y + ALIGN(stride * height, 16);
+ size_uv = size_y;
+ if (fmt->reg_format == DW100_DEWARP_CTRL_FORMAT_YUV420_SP)
+ size_uv /= 2;
+ }
+
+ dev_dbg(&dw_dev->pdev->dev,
+		"Set HW destination registers for %ux%u - stride %u, pixfmt: %p4cc, dma:%pad\n",
+ width, height, stride, &fourcc, &addr_y);
+
+ /* Pixel Format */
+ val = dw100_read(dw_dev, DW100_DEWARP_CTRL);
+
+ val &= ~DW100_DEWARP_CTRL_OUTPUT_FORMAT_MASK;
+ val |= DW100_DEWARP_CTRL_OUTPUT_FORMAT(fmt->reg_format);
+
+ dw100_write(dw_dev, DW100_DEWARP_CTRL, val);
+
+ /* Swap */
+ val = dw100_read(dw_dev, DW100_SWAP_CONTROL);
+
+ val &= ~DW100_SWAP_CONTROL_DST_MASK;
+
+ /*
+	 * Avoid swapping twice
+ */
+ if (fmt->reg_swap_uv ^
+ (ifmt->reg_swap_uv && ifmt->reg_format !=
+ DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED)) {
+ if (fmt->reg_format == DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED)
+ val |= DW100_SWAP_CONTROL_DST(DW100_SWAP_CONTROL_Y
+ (DW100_SWAP_CONTROL_BYTE));
+ else
+ val |= DW100_SWAP_CONTROL_DST(DW100_SWAP_CONTROL_UV
+ (DW100_SWAP_CONTROL_BYTE));
+ }
+
+ dw100_write(dw_dev, DW100_SWAP_CONTROL, val);
+
+ /* Image resolution */
+ dw100_write(dw_dev, DW100_DST_IMG_SIZE,
+ DW100_IMG_SIZE_WIDTH(width) | DW100_IMG_SIZE_HEIGHT(height));
+ dw100_write(dw_dev, DW100_DST_IMG_STRIDE, stride);
+ dw100_write(dw_dev, DW100_DST_IMG_Y_BASE, DW100_IMG_Y_BASE(addr_y));
+ dw100_write(dw_dev, DW100_DST_IMG_UV_BASE, DW100_IMG_UV_BASE(addr_uv));
+ dw100_write(dw_dev, DW100_DST_IMG_Y_SIZE1, DW100_DST_IMG_Y_SIZE(size_y));
+ dw100_write(dw_dev, DW100_DST_IMG_UV_SIZE1,
+ DW100_DST_IMG_UV_SIZE(size_uv));
+}
+
+static void dw100_hw_set_mapping(struct dw100_device *dw_dev, dma_addr_t addr,
+ u32 width, u32 height)
+{
+ dev_dbg(&dw_dev->pdev->dev,
+ "Set HW mapping registers for %ux%u addr:%pad",
+ width, height, &addr);
+
+ dw100_write(dw_dev, DW100_MAP_LUT_ADDR, DW100_MAP_LUT_ADDR_ADDR(addr));
+ dw100_write(dw_dev, DW100_MAP_LUT_SIZE, DW100_MAP_LUT_SIZE_WIDTH(width)
+ | DW100_MAP_LUT_SIZE_HEIGHT(height));
+}
+
+static void dw100_hw_clear_irq(struct dw100_device *dw_dev, unsigned int irq)
+{
+ dw100_write(dw_dev, DW100_INTERRUPT_STATUS,
+ DW100_INTERRUPT_STATUS_INT_CLEAR(irq));
+}
+
+static void dw100_hw_enable_irq(struct dw100_device *dw_dev)
+{
+ dw100_write(dw_dev, DW100_INTERRUPT_STATUS,
+ DW100_INTERRUPT_STATUS_INT_ENABLE_MASK);
+}
+
+static void dw100_hw_disable_irq(struct dw100_device *dw_dev)
+{
+ dw100_write(dw_dev, DW100_INTERRUPT_STATUS, 0);
+}
+
+static u32 dw_hw_get_pending_irqs(struct dw100_device *dw_dev)
+{
+ u32 val;
+
+ val = dw100_read(dw_dev, DW100_INTERRUPT_STATUS);
+
+ return DW100_INTERRUPT_STATUS_INT_STATUS(val);
+}
+
+static irqreturn_t dw100_irq_handler(int irq, void *dev_id)
+{
+ struct dw100_device *dw_dev = dev_id;
+ u32 pending_irqs, err_irqs, frame_done_irq;
+ bool with_error = true;
+
+ pending_irqs = dw_hw_get_pending_irqs(dw_dev);
+ frame_done_irq = pending_irqs & DW100_INTERRUPT_STATUS_INT_FRAME_DONE;
+ err_irqs = DW100_INTERRUPT_STATUS_INT_ERR_STATUS(pending_irqs);
+
+ if (frame_done_irq) {
+ dev_dbg(&dw_dev->pdev->dev, "Frame done interrupt\n");
+ with_error = false;
+ err_irqs &= ~DW100_INTERRUPT_STATUS_INT_ERR_STATUS
+ (DW100_INTERRUPT_STATUS_INT_ERR_FRAME_DONE);
+ }
+
+ if (err_irqs)
+ dev_err(&dw_dev->pdev->dev, "Interrupt error: %#x\n", err_irqs);
+
+ dw100_hw_disable_irq(dw_dev);
+ dw100_hw_master_bus_disable(dw_dev);
+ dw100_hw_clear_irq(dw_dev, pending_irqs |
+ DW100_INTERRUPT_STATUS_INT_ERR_TIME_OUT);
+
+ dw100_job_finish(dw_dev, with_error);
+
+ return IRQ_HANDLED;
+}
+
+static void dw100_start(struct dw100_ctx *ctx, struct vb2_v4l2_buffer *in_vb,
+ struct vb2_v4l2_buffer *out_vb)
+{
+ struct dw100_device *dw_dev = ctx->dw_dev;
+
+ out_vb->sequence =
+ dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)->sequence++;
+ in_vb->sequence =
+ dw100_get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)->sequence++;
+
+ dev_dbg(&ctx->dw_dev->pdev->dev,
+ "Starting queues %p->%p, sequence %u->%u\n",
+ v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE),
+ v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE),
+ in_vb->sequence, out_vb->sequence);
+
+ v4l2_m2m_buf_copy_metadata(in_vb, out_vb, true);
+
+ /* Now, let's deal with hardware ... */
+ dw100_hw_master_bus_disable(dw_dev);
+ dw100_hw_init_ctrl(dw_dev);
+ dw100_hw_set_pixel_boundary(dw_dev);
+ dw100_hw_set_src_crop(dw_dev, &ctx->q_data[DW100_QUEUE_SRC],
+ &ctx->q_data[DW100_QUEUE_DST]);
+ dw100_hw_set_source(dw_dev, &ctx->q_data[DW100_QUEUE_SRC],
+ &in_vb->vb2_buf);
+ dw100_hw_set_destination(dw_dev, &ctx->q_data[DW100_QUEUE_DST],
+ ctx->q_data[DW100_QUEUE_SRC].fmt,
+ &out_vb->vb2_buf);
+ dw100_hw_set_mapping(dw_dev, ctx->map_dma,
+ ctx->map_width, ctx->map_height);
+ dw100_hw_enable_irq(dw_dev);
+ dw100_hw_dewarp_start(dw_dev);
+
+ /* Enable Bus */
+ dw100_hw_master_bus_enable(dw_dev);
+}
+
+static void dw100_device_run(void *priv)
+{
+ struct dw100_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ dw100_start(ctx, src_buf, dst_buf);
+}
+
+static const struct v4l2_m2m_ops dw100_m2m_ops = {
+ .device_run = dw100_device_run,
+};
+
+static struct video_device *dw100_init_video_device(struct dw100_device *dw_dev)
+{
+ struct video_device *vfd = &dw_dev->vfd;
+
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->fops = &dw100_fops;
+ vfd->device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING;
+ vfd->ioctl_ops = &dw100_ioctl_ops;
+ vfd->minor = -1;
+ vfd->release = video_device_release_empty;
+ vfd->v4l2_dev = &dw_dev->v4l2_dev;
+ vfd->lock = &dw_dev->vfd_mutex;
+
+ strscpy(vfd->name, DRV_NAME, sizeof(vfd->name));
+ mutex_init(vfd->lock);
+ video_set_drvdata(vfd, dw_dev);
+
+ return vfd;
+}
+
+static int dw100_dump_regs_show(struct seq_file *m, void *private)
+{
+ struct dw100_device *dw_dev = m->private;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&dw_dev->pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = dw100_dump_regs(m);
+
+ pm_runtime_put_sync(&dw_dev->pdev->dev);
+
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(dw100_dump_regs);
+
+static void dw100_debugfs_init(struct dw100_device *dw_dev)
+{
+ dw_dev->debugfs_root =
+ debugfs_create_dir(dev_name(&dw_dev->pdev->dev), NULL);
+
+ debugfs_create_file("dump_regs", 0600, dw_dev->debugfs_root, dw_dev,
+ &dw100_dump_regs_fops);
+}
+
+static void dw100_debugfs_exit(struct dw100_device *dw_dev)
+{
+ debugfs_remove_recursive(dw_dev->debugfs_root);
+}
+
+static int dw100_probe(struct platform_device *pdev)
+{
+ struct dw100_device *dw_dev;
+ struct video_device *vfd;
+ struct resource *res;
+ int ret, irq;
+
+ dw_dev = devm_kzalloc(&pdev->dev, sizeof(*dw_dev), GFP_KERNEL);
+ if (!dw_dev)
+ return -ENOMEM;
+ dw_dev->pdev = pdev;
+
+ ret = devm_clk_bulk_get_all(&pdev->dev, &dw_dev->clks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get clocks: %d\n", ret);
+ return ret;
+ }
+ dw_dev->num_clks = ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dw_dev->mmio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dw_dev->mmio))
+ return PTR_ERR(dw_dev->mmio);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ platform_set_drvdata(pdev, dw_dev);
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to resume the device: %d\n", ret);
+ goto err_pm;
+ }
+
+ pm_runtime_put_sync(&pdev->dev);
+
+ ret = devm_request_irq(&pdev->dev, irq, dw100_irq_handler, IRQF_ONESHOT,
+ dev_name(&pdev->dev), dw_dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to request irq: %d\n", ret);
+		goto err_pm;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &dw_dev->v4l2_dev);
+ if (ret)
+ goto err_pm;
+
+ vfd = dw100_init_video_device(dw_dev);
+
+ dw_dev->m2m_dev = v4l2_m2m_init(&dw100_m2m_ops);
+ if (IS_ERR(dw_dev->m2m_dev)) {
+ dev_err(&pdev->dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(dw_dev->m2m_dev);
+ goto err_v4l2;
+ }
+
+ dw_dev->mdev.dev = &pdev->dev;
+ strscpy(dw_dev->mdev.model, "dw100", sizeof(dw_dev->mdev.model));
+ media_device_init(&dw_dev->mdev);
+ dw_dev->v4l2_dev.mdev = &dw_dev->mdev;
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register video device\n");
+ goto err_m2m;
+ }
+
+ ret = v4l2_m2m_register_media_controller(dw_dev->m2m_dev, vfd,
+ MEDIA_ENT_F_PROC_VIDEO_SCALER);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init mem2mem media controller\n");
+ goto error_v4l2;
+ }
+
+ ret = media_device_register(&dw_dev->mdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register mem2mem media device\n");
+ goto error_m2m_mc;
+ }
+
+ dw100_debugfs_init(dw_dev);
+
+ dev_info(&pdev->dev,
+ "dw100 v4l2 m2m registered as /dev/video%u\n", vfd->num);
+
+ return 0;
+
+error_m2m_mc:
+ v4l2_m2m_unregister_media_controller(dw_dev->m2m_dev);
+error_v4l2:
+ video_unregister_device(vfd);
+err_m2m:
+ media_device_cleanup(&dw_dev->mdev);
+ v4l2_m2m_release(dw_dev->m2m_dev);
+err_v4l2:
+ v4l2_device_unregister(&dw_dev->v4l2_dev);
+err_pm:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static int dw100_remove(struct platform_device *pdev)
+{
+ struct dw100_device *dw_dev = platform_get_drvdata(pdev);
+
+ dw100_debugfs_exit(dw_dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ media_device_unregister(&dw_dev->mdev);
+ v4l2_m2m_unregister_media_controller(dw_dev->m2m_dev);
+ media_device_cleanup(&dw_dev->mdev);
+
+ video_unregister_device(&dw_dev->vfd);
+ mutex_destroy(dw_dev->vfd.lock);
+ v4l2_m2m_release(dw_dev->m2m_dev);
+ v4l2_device_unregister(&dw_dev->v4l2_dev);
+
+ return 0;
+}
+
+static int __maybe_unused dw100_runtime_suspend(struct device *dev)
+{
+ struct dw100_device *dw_dev = dev_get_drvdata(dev);
+
+ clk_bulk_disable_unprepare(dw_dev->num_clks, dw_dev->clks);
+
+ return 0;
+}
+
+static int __maybe_unused dw100_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct dw100_device *dw_dev = dev_get_drvdata(dev);
+
+	ret = clk_bulk_prepare_enable(dw_dev->num_clks, dw_dev->clks);
+	if (ret)
+		return ret;
+
+ dw100_hw_reset(dw_dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops dw100_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(dw100_runtime_suspend,
+ dw100_runtime_resume, NULL)
+};
+
+static const struct of_device_id dw100_dt_ids[] = {
+ { .compatible = "nxp,imx8mp-dw100", .data = NULL },
+ { },
+};
+MODULE_DEVICE_TABLE(of, dw100_dt_ids);
+
+static struct platform_driver dw100_driver = {
+ .probe = dw100_probe,
+ .remove = dw100_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = &dw100_pm,
+ .of_match_table = dw100_dt_ids,
+ },
+};
+
+module_platform_driver(dw100_driver);
+
+MODULE_DESCRIPTION("DW100 Hardware dewarper");
+MODULE_AUTHOR("Xavier Roumegue <Xavier.Roumegue@oss.nxp.com>");
+MODULE_LICENSE("GPL");
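For a single-plane semi-planar capture buffer, dw100_hw_set_destination() derives the chroma plane from the luma plane: the UV data starts at the first 16-byte-aligned offset past the Y data, and for YUV420 it carries half the luma bytes. A minimal standalone sketch of that computation (user-space C, hypothetical values, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define ALIGN16(x) (((x) + 15u) & ~15u)		/* mirrors ALIGN(x, 16) */

int main(void)
{
	uint32_t stride = 640, height = 480;
	uint64_t addr_y = 0x80000000ull;	/* hypothetical DMA address */
	uint32_t size_y = stride * height;

	/* UV plane follows the Y plane at a 16-byte-aligned offset */
	uint64_t addr_uv = addr_y + ALIGN16(stride * height);
	/* YUV420 semi-planar: chroma holds half the luma bytes */
	uint32_t size_uv = size_y / 2;

	printf("Y %#llx/%u bytes, UV %#llx/%u bytes\n",
	       (unsigned long long)addr_y, size_y,
	       (unsigned long long)addr_uv, size_uv);
	return 0;
}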
diff --git a/drivers/media/platform/nxp/dw100/dw100_regs.h b/drivers/media/platform/nxp/dw100/dw100_regs.h
new file mode 100644
index 000000000000..e85dfeff9056
--- /dev/null
+++ b/drivers/media/platform/nxp/dw100/dw100_regs.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * DW100 Hardware dewarper
+ *
+ * Copyright 2022 NXP
+ * Author: Xavier Roumegue (xavier.roumegue@oss.nxp.com)
+ */
+
+#ifndef _DW100_REGS_H_
+#define _DW100_REGS_H_
+
+/* AHB register offset */
+#define DW100_DEWARP_ID 0x00
+#define DW100_DEWARP_CTRL 0x04
+#define DW100_DEWARP_CTRL_ENABLE BIT(0)
+#define DW100_DEWARP_CTRL_START BIT(1)
+#define DW100_DEWARP_CTRL_SOFT_RESET BIT(2)
+#define DW100_DEWARP_CTRL_FORMAT_YUV422_SP 0UL
+#define DW100_DEWARP_CTRL_FORMAT_YUV422_PACKED 1UL
+#define DW100_DEWARP_CTRL_FORMAT_YUV420_SP 2UL
+#define DW100_DEWARP_CTRL_INPUT_FORMAT_MASK GENMASK(5, 4)
+#define DW100_DEWARP_CTRL_INPUT_FORMAT(x) ((x) << 4)
+#define DW100_DEWARP_CTRL_OUTPUT_FORMAT(x) ((x) << 6)
+#define DW100_DEWARP_CTRL_OUTPUT_FORMAT_MASK GENMASK(7, 6)
+#define DW100_DEWARP_CTRL_SRC_AUTO_SHADOW BIT(8)
+#define DW100_DEWARP_CTRL_HW_HANDSHAKE BIT(9)
+#define DW100_DEWARP_CTRL_DST_AUTO_SHADOW BIT(10)
+#define DW100_DEWARP_CTRL_SPLIT_LINE BIT(11)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_MASK GENMASK(17, 16)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_TRAVERSAL (0UL << 16)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_CALCULATION (1UL << 16)
+#define DW100_DEWARP_CTRL_PREFETCH_MODE_AUTO (2UL << 16)
+#define DW100_DEWARP_CTRL_PREFETCH_THRESHOLD_MASK GENMASK(24, 18)
+#define DW100_DEWARP_CTRL_PREFETCH_THRESHOLD(x) ((x) << 18)
+
+#define DW100_MAP_LUT_ADDR 0x08
+#define DW100_MAP_LUT_ADDR_ADDR(addr) (((addr) >> 4) & GENMASK(29, 0))
+#define DW100_MAP_LUT_SIZE 0x0c
+#define DW100_MAP_LUT_SIZE_WIDTH(w) (((w) & GENMASK(10, 0)) << 0)
+#define DW100_MAP_LUT_SIZE_HEIGHT(h) (((h) & GENMASK(10, 0)) << 16)
+#define DW100_SRC_IMG_Y_BASE 0x10
+#define DW100_IMG_Y_BASE(base) (((base) >> 4) & GENMASK(29, 0))
+#define DW100_SRC_IMG_UV_BASE 0x14
+#define DW100_IMG_UV_BASE(base) (((base) >> 4) & GENMASK(29, 0))
+#define DW100_SRC_IMG_SIZE 0x18
+#define DW100_IMG_SIZE_WIDTH(w) (((w) & GENMASK(12, 0)) << 0)
+#define DW100_IMG_SIZE_HEIGHT(h) (((h) & GENMASK(12, 0)) << 16)
+
+#define DW100_SRC_IMG_STRIDE 0x1c
+#define DW100_MAP_LUT_ADDR2 0x20
+#define DW100_MAP_LUT_SIZE2 0x24
+#define DW100_SRC_IMG_Y_BASE2 0x28
+#define DW100_SRC_IMG_UV_BASE2 0x2c
+#define DW100_SRC_IMG_SIZE2 0x30
+#define DW100_SRC_IMG_STRIDE2 0x34
+#define DW100_DST_IMG_Y_BASE 0x38
+#define DW100_DST_IMG_UV_BASE 0x3c
+#define DW100_DST_IMG_SIZE 0x40
+#define DW100_DST_IMG_STRIDE 0x44
+#define DW100_DST_IMG_Y_BASE2 0x48
+#define DW100_DST_IMG_UV_BASE2 0x4c
+#define DW100_DST_IMG_SIZE2 0x50
+#define DW100_DST_IMG_STRIDE2 0x54
+#define DW100_SWAP_CONTROL 0x58
+#define DW100_SWAP_CONTROL_BYTE BIT(0)
+#define DW100_SWAP_CONTROL_SHORT BIT(1)
+#define DW100_SWAP_CONTROL_WORD BIT(2)
+#define DW100_SWAP_CONTROL_LONG BIT(3)
+#define DW100_SWAP_CONTROL_Y(x) (((x) & GENMASK(3, 0)) << 0)
+#define DW100_SWAP_CONTROL_UV(x) (((x) & GENMASK(3, 0)) << 4)
+#define DW100_SWAP_CONTROL_SRC(x) (((x) & GENMASK(7, 0)) << 0)
+#define DW100_SWAP_CONTROL_DST(x) (((x) & GENMASK(7, 0)) << 8)
+#define DW100_SWAP_CONTROL_SRC2(x) (((x) & GENMASK(7, 0)) << 16)
+#define DW100_SWAP_CONTROL_DST2(x) (((x) & GENMASK(7, 0)) << 24)
+#define DW100_SWAP_CONTROL_SRC_MASK GENMASK(7, 0)
+#define DW100_SWAP_CONTROL_DST_MASK GENMASK(15, 8)
+#define DW100_SWAP_CONTROL_SRC2_MASK GENMASK(23, 16)
+#define DW100_SWAP_CONTROL_DST2_MASK GENMASK(31, 24)
+#define DW100_VERTICAL_SPLIT_LINE 0x5c
+#define DW100_HORIZON_SPLIT_LINE 0x60
+#define DW100_SCALE_FACTOR 0x64
+#define DW100_ROI_START 0x68
+#define DW100_ROI_START_X(x) (((x) & GENMASK(12, 0)) << 0)
+#define DW100_ROI_START_Y(y) (((y) & GENMASK(12, 0)) << 16)
+#define DW100_BOUNDARY_PIXEL 0x6c
+#define DW100_BOUNDARY_PIXEL_V(v) (((v) & GENMASK(7, 0)) << 0)
+#define DW100_BOUNDARY_PIXEL_U(u) (((u) & GENMASK(7, 0)) << 8)
+#define DW100_BOUNDARY_PIXEL_Y(y) (((y) & GENMASK(7, 0)) << 16)
+
+#define DW100_INTERRUPT_STATUS 0x70
+#define DW100_INTERRUPT_STATUS_INT_FRAME_DONE BIT(0)
+#define DW100_INTERRUPT_STATUS_INT_ERR_TIME_OUT BIT(1)
+#define DW100_INTERRUPT_STATUS_INT_ERR_AXI_RESP BIT(2)
+#define DW100_INTERRUPT_STATUS_INT_ERR_X BIT(3)
+#define DW100_INTERRUPT_STATUS_INT_ERR_MB_FETCH BIT(4)
+#define DW100_INTERRUPT_STATUS_INT_ERR_FRAME2 BIT(5)
+#define DW100_INTERRUPT_STATUS_INT_ERR_FRAME3 BIT(6)
+#define DW100_INTERRUPT_STATUS_INT_ERR_FRAME_DONE BIT(7)
+#define DW100_INTERRUPT_STATUS_INT_ERR_STATUS(x) (((x) >> 1) & 0x7f)
+#define DW100_INTERRUPT_STATUS_INT_STATUS(x) ((x) & 0xff)
+
+#define DW100_INTERRUPT_STATUS_INT_ENABLE_MASK GENMASK(15, 8)
+#define DW100_INTERRUPT_STATUS_INT_ENABLE(x) (((x) & GENMASK(7, 0)) << 8)
+#define DW100_INTERRUPT_STATUS_FRAME_BUSY BIT(16)
+#define DW100_INTERRUPT_STATUS_INT_CLEAR(x) (((x) & GENMASK(7, 0)) << 24)
+#define DW100_BUS_CTRL 0x74
+#define DW100_BUS_CTRL_AXI_MASTER_ENABLE BIT(31)
+#define DW100_BUS_CTRL1 0x78
+#define DW100_BUS_TIME_OUT_CYCLE 0x7c
+#define DW100_DST_IMG_Y_SIZE1 0x80
+#define DW100_DST_IMG_Y_SIZE(sz) (((sz) >> 4) & GENMASK(29, 0))
+#define DW100_DST_IMG_UV_SIZE(sz) (((sz) >> 4) & GENMASK(29, 0))
+#define DW100_DST_IMG_UV_SIZE1 0x84
+#define DW100_DST_IMG_Y_SIZE2 0x88
+#define DW100_DST_IMG_UV_SIZE2 0x8c
+
+#endif /* _DW100_REGS_H_ */
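The *_BASE and MAP_LUT_ADDR macros above store bits [33:4] of a DMA address, which implies that buffers handed to the dewarper must be 16-byte aligned. A small sketch of that packing (standalone C, illustrative only):

#include <assert.h>
#include <stdint.h>

#define GENMASK30 ((1u << 30) - 1)		/* stands in for GENMASK(29, 0) */

static uint32_t img_y_base(uint64_t base)	/* mirrors DW100_IMG_Y_BASE() */
{
	return (uint32_t)((base >> 4) & GENMASK30);
}

int main(void)
{
	uint64_t dma = 0x12345670ull;		/* must be 16-byte aligned */

	assert((dma & 0xf) == 0);
	assert(img_y_base(dma) == 0x01234567u);
	return 0;
}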
diff --git a/drivers/media/platform/nxp/fsl-viu.c b/drivers/media/platform/nxp/fsl-viu.c
deleted file mode 100644
index afc96f6db2a1..000000000000
--- a/drivers/media/platform/nxp/fsl-viu.c
+++ /dev/null
@@ -1,1599 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * Freescale VIU video driver
- *
- * Authors: Hongjun Chen <hong-jun.chen@freescale.com>
- * Porting to 2.6.35 by DENX Software Engineering,
- * Anatolij Gustschin <agust@denx.de>
- */
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/slab.h>
-#include <media/v4l2-common.h>
-#include <media/v4l2-device.h>
-#include <media/v4l2-ioctl.h>
-#include <media/v4l2-ctrls.h>
-#include <media/v4l2-fh.h>
-#include <media/v4l2-event.h>
-#include <media/videobuf-dma-contig.h>
-
-#define DRV_NAME "fsl_viu"
-#define VIU_VERSION "0.5.1"
-
-#define BUFFER_TIMEOUT msecs_to_jiffies(500) /* 0.5 seconds */
-
-#define VIU_VID_MEM_LIMIT 4 /* Video memory limit, in Mb */
-
-/* I2C address of video decoder chip is 0x4A */
-#define VIU_VIDEO_DECODER_ADDR 0x25
-
-static int info_level;
-
-#define dprintk(level, fmt, arg...) \
- do { \
- if (level <= info_level) \
- printk(KERN_DEBUG "viu: " fmt , ## arg); \
- } while (0)
-
-/*
- * Basic structures
- */
-struct viu_fmt {
- u32 fourcc; /* v4l2 format id */
- u32 pixelformat;
- int depth;
-};
-
-static struct viu_fmt formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_RGB565,
- .pixelformat = V4L2_PIX_FMT_RGB565,
- .depth = 16,
- }, {
- .fourcc = V4L2_PIX_FMT_RGB32,
- .pixelformat = V4L2_PIX_FMT_RGB32,
- .depth = 32,
- }
-};
-
-struct viu_dev;
-struct viu_buf;
-
-/* buffer for one video frame */
-struct viu_buf {
- /* common v4l buffer stuff -- must be first */
- struct videobuf_buffer vb;
- struct viu_fmt *fmt;
-};
-
-struct viu_dmaqueue {
- struct viu_dev *dev;
- struct list_head active;
- struct list_head queued;
- struct timer_list timeout;
-};
-
-struct viu_status {
- u32 field_irq;
- u32 vsync_irq;
- u32 hsync_irq;
- u32 vstart_irq;
- u32 dma_end_irq;
- u32 error_irq;
-};
-
-struct viu_reg {
- u32 status_cfg;
- u32 luminance;
- u32 chroma_r;
- u32 chroma_g;
- u32 chroma_b;
- u32 field_base_addr;
- u32 dma_inc;
- u32 picture_count;
- u32 req_alarm;
- u32 alpha;
-} __attribute__ ((packed));
-
-struct viu_dev {
- struct v4l2_device v4l2_dev;
- struct v4l2_ctrl_handler hdl;
- struct mutex lock;
- spinlock_t slock;
- int users;
-
- struct device *dev;
- /* various device info */
- struct video_device *vdev;
- struct viu_dmaqueue vidq;
- enum v4l2_field capfield;
- int field;
- int first;
- int dma_done;
-
- /* Hardware register area */
- struct viu_reg __iomem *vr;
-
- /* Interrupt vector */
- int irq;
- struct viu_status irqs;
-
- /* video overlay */
- struct v4l2_framebuffer ovbuf;
- struct viu_fmt *ovfmt;
- unsigned int ovenable;
- enum v4l2_field ovfield;
-
- /* crop */
- struct v4l2_rect crop_current;
-
- /* clock pointer */
- struct clk *clk;
-
- /* decoder */
- struct v4l2_subdev *decoder;
-
- v4l2_std_id std;
-};
-
-struct viu_fh {
- /* must remain the first field of this struct */
- struct v4l2_fh fh;
- struct viu_dev *dev;
-
- /* video capture */
- struct videobuf_queue vb_vidq;
- spinlock_t vbq_lock; /* spinlock for the videobuf queue */
-
- /* video overlay */
- struct v4l2_window win;
- struct v4l2_clip clips[1];
-
- /* video capture */
- struct viu_fmt *fmt;
- int width, height, sizeimage;
- enum v4l2_buf_type type;
-};
-
-static struct viu_reg reg_val;
-
-/*
- * Macro definitions of VIU registers
- */
-
-/* STATUS_CONFIG register */
-enum status_config {
- SOFT_RST = 1 << 0,
-
- ERR_MASK = 0x0f << 4, /* Error code mask */
- ERR_NO = 0x00, /* No error */
- ERR_DMA_V = 0x01 << 4, /* DMA in vertical active */
- ERR_DMA_VB = 0x02 << 4, /* DMA in vertical blanking */
- ERR_LINE_TOO_LONG = 0x04 << 4, /* Line too long */
- ERR_TOO_MANG_LINES = 0x05 << 4, /* Too many lines in field */
- ERR_LINE_TOO_SHORT = 0x06 << 4, /* Line too short */
- ERR_NOT_ENOUGH_LINE = 0x07 << 4, /* Not enough lines in field */
- ERR_FIFO_OVERFLOW = 0x08 << 4, /* FIFO overflow */
- ERR_FIFO_UNDERFLOW = 0x09 << 4, /* FIFO underflow */
- ERR_1bit_ECC = 0x0a << 4, /* One bit ECC error */
- ERR_MORE_ECC = 0x0b << 4, /* Two/more bits ECC error */
-
- INT_FIELD_EN = 0x01 << 8, /* Enable field interrupt */
- INT_VSYNC_EN = 0x01 << 9, /* Enable vsync interrupt */
- INT_HSYNC_EN = 0x01 << 10, /* Enable hsync interrupt */
- INT_VSTART_EN = 0x01 << 11, /* Enable vstart interrupt */
- INT_DMA_END_EN = 0x01 << 12, /* Enable DMA end interrupt */
- INT_ERROR_EN = 0x01 << 13, /* Enable error interrupt */
- INT_ECC_EN = 0x01 << 14, /* Enable ECC interrupt */
-
- INT_FIELD_STATUS = 0x01 << 16, /* field interrupt status */
- INT_VSYNC_STATUS = 0x01 << 17, /* vsync interrupt status */
- INT_HSYNC_STATUS = 0x01 << 18, /* hsync interrupt status */
- INT_VSTART_STATUS = 0x01 << 19, /* vstart interrupt status */
- INT_DMA_END_STATUS = 0x01 << 20, /* DMA end interrupt status */
- INT_ERROR_STATUS = 0x01 << 21, /* error interrupt status */
-
- DMA_ACT = 0x01 << 27, /* Enable DMA transfer */
- FIELD_NO = 0x01 << 28, /* Field number */
- DITHER_ON = 0x01 << 29, /* Dithering is on */
- ROUND_ON = 0x01 << 30, /* Round is on */
- MODE_32BIT = 1UL << 31, /* Data in RGBa888,
- * 0 in RGB565
- */
-};
-
-#define norm_maxw() 720
-#define norm_maxh() 576
-
-#define INT_ALL_STATUS (INT_FIELD_STATUS | INT_VSYNC_STATUS | \
- INT_HSYNC_STATUS | INT_VSTART_STATUS | \
- INT_DMA_END_STATUS | INT_ERROR_STATUS)
-
-#define NUM_FORMATS ARRAY_SIZE(formats)
-
-static irqreturn_t viu_intr(int irq, void *dev_id);
-
-static struct viu_fmt *format_by_fourcc(int fourcc)
-{
- int i;
-
- for (i = 0; i < NUM_FORMATS; i++) {
- if (formats[i].pixelformat == fourcc)
- return formats + i;
- }
-
- dprintk(0, "unknown pixelformat:'%4.4s'\n", (char *)&fourcc);
- return NULL;
-}
-
-static void viu_start_dma(struct viu_dev *dev)
-{
- struct viu_reg __iomem *vr = dev->vr;
-
- dev->field = 0;
-
- /* Enable DMA operation */
- iowrite32be(SOFT_RST, &vr->status_cfg);
- iowrite32be(INT_FIELD_EN, &vr->status_cfg);
-}
-
-static void viu_stop_dma(struct viu_dev *dev)
-{
- struct viu_reg __iomem *vr = dev->vr;
- int cnt = 100;
- u32 status_cfg;
-
- iowrite32be(0, &vr->status_cfg);
-
- /* Clear pending interrupts */
- status_cfg = ioread32be(&vr->status_cfg);
- if (status_cfg & 0x3f0000)
- iowrite32be(status_cfg & 0x3f0000, &vr->status_cfg);
-
- if (status_cfg & DMA_ACT) {
- do {
- status_cfg = ioread32be(&vr->status_cfg);
- if (status_cfg & INT_DMA_END_STATUS)
- break;
- } while (cnt--);
-
- if (cnt < 0) {
- /* timed out, issue soft reset */
- iowrite32be(SOFT_RST, &vr->status_cfg);
- iowrite32be(0, &vr->status_cfg);
- } else {
- /* clear DMA_END and other pending irqs */
- iowrite32be(status_cfg & 0x3f0000, &vr->status_cfg);
- }
- }
-
- dev->field = 0;
-}
-
-static int restart_video_queue(struct viu_dmaqueue *vidq)
-{
- struct viu_buf *buf, *prev;
-
- dprintk(1, "%s vidq=%p\n", __func__, vidq);
- if (!list_empty(&vidq->active)) {
- buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
- dprintk(2, "restart_queue [%p/%d]: restart dma\n",
- buf, buf->vb.i);
-
- viu_stop_dma(vidq->dev);
-
- /* cancel all outstanding capture requests */
- list_for_each_entry_safe(buf, prev, &vidq->active, vb.queue) {
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- }
- mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
- return 0;
- }
-
- prev = NULL;
- for (;;) {
- if (list_empty(&vidq->queued))
- return 0;
- buf = list_entry(vidq->queued.next, struct viu_buf, vb.queue);
- if (prev == NULL) {
- list_move_tail(&buf->vb.queue, &vidq->active);
-
- dprintk(1, "Restarting video dma\n");
- viu_stop_dma(vidq->dev);
- viu_start_dma(vidq->dev);
-
- buf->vb.state = VIDEOBUF_ACTIVE;
- mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
- dprintk(2, "[%p/%d] restart_queue - first active\n",
- buf, buf->vb.i);
-
- } else if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_move_tail(&buf->vb.queue, &vidq->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- dprintk(2, "[%p/%d] restart_queue - move to active\n",
- buf, buf->vb.i);
- } else {
- return 0;
- }
- prev = buf;
- }
-}
-
-static void viu_vid_timeout(struct timer_list *t)
-{
- struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
- struct viu_buf *buf;
- struct viu_dmaqueue *vidq = &dev->vidq;
-
- while (!list_empty(&vidq->active)) {
- buf = list_entry(vidq->active.next, struct viu_buf, vb.queue);
- list_del(&buf->vb.queue);
- buf->vb.state = VIDEOBUF_ERROR;
- wake_up(&buf->vb.done);
- dprintk(1, "viu/0: [%p/%d] timeout\n", buf, buf->vb.i);
- }
-
- restart_video_queue(vidq);
-}
-
-/*
- * Videobuf operations
- */
-static int buffer_setup(struct videobuf_queue *vq, unsigned int *count,
- unsigned int *size)
-{
- struct viu_fh *fh = vq->priv_data;
-
- *size = fh->width * fh->height * fh->fmt->depth >> 3;
- if (*count == 0)
- *count = 32;
-
- while (*size * *count > VIU_VID_MEM_LIMIT * 1024 * 1024)
- (*count)--;
-
- dprintk(1, "%s, count=%d, size=%d\n", __func__, *count, *size);
- return 0;
-}
-
-static void free_buffer(struct videobuf_queue *vq, struct viu_buf *buf)
-{
- struct videobuf_buffer *vb = &buf->vb;
- void *vaddr = NULL;
-
- videobuf_waiton(vq, &buf->vb, 0, 0);
-
- if (vq->int_ops && vq->int_ops->vaddr)
- vaddr = vq->int_ops->vaddr(vb);
-
- if (vaddr)
- videobuf_dma_contig_free(vq, &buf->vb);
-
- buf->vb.state = VIDEOBUF_NEEDS_INIT;
-}
-
-inline int buffer_activate(struct viu_dev *dev, struct viu_buf *buf)
-{
- struct viu_reg __iomem *vr = dev->vr;
- int bpp;
-
- /* setup the DMA base address */
- reg_val.field_base_addr = videobuf_to_dma_contig(&buf->vb);
-
- dprintk(1, "buffer_activate [%p/%d]: dma addr 0x%lx\n",
- buf, buf->vb.i, (unsigned long)reg_val.field_base_addr);
-
- /* interlace is on by default, set horizontal DMA increment */
- reg_val.status_cfg = 0;
- bpp = buf->fmt->depth >> 3;
- switch (bpp) {
- case 2:
- reg_val.status_cfg &= ~MODE_32BIT;
- reg_val.dma_inc = buf->vb.width * 2;
- break;
- case 4:
- reg_val.status_cfg |= MODE_32BIT;
- reg_val.dma_inc = buf->vb.width * 4;
- break;
- default:
- dprintk(0, "doesn't support color depth(%d)\n",
- bpp * 8);
- return -EINVAL;
- }
-
- /* setup picture_count register */
- reg_val.picture_count = (buf->vb.height / 2) << 16 |
- buf->vb.width;
-
- reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
-
- buf->vb.state = VIDEOBUF_ACTIVE;
- dev->capfield = buf->vb.field;
-
- /* reset dma increment if needed */
- if (!V4L2_FIELD_HAS_BOTH(buf->vb.field))
- reg_val.dma_inc = 0;
-
- iowrite32be(reg_val.dma_inc, &vr->dma_inc);
- iowrite32be(reg_val.picture_count, &vr->picture_count);
- iowrite32be(reg_val.field_base_addr, &vr->field_base_addr);
- mod_timer(&dev->vidq.timeout, jiffies + BUFFER_TIMEOUT);
- return 0;
-}
-
-static int buffer_prepare(struct videobuf_queue *vq,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
-{
- struct viu_fh *fh = vq->priv_data;
- struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
- int rc;
-
- BUG_ON(fh->fmt == NULL);
-
- if (fh->width < 48 || fh->width > norm_maxw() ||
- fh->height < 32 || fh->height > norm_maxh())
- return -EINVAL;
- buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
- if (buf->vb.baddr != 0 && buf->vb.bsize < buf->vb.size)
- return -EINVAL;
-
- if (buf->fmt != fh->fmt ||
- buf->vb.width != fh->width ||
- buf->vb.height != fh->height ||
- buf->vb.field != field) {
- buf->fmt = fh->fmt;
- buf->vb.width = fh->width;
- buf->vb.height = fh->height;
- buf->vb.field = field;
- }
-
- if (buf->vb.state == VIDEOBUF_NEEDS_INIT) {
- rc = videobuf_iolock(vq, &buf->vb, NULL);
- if (rc != 0)
- goto fail;
-
- buf->vb.width = fh->width;
- buf->vb.height = fh->height;
- buf->vb.field = field;
- buf->fmt = fh->fmt;
- }
-
- buf->vb.state = VIDEOBUF_PREPARED;
- return 0;
-
-fail:
- free_buffer(vq, buf);
- return rc;
-}
-
-static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
-{
- struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
- struct viu_fh *fh = vq->priv_data;
- struct viu_dev *dev = fh->dev;
- struct viu_dmaqueue *vidq = &dev->vidq;
- struct viu_buf *prev;
-
- if (!list_empty(&vidq->queued)) {
- dprintk(1, "adding vb queue=%p\n", &buf->vb.queue);
- dprintk(1, "vidq pointer 0x%p, queued 0x%p\n",
- vidq, &vidq->queued);
- dprintk(1, "dev %p, queued: self %p, next %p, head %p\n",
- dev, &vidq->queued, vidq->queued.next,
- vidq->queued.prev);
- list_add_tail(&buf->vb.queue, &vidq->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - append to queued\n",
- buf, buf->vb.i);
- } else if (list_empty(&vidq->active)) {
- dprintk(1, "adding vb active=%p\n", &buf->vb.queue);
- list_add_tail(&buf->vb.queue, &vidq->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- mod_timer(&vidq->timeout, jiffies+BUFFER_TIMEOUT);
- dprintk(2, "[%p/%d] buffer_queue - first active\n",
- buf, buf->vb.i);
-
- buffer_activate(dev, buf);
- } else {
- dprintk(1, "adding vb queue2=%p\n", &buf->vb.queue);
- prev = list_entry(vidq->active.prev, struct viu_buf, vb.queue);
- if (prev->vb.width == buf->vb.width &&
- prev->vb.height == buf->vb.height &&
- prev->fmt == buf->fmt) {
- list_add_tail(&buf->vb.queue, &vidq->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- dprintk(2, "[%p/%d] buffer_queue - append to active\n",
- buf, buf->vb.i);
- } else {
- list_add_tail(&buf->vb.queue, &vidq->queued);
- buf->vb.state = VIDEOBUF_QUEUED;
- dprintk(2, "[%p/%d] buffer_queue - first queued\n",
- buf, buf->vb.i);
- }
- }
-}
-
-static void buffer_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct viu_buf *buf = container_of(vb, struct viu_buf, vb);
- struct viu_fh *fh = vq->priv_data;
- struct viu_dev *dev = (struct viu_dev *)fh->dev;
-
- viu_stop_dma(dev);
- free_buffer(vq, buf);
-}
-
-static const struct videobuf_queue_ops viu_video_qops = {
- .buf_setup = buffer_setup,
- .buf_prepare = buffer_prepare,
- .buf_queue = buffer_queue,
- .buf_release = buffer_release,
-};
-
-/*
- * IOCTL vidioc handling
- */
-static int vidioc_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- strscpy(cap->driver, "viu", sizeof(cap->driver));
- strscpy(cap->card, "viu", sizeof(cap->card));
- strscpy(cap->bus_info, "platform:viu", sizeof(cap->bus_info));
- return 0;
-}
-
-static int vidioc_enum_fmt(struct file *file, void *priv,
- struct v4l2_fmtdesc *f)
-{
- int index = f->index;
-
- if (f->index >= NUM_FORMATS)
- return -EINVAL;
-
- f->pixelformat = formats[index].fourcc;
- return 0;
-}
-
-static int vidioc_g_fmt_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct viu_fh *fh = priv;
-
- f->fmt.pix.width = fh->width;
- f->fmt.pix.height = fh->height;
- f->fmt.pix.field = fh->vb_vidq.field;
- f->fmt.pix.pixelformat = fh->fmt->pixelformat;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fh->fmt->depth) >> 3;
- f->fmt.pix.sizeimage = fh->sizeimage;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- return 0;
-}
-
-static int vidioc_try_fmt_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct viu_fmt *fmt;
- unsigned int maxw, maxh;
-
- fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- if (!fmt) {
- dprintk(1, "Fourcc format (0x%08x) invalid.",
- f->fmt.pix.pixelformat);
- return -EINVAL;
- }
-
- maxw = norm_maxw();
- maxh = norm_maxh();
-
- f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- if (f->fmt.pix.height < 32)
- f->fmt.pix.height = 32;
- if (f->fmt.pix.height > maxh)
- f->fmt.pix.height = maxh;
- if (f->fmt.pix.width < 48)
- f->fmt.pix.width = 48;
- if (f->fmt.pix.width > maxw)
- f->fmt.pix.width = maxw;
- f->fmt.pix.width &= ~0x03;
- f->fmt.pix.bytesperline =
- (f->fmt.pix.width * fmt->depth) >> 3;
- f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
- f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
-
- return 0;
-}
-
-static int vidioc_s_fmt_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct viu_fh *fh = priv;
- int ret;
-
- ret = vidioc_try_fmt_cap(file, fh, f);
- if (ret < 0)
- return ret;
-
- fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat);
- fh->width = f->fmt.pix.width;
- fh->height = f->fmt.pix.height;
- fh->sizeimage = f->fmt.pix.sizeimage;
- fh->vb_vidq.field = f->fmt.pix.field;
- fh->type = f->type;
- return 0;
-}
-
-static int vidioc_g_fmt_overlay(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct viu_fh *fh = priv;
-
- f->fmt.win = fh->win;
- return 0;
-}
-
-static int verify_preview(struct viu_dev *dev, struct v4l2_window *win)
-{
- enum v4l2_field field;
- int maxw, maxh;
-
- if (dev->ovbuf.base == NULL)
- return -EINVAL;
- if (dev->ovfmt == NULL)
- return -EINVAL;
- if (win->w.width < 48 || win->w.height < 32)
- return -EINVAL;
-
- field = win->field;
- maxw = dev->crop_current.width;
- maxh = dev->crop_current.height;
-
- if (field == V4L2_FIELD_ANY) {
- field = (win->w.height > maxh/2)
- ? V4L2_FIELD_INTERLACED
- : V4L2_FIELD_TOP;
- }
- switch (field) {
- case V4L2_FIELD_TOP:
- case V4L2_FIELD_BOTTOM:
- maxh = maxh / 2;
- break;
- case V4L2_FIELD_INTERLACED:
- break;
- default:
- return -EINVAL;
- }
-
- win->field = field;
- if (win->w.width > maxw)
- win->w.width = maxw;
- if (win->w.height > maxh)
- win->w.height = maxh;
- return 0;
-}
-
-inline void viu_activate_overlay(struct viu_reg __iomem *vr)
-{
- iowrite32be(reg_val.field_base_addr, &vr->field_base_addr);
- iowrite32be(reg_val.dma_inc, &vr->dma_inc);
- iowrite32be(reg_val.picture_count, &vr->picture_count);
-}
-
-static int viu_setup_preview(struct viu_dev *dev, struct viu_fh *fh)
-{
- int bpp;
-
- dprintk(1, "%s %dx%d\n", __func__,
- fh->win.w.width, fh->win.w.height);
-
- reg_val.status_cfg = 0;
-
- /* setup window */
- reg_val.picture_count = (fh->win.w.height / 2) << 16 |
- fh->win.w.width;
-
- /* setup color depth and dma increment */
- bpp = dev->ovfmt->depth / 8;
- switch (bpp) {
- case 2:
- reg_val.status_cfg &= ~MODE_32BIT;
- reg_val.dma_inc = fh->win.w.width * 2;
- break;
- case 4:
- reg_val.status_cfg |= MODE_32BIT;
- reg_val.dma_inc = fh->win.w.width * 4;
- break;
- default:
- dprintk(0, "device doesn't support color depth(%d)\n",
- bpp * 8);
- return -EINVAL;
- }
-
- dev->ovfield = fh->win.field;
- if (!V4L2_FIELD_HAS_BOTH(dev->ovfield))
- reg_val.dma_inc = 0;
-
- reg_val.status_cfg |= DMA_ACT | INT_DMA_END_EN | INT_FIELD_EN;
-
- /* setup the base address of the overlay buffer */
- reg_val.field_base_addr = (u32)(long)dev->ovbuf.base;
-
- return 0;
-}
-
-static int vidioc_s_fmt_overlay(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct viu_fh *fh = priv;
- struct viu_dev *dev = (struct viu_dev *)fh->dev;
- unsigned long flags;
- int err;
-
- err = verify_preview(dev, &f->fmt.win);
- if (err)
- return err;
-
- fh->win = f->fmt.win;
-
- spin_lock_irqsave(&dev->slock, flags);
- viu_setup_preview(dev, fh);
- spin_unlock_irqrestore(&dev->slock, flags);
- return 0;
-}
-
-static int vidioc_try_fmt_overlay(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- return 0;
-}
-
-static int vidioc_overlay(struct file *file, void *priv, unsigned int on)
-{
- struct viu_fh *fh = priv;
- struct viu_dev *dev = (struct viu_dev *)fh->dev;
- unsigned long flags;
-
- if (on) {
- spin_lock_irqsave(&dev->slock, flags);
- viu_activate_overlay(dev->vr);
- dev->ovenable = 1;
-
- /* start dma */
- viu_start_dma(dev);
- spin_unlock_irqrestore(&dev->slock, flags);
- } else {
- viu_stop_dma(dev);
- dev->ovenable = 0;
- }
-
- return 0;
-}
-
-static int vidioc_g_fbuf(struct file *file, void *priv, struct v4l2_framebuffer *arg)
-{
- struct viu_fh *fh = priv;
- struct viu_dev *dev = fh->dev;
- struct v4l2_framebuffer *fb = arg;
-
- *fb = dev->ovbuf;
- fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
- return 0;
-}
-
-static int vidioc_s_fbuf(struct file *file, void *priv, const struct v4l2_framebuffer *arg)
-{
- struct viu_fh *fh = priv;
- struct viu_dev *dev = fh->dev;
- const struct v4l2_framebuffer *fb = arg;
- struct viu_fmt *fmt;
-
- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
- return -EPERM;
-
- /* check args */
- fmt = format_by_fourcc(fb->fmt.pixelformat);
- if (fmt == NULL)
- return -EINVAL;
-
- /* ok, accept it */
- dev->ovbuf = *fb;
- dev->ovfmt = fmt;
- if (dev->ovbuf.fmt.bytesperline == 0) {
- dev->ovbuf.fmt.bytesperline =
- dev->ovbuf.fmt.width * fmt->depth / 8;
- }
- return 0;
-}
-
-static int vidioc_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *p)
-{
- struct viu_fh *fh = priv;
-
- return videobuf_reqbufs(&fh->vb_vidq, p);
-}
-
-static int vidioc_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct viu_fh *fh = priv;
-
- return videobuf_querybuf(&fh->vb_vidq, p);
-}
-
-static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct viu_fh *fh = priv;
-
- return videobuf_qbuf(&fh->vb_vidq, p);
-}
-
-static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
-{
- struct viu_fh *fh = priv;
-
- return videobuf_dqbuf(&fh->vb_vidq, p,
- file->f_flags & O_NONBLOCK);
-}
-
-static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct viu_fh *fh = priv;
- struct viu_dev *dev = fh->dev;
-
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (fh->type != i)
- return -EINVAL;
-
- if (dev->ovenable)
- dev->ovenable = 0;
-
- viu_start_dma(fh->dev);
-
- return videobuf_streamon(&fh->vb_vidq);
-}
-
-static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
-{
- struct viu_fh *fh = priv;
-
- if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- if (fh->type != i)
- return -EINVAL;
-
- viu_stop_dma(fh->dev);
-
- return videobuf_streamoff(&fh->vb_vidq);
-}
-
-#define decoder_call(viu, o, f, args...) \
- v4l2_subdev_call(viu->decoder, o, f, ##args)
-
-static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct viu_fh *fh = priv;
-
- decoder_call(fh->dev, video, querystd, std_id);
- return 0;
-}
-
-static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id id)
-{
- struct viu_fh *fh = priv;
-
- fh->dev->std = id;
- decoder_call(fh->dev, video, s_std, id);
- return 0;
-}
-
-static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct viu_fh *fh = priv;
-
- *std_id = fh->dev->std;
- return 0;
-}
-
-/* only one input in this driver */
-static int vidioc_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- struct viu_fh *fh = priv;
-
- if (inp->index != 0)
- return -EINVAL;
-
- inp->type = V4L2_INPUT_TYPE_CAMERA;
- inp->std = fh->dev->vdev->tvnorms;
- strscpy(inp->name, "Camera", sizeof(inp->name));
- return 0;
-}
-
-static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
-{
- *i = 0;
- return 0;
-}
-
-static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
-{
- struct viu_fh *fh = priv;
-
- if (i)
- return -EINVAL;
-
- decoder_call(fh->dev, video, s_routing, i, 0, 0);
- return 0;
-}
-
-inline void viu_activate_next_buf(struct viu_dev *dev,
- struct viu_dmaqueue *viuq)
-{
- struct viu_dmaqueue *vidq = viuq;
- struct viu_buf *buf;
-
- /* launch another DMA operation for an active/queued buffer */
- if (!list_empty(&vidq->active)) {
- buf = list_entry(vidq->active.next, struct viu_buf,
- vb.queue);
- dprintk(1, "start another queued buffer: 0x%p\n", buf);
- buffer_activate(dev, buf);
- } else if (!list_empty(&vidq->queued)) {
- buf = list_entry(vidq->queued.next, struct viu_buf,
- vb.queue);
- list_del(&buf->vb.queue);
-
- dprintk(1, "start another queued buffer: 0x%p\n", buf);
- list_add_tail(&buf->vb.queue, &vidq->active);
- buf->vb.state = VIDEOBUF_ACTIVE;
- buffer_activate(dev, buf);
- }
-}
-
-inline void viu_default_settings(struct viu_reg __iomem *vr)
-{
- iowrite32be(0x9512A254, &vr->luminance);
- iowrite32be(0x03310000, &vr->chroma_r);
- iowrite32be(0x06600F38, &vr->chroma_g);
- iowrite32be(0x00000409, &vr->chroma_b);
- iowrite32be(0x000000ff, &vr->alpha);
- iowrite32be(0x00000090, &vr->req_alarm);
- dprintk(1, "status reg: 0x%08x, field base: 0x%08x\n",
- ioread32be(&vr->status_cfg), ioread32be(&vr->field_base_addr));
-}
-
-static void viu_overlay_intr(struct viu_dev *dev, u32 status)
-{
- struct viu_reg __iomem *vr = dev->vr;
-
- if (status & INT_DMA_END_STATUS)
- dev->dma_done = 1;
-
- if (status & INT_FIELD_STATUS) {
- if (dev->dma_done) {
- u32 addr = reg_val.field_base_addr;
-
- dev->dma_done = 0;
- if (status & FIELD_NO)
- addr += reg_val.dma_inc;
-
- iowrite32be(addr, &vr->field_base_addr);
- iowrite32be(reg_val.dma_inc, &vr->dma_inc);
- iowrite32be((status & 0xffc0ffff) |
- (status & INT_ALL_STATUS) |
- reg_val.status_cfg, &vr->status_cfg);
- } else if (status & INT_VSYNC_STATUS) {
- iowrite32be((status & 0xffc0ffff) |
- (status & INT_ALL_STATUS) |
- reg_val.status_cfg, &vr->status_cfg);
- }
- }
-}
-
-static void viu_capture_intr(struct viu_dev *dev, u32 status)
-{
- struct viu_dmaqueue *vidq = &dev->vidq;
- struct viu_reg __iomem *vr = dev->vr;
- struct viu_buf *buf;
- int field_num;
- int need_two;
- int dma_done = 0;
-
- field_num = status & FIELD_NO;
- need_two = V4L2_FIELD_HAS_BOTH(dev->capfield);
-
- if (status & INT_DMA_END_STATUS) {
- dma_done = 1;
- if (((field_num == 0) && (dev->field == 0)) ||
- (field_num && (dev->field == 1)))
- dev->field++;
- }
-
- if (status & INT_FIELD_STATUS) {
- dprintk(1, "irq: field %d, done %d\n",
- !!field_num, dma_done);
- if (unlikely(dev->first)) {
- if (field_num == 0) {
- dev->first = 0;
- dprintk(1, "activate first buf\n");
- viu_activate_next_buf(dev, vidq);
- } else
- dprintk(1, "wait field 0\n");
- return;
- }
-
- /* setup buffer address for next dma operation */
- if (!list_empty(&vidq->active)) {
- u32 addr = reg_val.field_base_addr;
-
- if (field_num && need_two) {
- addr += reg_val.dma_inc;
- dprintk(1, "field 1, 0x%lx, dev field %d\n",
- (unsigned long)addr, dev->field);
- }
- iowrite32be(addr, &vr->field_base_addr);
- iowrite32be(reg_val.dma_inc, &vr->dma_inc);
- iowrite32be((status & 0xffc0ffff) |
- (status & INT_ALL_STATUS) |
- reg_val.status_cfg, &vr->status_cfg);
- return;
- }
- }
-
- if (dma_done && field_num && (dev->field == 2)) {
- dev->field = 0;
- buf = list_entry(vidq->active.next,
- struct viu_buf, vb.queue);
- dprintk(1, "viu/0: [%p/%d] 0x%lx/0x%lx: dma complete\n",
- buf, buf->vb.i,
- (unsigned long)videobuf_to_dma_contig(&buf->vb),
- (unsigned long)ioread32be(&vr->field_base_addr));
-
- if (waitqueue_active(&buf->vb.done)) {
- list_del(&buf->vb.queue);
- buf->vb.ts = ktime_get_ns();
- buf->vb.state = VIDEOBUF_DONE;
- buf->vb.field_count++;
- wake_up(&buf->vb.done);
- }
- /* activate next dma buffer */
- viu_activate_next_buf(dev, vidq);
- }
-}
-
-static irqreturn_t viu_intr(int irq, void *dev_id)
-{
- struct viu_dev *dev = (struct viu_dev *)dev_id;
- struct viu_reg __iomem *vr = dev->vr;
- u32 status;
- u32 error;
-
- status = ioread32be(&vr->status_cfg);
-
- if (status & INT_ERROR_STATUS) {
- dev->irqs.error_irq++;
- error = status & ERR_MASK;
- if (error)
- dprintk(1, "Err: error(%d), times:%d!\n",
- error >> 4, dev->irqs.error_irq);
- /* Clear interrupt error bit and error flags */
- iowrite32be((status & 0xffc0ffff) | INT_ERROR_STATUS,
- &vr->status_cfg);
- }
-
- if (status & INT_DMA_END_STATUS) {
- dev->irqs.dma_end_irq++;
- dev->dma_done = 1;
- dprintk(2, "VIU DMA end interrupt times: %d\n",
- dev->irqs.dma_end_irq);
- }
-
- if (status & INT_HSYNC_STATUS)
- dev->irqs.hsync_irq++;
-
- if (status & INT_FIELD_STATUS) {
- dev->irqs.field_irq++;
- dprintk(2, "VIU field interrupt times: %d\n",
- dev->irqs.field_irq);
- }
-
- if (status & INT_VSTART_STATUS)
- dev->irqs.vstart_irq++;
-
- if (status & INT_VSYNC_STATUS) {
- dev->irqs.vsync_irq++;
- dprintk(2, "VIU vsync interrupt times: %d\n",
- dev->irqs.vsync_irq);
- }
-
- /* clear all pending irqs */
- status = ioread32be(&vr->status_cfg);
- iowrite32be((status & 0xffc0ffff) | (status & INT_ALL_STATUS),
- &vr->status_cfg);
-
- if (dev->ovenable) {
- viu_overlay_intr(dev, status);
- return IRQ_HANDLED;
- }
-
- /* Capture mode */
- viu_capture_intr(dev, status);
- return IRQ_HANDLED;
-}
-
-/*
- * File operations for the device
- */
-static int viu_open(struct file *file)
-{
- struct video_device *vdev = video_devdata(file);
- struct viu_dev *dev = video_get_drvdata(vdev);
- struct viu_fh *fh;
- struct viu_reg __iomem *vr;
- int minor = vdev->minor;
- u32 status_cfg;
-
- dprintk(1, "viu: open (minor=%d)\n", minor);
-
- dev->users++;
- if (dev->users > 1) {
- dev->users--;
- return -EBUSY;
- }
-
- vr = dev->vr;
-
- dprintk(1, "open minor=%d type=%s users=%d\n", minor,
- v4l2_type_names[V4L2_BUF_TYPE_VIDEO_CAPTURE], dev->users);
-
- if (mutex_lock_interruptible(&dev->lock)) {
- dev->users--;
- return -ERESTARTSYS;
- }
-
- /* allocate and initialize per filehandle data */
- fh = kzalloc(sizeof(*fh), GFP_KERNEL);
- if (!fh) {
- dev->users--;
- mutex_unlock(&dev->lock);
- return -ENOMEM;
- }
-
- v4l2_fh_init(&fh->fh, vdev);
- file->private_data = fh;
- fh->dev = dev;
-
- fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- fh->fmt = format_by_fourcc(V4L2_PIX_FMT_RGB32);
- fh->width = norm_maxw();
- fh->height = norm_maxh();
- dev->crop_current.width = fh->width;
- dev->crop_current.height = fh->height;
-
- dprintk(1, "Open: fh=%p, dev=%p, dev->vidq=%p\n", fh, dev, &dev->vidq);
- dprintk(1, "Open: list_empty queued=%d\n",
- list_empty(&dev->vidq.queued));
- dprintk(1, "Open: list_empty active=%d\n",
- list_empty(&dev->vidq.active));
-
- viu_default_settings(vr);
-
- status_cfg = ioread32be(&vr->status_cfg);
- iowrite32be(status_cfg & ~(INT_VSYNC_EN | INT_HSYNC_EN |
- INT_FIELD_EN | INT_VSTART_EN |
- INT_DMA_END_EN | INT_ERROR_EN | INT_ECC_EN),
- &vr->status_cfg);
-
- status_cfg = ioread32be(&vr->status_cfg);
- iowrite32be(status_cfg | INT_ALL_STATUS, &vr->status_cfg);
-
- spin_lock_init(&fh->vbq_lock);
- videobuf_queue_dma_contig_init(&fh->vb_vidq, &viu_video_qops,
- dev->dev, &fh->vbq_lock,
- fh->type, V4L2_FIELD_INTERLACED,
- sizeof(struct viu_buf), fh,
- &fh->dev->lock);
- v4l2_fh_add(&fh->fh);
- mutex_unlock(&dev->lock);
- return 0;
-}
-
-static ssize_t viu_read(struct file *file, char __user *data, size_t count,
- loff_t *ppos)
-{
- struct viu_fh *fh = file->private_data;
- struct viu_dev *dev = fh->dev;
- int ret = 0;
-
- dprintk(2, "%s\n", __func__);
- if (dev->ovenable)
- dev->ovenable = 0;
-
- if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- viu_start_dma(dev);
- ret = videobuf_read_stream(&fh->vb_vidq, data, count,
- ppos, 0, file->f_flags & O_NONBLOCK);
- mutex_unlock(&dev->lock);
- return ret;
- }
- return 0;
-}
-
-static __poll_t viu_poll(struct file *file, struct poll_table_struct *wait)
-{
- struct viu_fh *fh = file->private_data;
- struct videobuf_queue *q = &fh->vb_vidq;
- struct viu_dev *dev = fh->dev;
- __poll_t req_events = poll_requested_events(wait);
- __poll_t res = v4l2_ctrl_poll(file, wait);
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type)
- return EPOLLERR;
-
- if (!(req_events & (EPOLLIN | EPOLLRDNORM)))
- return res;
-
- mutex_lock(&dev->lock);
- res |= videobuf_poll_stream(file, q, wait);
- mutex_unlock(&dev->lock);
- return res;
-}
-
-static int viu_release(struct file *file)
-{
- struct viu_fh *fh = file->private_data;
- struct viu_dev *dev = fh->dev;
- int minor = video_devdata(file)->minor;
-
- mutex_lock(&dev->lock);
- viu_stop_dma(dev);
- videobuf_stop(&fh->vb_vidq);
- videobuf_mmap_free(&fh->vb_vidq);
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- mutex_unlock(&dev->lock);
-
- kfree(fh);
-
- dev->users--;
- dprintk(1, "close (minor=%d, users=%d)\n",
- minor, dev->users);
- return 0;
-}
-
-static void viu_reset(struct viu_reg __iomem *reg)
-{
- iowrite32be(0, &reg->status_cfg);
- iowrite32be(0x9512a254, &reg->luminance);
- iowrite32be(0x03310000, &reg->chroma_r);
- iowrite32be(0x06600f38, &reg->chroma_g);
- iowrite32be(0x00000409, &reg->chroma_b);
- iowrite32be(0, &reg->field_base_addr);
- iowrite32be(0, &reg->dma_inc);
- iowrite32be(0x01e002d0, &reg->picture_count);
- iowrite32be(0x00000090, &reg->req_alarm);
- iowrite32be(0x000000ff, &reg->alpha);
-}
-
-static int viu_mmap(struct file *file, struct vm_area_struct *vma)
-{
- struct viu_fh *fh = file->private_data;
- struct viu_dev *dev = fh->dev;
- int ret;
-
- dprintk(1, "mmap called, vma=%p\n", vma);
-
- if (mutex_lock_interruptible(&dev->lock))
- return -ERESTARTSYS;
- ret = videobuf_mmap_mapper(&fh->vb_vidq, vma);
- mutex_unlock(&dev->lock);
-
- dprintk(1, "vma start=0x%08lx, size=%ld, ret=%d\n",
- (unsigned long)vma->vm_start,
- (unsigned long)vma->vm_end-(unsigned long)vma->vm_start,
- ret);
-
- return ret;
-}
-
-static const struct v4l2_file_operations viu_fops = {
- .owner = THIS_MODULE,
- .open = viu_open,
- .release = viu_release,
- .read = viu_read,
- .poll = viu_poll,
- .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */
- .mmap = viu_mmap,
-};
-
-static const struct v4l2_ioctl_ops viu_ioctl_ops = {
- .vidioc_querycap = vidioc_querycap,
- .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt,
- .vidioc_g_fmt_vid_cap = vidioc_g_fmt_cap,
- .vidioc_try_fmt_vid_cap = vidioc_try_fmt_cap,
- .vidioc_s_fmt_vid_cap = vidioc_s_fmt_cap,
- .vidioc_enum_fmt_vid_overlay = vidioc_enum_fmt,
- .vidioc_g_fmt_vid_overlay = vidioc_g_fmt_overlay,
- .vidioc_try_fmt_vid_overlay = vidioc_try_fmt_overlay,
- .vidioc_s_fmt_vid_overlay = vidioc_s_fmt_overlay,
- .vidioc_overlay = vidioc_overlay,
- .vidioc_g_fbuf = vidioc_g_fbuf,
- .vidioc_s_fbuf = vidioc_s_fbuf,
- .vidioc_reqbufs = vidioc_reqbufs,
- .vidioc_querybuf = vidioc_querybuf,
- .vidioc_qbuf = vidioc_qbuf,
- .vidioc_dqbuf = vidioc_dqbuf,
- .vidioc_g_std = vidioc_g_std,
- .vidioc_s_std = vidioc_s_std,
- .vidioc_querystd = vidioc_querystd,
- .vidioc_enum_input = vidioc_enum_input,
- .vidioc_g_input = vidioc_g_input,
- .vidioc_s_input = vidioc_s_input,
- .vidioc_streamon = vidioc_streamon,
- .vidioc_streamoff = vidioc_streamoff,
- .vidioc_log_status = v4l2_ctrl_log_status,
- .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
- .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
-};
-
-static const struct video_device viu_template = {
- .name = "FSL viu",
- .fops = &viu_fops,
- .minor = -1,
- .ioctl_ops = &viu_ioctl_ops,
- .release = video_device_release,
-
- .tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL,
- .device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
- V4L2_CAP_VIDEO_OVERLAY | V4L2_CAP_READWRITE,
-};
-
-static int viu_of_probe(struct platform_device *op)
-{
- struct viu_dev *viu_dev;
- struct video_device *vdev;
- struct resource r;
- struct viu_reg __iomem *viu_regs;
- struct i2c_adapter *ad;
- int ret, viu_irq;
- struct clk *clk;
-
- ret = of_address_to_resource(op->dev.of_node, 0, &r);
- if (ret) {
- dev_err(&op->dev, "Can't parse device node resource\n");
- return -ENODEV;
- }
-
- viu_irq = irq_of_parse_and_map(op->dev.of_node, 0);
- if (!viu_irq) {
- dev_err(&op->dev, "Error while mapping the irq\n");
- return -EINVAL;
- }
-
- /* request mem region */
- if (!devm_request_mem_region(&op->dev, r.start,
- sizeof(struct viu_reg), DRV_NAME)) {
- dev_err(&op->dev, "Error while requesting mem region\n");
- ret = -EBUSY;
- goto err_irq;
- }
-
- /* remap registers */
- viu_regs = devm_ioremap(&op->dev, r.start, sizeof(struct viu_reg));
- if (!viu_regs) {
- dev_err(&op->dev, "Can't map register set\n");
- ret = -ENOMEM;
- goto err_irq;
- }
-
- /* Prepare our private structure */
- viu_dev = devm_kzalloc(&op->dev, sizeof(struct viu_dev), GFP_KERNEL);
- if (!viu_dev) {
- dev_err(&op->dev, "Can't allocate private structure\n");
- ret = -ENOMEM;
- goto err_irq;
- }
-
- viu_dev->vr = viu_regs;
- viu_dev->irq = viu_irq;
- viu_dev->dev = &op->dev;
-
- /* init video dma queues */
- INIT_LIST_HEAD(&viu_dev->vidq.active);
- INIT_LIST_HEAD(&viu_dev->vidq.queued);
-
- snprintf(viu_dev->v4l2_dev.name,
- sizeof(viu_dev->v4l2_dev.name), "%s", "VIU");
- ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
- if (ret < 0) {
- dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
- goto err_irq;
- }
-
- ad = i2c_get_adapter(0);
- if (!ad) {
- ret = -EFAULT;
- dev_err(&op->dev, "couldn't get i2c adapter\n");
- goto err_v4l2;
- }
-
- v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
- if (viu_dev->hdl.error) {
- ret = viu_dev->hdl.error;
- dev_err(&op->dev, "couldn't register control\n");
- goto err_i2c;
- }
- /* This control handler will inherit the control(s) from the
- sub-device(s). */
- viu_dev->v4l2_dev.ctrl_handler = &viu_dev->hdl;
- viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
- "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
-
- timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
- viu_dev->std = V4L2_STD_NTSC_M;
- viu_dev->first = 1;
-
- /* Allocate memory for video device */
- vdev = video_device_alloc();
- if (vdev == NULL) {
- ret = -ENOMEM;
- goto err_hdl;
- }
-
- *vdev = viu_template;
-
- vdev->v4l2_dev = &viu_dev->v4l2_dev;
-
- viu_dev->vdev = vdev;
-
- /* initialize locks */
- mutex_init(&viu_dev->lock);
- viu_dev->vdev->lock = &viu_dev->lock;
- spin_lock_init(&viu_dev->slock);
-
- video_set_drvdata(viu_dev->vdev, viu_dev);
-
- mutex_lock(&viu_dev->lock);
-
- ret = video_register_device(viu_dev->vdev, VFL_TYPE_VIDEO, -1);
- if (ret < 0) {
- video_device_release(viu_dev->vdev);
- goto err_unlock;
- }
-
- /* enable VIU clock */
- clk = devm_clk_get(&op->dev, "ipg");
- if (IS_ERR(clk)) {
- dev_err(&op->dev, "failed to lookup the clock!\n");
- ret = PTR_ERR(clk);
- goto err_vdev;
- }
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(&op->dev, "failed to enable the clock!\n");
- goto err_vdev;
- }
- viu_dev->clk = clk;
-
- /* reset VIU module */
- viu_reset(viu_dev->vr);
-
- /* install interrupt handler */
- if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
- dev_err(&op->dev, "Request VIU IRQ failed.\n");
- ret = -ENODEV;
- goto err_clk;
- }
-
- mutex_unlock(&viu_dev->lock);
-
- dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
- return ret;
-
-err_clk:
- clk_disable_unprepare(viu_dev->clk);
-err_vdev:
- video_unregister_device(viu_dev->vdev);
-err_unlock:
- mutex_unlock(&viu_dev->lock);
-err_hdl:
- v4l2_ctrl_handler_free(&viu_dev->hdl);
-err_i2c:
- i2c_put_adapter(ad);
-err_v4l2:
- v4l2_device_unregister(&viu_dev->v4l2_dev);
-err_irq:
- irq_dispose_mapping(viu_irq);
- return ret;
-}
-
-static int viu_of_remove(struct platform_device *op)
-{
- struct v4l2_device *v4l2_dev = platform_get_drvdata(op);
- struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
- struct v4l2_subdev *sdev = list_entry(v4l2_dev->subdevs.next,
- struct v4l2_subdev, list);
- struct i2c_client *client = v4l2_get_subdevdata(sdev);
-
- free_irq(dev->irq, (void *)dev);
- irq_dispose_mapping(dev->irq);
-
- clk_disable_unprepare(dev->clk);
-
- v4l2_ctrl_handler_free(&dev->hdl);
- video_unregister_device(dev->vdev);
- i2c_put_adapter(client->adapter);
- v4l2_device_unregister(&dev->v4l2_dev);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int viu_suspend(struct platform_device *op, pm_message_t state)
-{
- struct v4l2_device *v4l2_dev = platform_get_drvdata(op);
- struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
-
- clk_disable(dev->clk);
- return 0;
-}
-
-static int viu_resume(struct platform_device *op)
-{
- struct v4l2_device *v4l2_dev = platform_get_drvdata(op);
- struct viu_dev *dev = container_of(v4l2_dev, struct viu_dev, v4l2_dev);
-
- clk_enable(dev->clk);
- return 0;
-}
-#endif
-
-/*
- * Initialization and module stuff
- */
-static const struct of_device_id mpc512x_viu_of_match[] = {
- {
- .compatible = "fsl,mpc5121-viu",
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, mpc512x_viu_of_match);
-
-static struct platform_driver viu_of_platform_driver = {
- .probe = viu_of_probe,
- .remove = viu_of_remove,
-#ifdef CONFIG_PM
- .suspend = viu_suspend,
- .resume = viu_resume,
-#endif
- .driver = {
- .name = DRV_NAME,
- .of_match_table = mpc512x_viu_of_match,
- },
-};
-
-module_platform_driver(viu_of_platform_driver);
-
-MODULE_DESCRIPTION("Freescale Video-In(VIU)");
-MODULE_AUTHOR("Hongjun Chen");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(VIU_VERSION);
diff --git a/drivers/media/platform/renesas/vsp1/vsp1.h b/drivers/media/platform/renesas/vsp1/vsp1.h
index 37cf33c7e6ca..2f6f0c6ae555 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1.h
@@ -22,6 +22,7 @@
struct clk;
struct device;
struct rcar_fcp_device;
+struct reset_control;
struct vsp1_drm;
struct vsp1_entity;
@@ -54,6 +55,7 @@ struct vsp1_uif;
#define VSP1_HAS_HGT BIT(8)
#define VSP1_HAS_BRS BIT(9)
#define VSP1_HAS_EXT_DL BIT(10)
+#define VSP1_HAS_NON_ZERO_LBA BIT(11)
struct vsp1_device_info {
u32 version;
@@ -66,6 +68,7 @@ struct vsp1_device_info {
unsigned int uif_count;
unsigned int wpf_count;
unsigned int num_bru_inputs;
+ u8 soc;
bool uapi;
};
@@ -79,6 +82,7 @@ struct vsp1_device {
void __iomem *mmio;
struct rcar_fcp_device *fcp;
struct device *bus_master;
+ struct reset_control *rstc;
struct vsp1_brx *brs;
struct vsp1_brx *bru;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drm.c b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
index 0c2507dc03d6..c6f25200982c 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drm.c
@@ -856,6 +856,8 @@ int vsp1_du_atomic_update(struct device *dev, unsigned int pipe_index,
rpf->mem.addr[1] = cfg->mem[1];
rpf->mem.addr[2] = cfg->mem[2];
+ rpf->format.flags = cfg->premult ? V4L2_PIX_FMT_FLAG_PREMUL_ALPHA : 0;
+
vsp1->drm->inputs[rpf_index].crop = cfg->src;
vsp1->drm->inputs[rpf_index].compose = cfg->dst;
vsp1->drm->inputs[rpf_index].zpos = cfg->zpos;
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_drv.c b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
index 1f73c48eb738..c260d318d298 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_drv.c
@@ -16,6 +16,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/videodev2.h>
#include <media/rcar-fcp.h>
@@ -622,6 +623,7 @@ static int __maybe_unused vsp1_pm_runtime_suspend(struct device *dev)
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
rcar_fcp_disable(vsp1->fcp);
+ reset_control_assert(vsp1->rstc);
return 0;
}
@@ -631,13 +633,31 @@ static int __maybe_unused vsp1_pm_runtime_resume(struct device *dev)
struct vsp1_device *vsp1 = dev_get_drvdata(dev);
int ret;
+ ret = reset_control_deassert(vsp1->rstc);
+ if (ret < 0)
+ return ret;
+
if (vsp1->info) {
+	/*
+	 * On R-Car Gen2 and RZ/G1, accessing VSP1 registers right after
+	 * deasserting the reset can lock up the device, so insert a short
+	 * delay before touching the hardware.
+	 */
+ if (vsp1->info->gen == 2)
+ udelay(1);
+
ret = vsp1_device_init(vsp1);
if (ret < 0)
- return ret;
+ goto done;
}
- return rcar_fcp_enable(vsp1->fcp);
+ ret = rcar_fcp_enable(vsp1->fcp);
+
+done:
+ if (ret < 0)
+ reset_control_assert(vsp1->rstc);
+
+ return ret;
}
static const struct dev_pm_ops vsp1_pm_ops = {
@@ -768,6 +788,7 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
}, {
.version = VI6_IP_VERSION_MODEL_VSPD_V3,
.model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_V3H,
.gen = 3,
.features = VSP1_HAS_BRS | VSP1_HAS_BRU,
.lif_count = 1,
@@ -776,6 +797,17 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.wpf_count = 1,
.num_bru_inputs = 5,
}, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3,
+ .model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_V3M,
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_BRU | VSP1_HAS_NON_ZERO_LBA,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 1,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
+ }, {
.version = VI6_IP_VERSION_MODEL_VSPDL_GEN3,
.model = "VSP2-DL",
.gen = 3,
@@ -798,11 +830,55 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
},
};
+static const struct vsp1_device_info rzg2l_vsp2_device_info = {
+ .version = VI6_IP_VERSION_MODEL_VSPD_RZG2L,
+ .model = "VSP2-D",
+ .soc = VI6_IP_VERSION_SOC_RZG2L,
+ .gen = 3,
+ .features = VSP1_HAS_BRS | VSP1_HAS_WPF_VFLIP | VSP1_HAS_EXT_DL
+ | VSP1_HAS_NON_ZERO_LBA,
+ .lif_count = 1,
+ .rpf_count = 2,
+ .wpf_count = 1,
+};
+
+static const struct vsp1_device_info *vsp1_lookup_info(struct vsp1_device *vsp1)
+{
+ const struct vsp1_device_info *info;
+ unsigned int i;
+ u32 model;
+ u32 soc;
+
+ /*
+ * Try the info stored in match data first for devices that don't have
+ * a version register.
+ */
+ info = of_device_get_match_data(vsp1->dev);
+ if (info) {
+ vsp1->version = VI6_IP_VERSION_VSP_SW | info->version | info->soc;
+ return info;
+ }
+
+ vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
+ model = vsp1->version & VI6_IP_VERSION_MODEL_MASK;
+ soc = vsp1->version & VI6_IP_VERSION_SOC_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
+ info = &vsp1_device_infos[i];
+
+ if (model == info->version && (!info->soc || soc == info->soc))
+ return info;
+ }
+
+ dev_err(vsp1->dev, "unsupported IP version 0x%08x\n", vsp1->version);
+
+ return NULL;
+}
+
static int vsp1_probe(struct platform_device *pdev)
{
struct vsp1_device *vsp1;
struct device_node *fcp_node;
- unsigned int i;
int ret;
int irq;
@@ -825,6 +901,11 @@ static int vsp1_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ vsp1->rstc = devm_reset_control_get_shared(&pdev->dev, NULL);
+ if (IS_ERR(vsp1->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(vsp1->rstc),
+ "failed to get reset control\n");
+
/* FCP (optional). */
fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
if (fcp_node) {
@@ -853,19 +934,8 @@ static int vsp1_probe(struct platform_device *pdev)
if (ret < 0)
goto done;
- vsp1->version = vsp1_read(vsp1, VI6_IP_VERSION);
-
- for (i = 0; i < ARRAY_SIZE(vsp1_device_infos); ++i) {
- if ((vsp1->version & VI6_IP_VERSION_MODEL_MASK) ==
- vsp1_device_infos[i].version) {
- vsp1->info = &vsp1_device_infos[i];
- break;
- }
- }
-
+ vsp1->info = vsp1_lookup_info(vsp1);
if (!vsp1->info) {
- dev_err(&pdev->dev, "unsupported IP version 0x%08x\n",
- vsp1->version);
vsp1_device_put(vsp1);
ret = -ENXIO;
goto done;
@@ -922,6 +992,7 @@ static int vsp1_remove(struct platform_device *pdev)
static const struct of_device_id vsp1_of_match[] = {
{ .compatible = "renesas,vsp1" },
{ .compatible = "renesas,vsp2" },
+ { .compatible = "renesas,r9a07g044-vsp2", .data = &rzg2l_vsp2_device_info },
{ },
};
MODULE_DEVICE_TABLE(of, vsp1_of_match);
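
The probe path above now resolves the device info in two steps: OF match data first (for parts such as RZ/G2L that have no version register, where a software version word is synthesized), then the VI6_IP_VERSION register, matching on the model field and treating a zero .soc entry as a wildcard. A minimal standalone sketch of that matching logic follows; the mask values, table entries and names are hypothetical placeholders, not the real VI6_* layout.

/*
 * Standalone sketch (not the driver itself) of the lookup strategy used by
 * vsp1_lookup_info(): match on the model field of the version word and
 * treat a zero .soc entry as "any SoC".
 */
#include <stdint.h>
#include <stdio.h>

#define MODEL_MASK 0x0000ff00u	/* placeholder, not the real VI6_* layout */
#define SOC_MASK   0x000000ffu

struct dev_info {
	uint32_t model;		/* model field, pre-shifted */
	uint32_t soc;		/* 0 means "any SoC" */
	const char *name;
};

static const struct dev_info infos[] = {
	{ .model = 0x1500, .soc = 0x06, .name = "VSP2-D (SoC A)" },
	{ .model = 0x1500, .soc = 0x07, .name = "VSP2-D (SoC B)" },
	{ .model = 0x1900, .soc = 0x00, .name = "VSP2-DL (any SoC)" },
};

static const struct dev_info *lookup(uint32_t version)
{
	uint32_t model = version & MODEL_MASK;
	uint32_t soc = version & SOC_MASK;
	unsigned int i;

	for (i = 0; i < sizeof(infos) / sizeof(infos[0]); i++) {
		const struct dev_info *info = &infos[i];

		/* A zero .soc acts as a wildcard, as in the hunk above. */
		if (model == info->model && (!info->soc || soc == info->soc))
			return info;
	}

	return NULL;
}

int main(void)
{
	const struct dev_info *info = lookup(0x00001507);

	printf("%s\n", info ? info->name : "unsupported");
	return 0;
}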
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_lif.c b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
index 6a6857ac9327..186a5730e1e3 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_lif.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_lif.c
@@ -107,6 +107,7 @@ static void lif_configure_stream(struct vsp1_entity *entity,
case VI6_IP_VERSION_MODEL_VSPDL_GEN3:
case VI6_IP_VERSION_MODEL_VSPD_V3:
+ case VI6_IP_VERSION_MODEL_VSPD_RZG2L:
hbth = 0;
obth = 1500;
lbth = 0;
@@ -130,13 +131,12 @@ static void lif_configure_stream(struct vsp1_entity *entity,
VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
/*
- * On R-Car V3M the LIF0 buffer attribute register has to be set to a
- * non-default value to guarantee proper operation (otherwise artifacts
- * may appear on the output). The value required by the manual is not
- * explained but is likely a buffer size or threshold.
+ * On R-Car V3M and RZ/G2L the LIF0 buffer attribute register has to be
+ * set to a non-default value to guarantee proper operation (otherwise
+ * artifacts may appear on the output). The value required by the
+ * manual is not explained but is likely a buffer size or threshold.
*/
- if ((entity->vsp1->version & VI6_IP_VERSION_MASK) ==
- (VI6_IP_VERSION_MODEL_VSPD_V3 | VI6_IP_VERSION_SOC_V3M))
+ if (vsp1_feature(entity->vsp1, VSP1_HAS_NON_ZERO_LBA))
vsp1_lif_write(lif, dlb, VI6_LIF_LBA,
VI6_LIF_LBA_LBA0 |
(1536 << VI6_LIF_LBA_LBA1_SHIFT));
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_regs.h b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
index fae7286eb01e..8928f4c6bb55 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/renesas/vsp1/vsp1_regs.h
@@ -767,6 +767,8 @@
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
#define VI6_IP_VERSION_MODEL_VSPD_V3U (0x1c << 8)
+/* RZ/G2L SoCs have no version register, so use 0x80 as the model version */
+#define VI6_IP_VERSION_MODEL_VSPD_RZG2L (0x80 << 8)
#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
@@ -780,6 +782,10 @@
#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
#define VI6_IP_VERSION_SOC_V3U (0x05 << 0)
+/* RZ/G2L SoCs have no version register, so use 0x80 for SoC identification */
+#define VI6_IP_VERSION_SOC_RZG2L (0x80 << 0)
+
+#define VI6_IP_VERSION_VSP_SW (0xfffe << 16) /* SW VSP version */
/* -----------------------------------------------------------------------------
* RPF CLUT Registers
diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c
index e8e0ee5f2277..df1606b49d77 100644
--- a/drivers/media/platform/renesas/vsp1/vsp1_video.c
+++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c
@@ -305,7 +305,7 @@ static int vsp1_video_pipeline_setup_partitions(struct vsp1_pipeline *pipe)
* @video: the video node
*
* This function completes the current buffer by filling its sequence number,
- * time stamp and payload size, and hands it back to the videobuf core.
+ * time stamp and payload size, and hands it back to the vb2 core.
*
* Return the next queued buffer or NULL if the queue is empty.
*/
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 2f8df74ad0fd..61b25fcf826e 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -816,7 +816,7 @@ static int rga_probe(struct platform_device *pdev)
ret = rga_parse_dt(rga);
if (ret)
- dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return dev_err_probe(&pdev->dev, ret, "Unable to parse OF data\n");
pm_runtime_enable(rga->dev);
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-core.h b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
index 7a058f3e6298..2b0760add092 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-core.h
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-core.h
@@ -215,7 +215,7 @@ struct fimc_addr {
/**
* struct fimc_vid_buffer - the driver's video buffer
- * @vb: v4l videobuf buffer
+ * @vb: v4l vb2 buffer
* @list: linked list structure for buffer queue
* @addr: precalculated DMA address set
* @index: buffer index for the output DMA engine
diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-is.c b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
index e3072d69c49f..a7704ff069d6 100644
--- a/drivers/media/platform/samsung/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/samsung/exynos4-is/fimc-is.c
@@ -213,6 +213,7 @@ static int fimc_is_register_subdevs(struct fimc_is *is)
if (ret < 0 || index >= FIMC_IS_SENSORS_NUM) {
of_node_put(child);
+ of_node_put(i2c_bus);
return ret;
}
index++;
diff --git a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
index 761341934925..fca5c6405eec 100644
--- a/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/samsung/s5p-mfc/s5p_mfc.c
@@ -323,7 +323,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
}
ctx->sequence++;
/* The MFC returns address of the buffer, now we have to
- * check which videobuf does it correspond to */
+	 * check which vb2_buffer it corresponds to */
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
u32 addr = (u32)vb2_dma_contig_plane_dma_addr(&dst_buf->b->vb2_buf, 0);
@@ -1399,6 +1399,7 @@ static int s5p_mfc_probe(struct platform_device *pdev)
/* Deinit MFC if probe had failed */
err_enc_reg:
video_unregister_device(dev->vfd_dec);
+ dev->vfd_dec = NULL;
err_dec_reg:
video_device_release(dev->vfd_enc);
err_enc_alloc:
@@ -1444,8 +1445,6 @@ static int s5p_mfc_remove(struct platform_device *pdev)
video_unregister_device(dev->vfd_enc);
video_unregister_device(dev->vfd_dec);
- video_device_release(dev->vfd_enc);
- video_device_release(dev->vfd_dec);
v4l2_device_unregister(&dev->v4l2_dev);
s5p_mfc_unconfigure_dma_memory(dev);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 1d46e113d01d..74d64a20ba5b 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -177,7 +177,7 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count)
/*
* CSI will lookup the next dma buffer for next frame before the
- * the current frame done IRQ triggered. This is not documented
+ * current frame done IRQ triggered. This is not documented
* but reported by Ondřej Jirman.
* The BSP code has a workaround for this too. It skips marking the
* first buffer as frame done for VB2 and passes the second buffer
diff --git a/drivers/media/platform/ti/am437x/am437x-vpfe.h b/drivers/media/platform/ti/am437x/am437x-vpfe.h
index 05ee37db0273..f8b4e917b91a 100644
--- a/drivers/media/platform/ti/am437x/am437x-vpfe.h
+++ b/drivers/media/platform/ti/am437x/am437x-vpfe.h
@@ -267,7 +267,7 @@ struct vpfe_device {
* is different from the image window
*/
struct v4l2_rect crop;
- /* Buffer queue used in video-buf */
+ /* Buffer queue used in vb2 */
struct vb2_queue buffer_queue;
/* Queue of filled frames */
struct list_head dma_queue;
diff --git a/drivers/media/platform/ti/cal/cal-camerarx.c b/drivers/media/platform/ti/cal/cal-camerarx.c
index e136d70b4048..16ae52879a79 100644
--- a/drivers/media/platform/ti/cal/cal-camerarx.c
+++ b/drivers/media/platform/ti/cal/cal-camerarx.c
@@ -622,12 +622,12 @@ static inline struct cal_camerarx *to_cal_camerarx(struct v4l2_subdev *sd)
static struct v4l2_mbus_framefmt *
cal_camerarx_get_pad_format(struct cal_camerarx *phy,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
unsigned int pad, u32 which)
{
switch (which) {
case V4L2_SUBDEV_FORMAT_TRY:
- return v4l2_subdev_get_try_format(&phy->subdev, sd_state, pad);
+ return v4l2_subdev_get_try_format(&phy->subdev, state, pad);
case V4L2_SUBDEV_FORMAT_ACTIVE:
return &phy->formats[pad];
default:
@@ -653,7 +653,7 @@ static int cal_camerarx_sd_s_stream(struct v4l2_subdev *sd, int enable)
}
static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_mbus_code_enum *code)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -670,7 +670,7 @@ static int cal_camerarx_sd_enum_mbus_code(struct v4l2_subdev *sd,
goto out;
}
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_SINK,
code->which);
code->code = fmt->code;
@@ -690,7 +690,7 @@ out:
}
static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_frame_size_enum *fse)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -706,7 +706,7 @@ static int cal_camerarx_sd_enum_frame_size(struct v4l2_subdev *sd,
if (cal_rx_pad_is_source(fse->pad)) {
struct v4l2_mbus_framefmt *fmt;
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_SINK,
fse->which);
if (fse->code != fmt->code) {
@@ -738,7 +738,7 @@ out:
}
static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -746,7 +746,7 @@ static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
mutex_lock(&phy->mutex);
- fmt = cal_camerarx_get_pad_format(phy, sd_state, format->pad,
+ fmt = cal_camerarx_get_pad_format(phy, state, format->pad,
format->which);
format->format = *fmt;
@@ -756,7 +756,7 @@ static int cal_camerarx_sd_get_fmt(struct v4l2_subdev *sd,
}
static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_state *state,
struct v4l2_subdev_format *format)
{
struct cal_camerarx *phy = to_cal_camerarx(sd);
@@ -766,7 +766,7 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
/* No transcoding, source and sink formats must match. */
if (cal_rx_pad_is_source(format->pad))
- return cal_camerarx_sd_get_fmt(sd, sd_state, format);
+ return cal_camerarx_sd_get_fmt(sd, state, format);
/*
* Default to the first format if the requested media bus code isn't
@@ -792,12 +792,12 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
mutex_lock(&phy->mutex);
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_SINK,
format->which);
*fmt = format->format;
- fmt = cal_camerarx_get_pad_format(phy, sd_state,
+ fmt = cal_camerarx_get_pad_format(phy, state,
CAL_CAMERARX_PAD_FIRST_SOURCE,
format->which);
*fmt = format->format;
@@ -808,10 +808,10 @@ static int cal_camerarx_sd_set_fmt(struct v4l2_subdev *sd,
}
static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
- struct v4l2_subdev_state *sd_state)
+ struct v4l2_subdev_state *state)
{
struct v4l2_subdev_format format = {
- .which = sd_state ? V4L2_SUBDEV_FORMAT_TRY
+ .which = state ? V4L2_SUBDEV_FORMAT_TRY
: V4L2_SUBDEV_FORMAT_ACTIVE,
.pad = CAL_CAMERARX_PAD_SINK,
.format = {
@@ -826,7 +826,7 @@ static int cal_camerarx_sd_init_cfg(struct v4l2_subdev *sd,
},
};
- return cal_camerarx_sd_set_fmt(sd, sd_state, &format);
+ return cal_camerarx_sd_set_fmt(sd, state, &format);
}
static const struct v4l2_subdev_video_ops cal_camerarx_video_ops = {
@@ -871,6 +871,7 @@ struct cal_camerarx *cal_camerarx_create(struct cal_dev *cal,
phy->cal = cal;
phy->instance = instance;
+ spin_lock_init(&phy->vc_lock);
mutex_init(&phy->mutex);
phy->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c
index 776da0cfcdbe..21e3d0aabf70 100644
--- a/drivers/media/platform/ti/cal/cal-video.c
+++ b/drivers/media/platform/ti/cal/cal-video.c
@@ -191,7 +191,7 @@ static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
struct cal_ctx *ctx = video_drvdata(file);
const struct cal_format_info *fmtinfo;
struct v4l2_subdev_frame_size_enum fse;
- int ret, found;
+ int found;
fmtinfo = find_format_by_pix(ctx, f->fmt.pix.pixelformat);
if (!fmtinfo) {
@@ -206,12 +206,13 @@ static int cal_legacy_try_fmt_vid_cap(struct file *file, void *priv,
f->fmt.pix.field = ctx->v_fmt.fmt.pix.field;
/* check for/find a valid width/height */
- ret = 0;
found = false;
fse.pad = 0;
fse.code = fmtinfo->code;
fse.which = V4L2_SUBDEV_FORMAT_ACTIVE;
for (fse.index = 0; ; fse.index++) {
+ int ret;
+
ret = v4l2_subdev_call(ctx->phy->source, pad, enum_frame_size,
NULL, &fse);
if (ret)
diff --git a/drivers/media/platform/ti/cal/cal.c b/drivers/media/platform/ti/cal/cal.c
index 425b4f4b7ed7..56b61c0583cf 100644
--- a/drivers/media/platform/ti/cal/cal.c
+++ b/drivers/media/platform/ti/cal/cal.c
@@ -543,7 +543,22 @@ void cal_ctx_unprepare(struct cal_ctx *ctx)
void cal_ctx_start(struct cal_ctx *ctx)
{
- ctx->sequence = 0;
+ struct cal_camerarx *phy = ctx->phy;
+
+ /*
+ * Reset the frame number & sequence number, but only if the
+ * virtual channel is not already in use.
+ */
+
+ spin_lock(&phy->vc_lock);
+
+ if (phy->vc_enable_count[ctx->vc]++ == 0) {
+ phy->vc_frame_number[ctx->vc] = 0;
+ phy->vc_sequence[ctx->vc] = 0;
+ }
+
+ spin_unlock(&phy->vc_lock);
+
ctx->dma.state = CAL_DMA_RUNNING;
/* Configure the CSI-2, pixel processing and write DMA contexts. */
@@ -563,8 +578,15 @@ void cal_ctx_start(struct cal_ctx *ctx)
void cal_ctx_stop(struct cal_ctx *ctx)
{
+ struct cal_camerarx *phy = ctx->phy;
long timeout;
+ WARN_ON(phy->vc_enable_count[ctx->vc] == 0);
+
+ spin_lock(&phy->vc_lock);
+ phy->vc_enable_count[ctx->vc]--;
+ spin_unlock(&phy->vc_lock);
+
/*
* Request DMA stop and wait until it completes. If completion times
* out, forcefully disable the DMA.
@@ -601,6 +623,34 @@ void cal_ctx_stop(struct cal_ctx *ctx)
* ------------------------------------------------------------------
*/
+/*
+ * Track a sequence number for each virtual channel, which is shared by
+ * all contexts using the same virtual channel. This is done using the
+ * CSI-2 frame number as a base.
+ */
+static void cal_update_seq_number(struct cal_ctx *ctx)
+{
+ struct cal_dev *cal = ctx->cal;
+ struct cal_camerarx *phy = ctx->phy;
+ u16 prev_frame_num, frame_num;
+ u8 vc = ctx->vc;
+
+ frame_num =
+ cal_read(cal, CAL_CSI2_STATUS(phy->instance, ctx->csi2_ctx)) &
+ 0xffff;
+
+ if (phy->vc_frame_number[vc] != frame_num) {
+ prev_frame_num = phy->vc_frame_number[vc];
+
+ if (prev_frame_num >= frame_num)
+ phy->vc_sequence[vc] += 1;
+ else
+ phy->vc_sequence[vc] += frame_num - prev_frame_num;
+
+ phy->vc_frame_number[vc] = frame_num;
+ }
+}
+
static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
{
spin_lock(&ctx->dma.lock);
@@ -631,6 +681,8 @@ static inline void cal_irq_wdma_start(struct cal_ctx *ctx)
}
spin_unlock(&ctx->dma.lock);
+
+ cal_update_seq_number(ctx);
}
static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
@@ -657,27 +709,62 @@ static inline void cal_irq_wdma_end(struct cal_ctx *ctx)
if (buf) {
buf->vb.vb2_buf.timestamp = ktime_get_ns();
buf->vb.field = ctx->v_fmt.fmt.pix.field;
- buf->vb.sequence = ctx->sequence++;
+ buf->vb.sequence = ctx->phy->vc_sequence[ctx->vc];
+
vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
}
}
+static void cal_irq_handle_wdma(struct cal_ctx *ctx, bool start, bool end)
+{
+ /*
+ * CAL HW interrupts are inherently racy. If we get both start and end
+ * interrupts, we don't know what has happened: did the DMA for a single
+ * frame start and end, or did one frame end and a new frame start?
+ *
+ * Usually for normal pixel frames we get the interrupts separately. If
+ * we do get both, we have to guess. The assumption in the code below is
+ * that the active vertical area is larger than the blanking vertical
+ * area, and thus it is more likely that we get the end of the old frame
+ * and the start of a new frame.
+ *
+ * However, for embedded data, which is only a few lines high, we always
+ * get both interrupts. Here the assumption is that we get both for the
+ * same frame.
+ */
+ if (ctx->v_fmt.fmt.pix.height < 10) {
+ if (start)
+ cal_irq_wdma_start(ctx);
+
+ if (end)
+ cal_irq_wdma_end(ctx);
+ } else {
+ if (end)
+ cal_irq_wdma_end(ctx);
+
+ if (start)
+ cal_irq_wdma_start(ctx);
+ }
+}
+
static irqreturn_t cal_irq(int irq_cal, void *data)
{
struct cal_dev *cal = data;
- u32 status;
-
- status = cal_read(cal, CAL_HL_IRQSTATUS(0));
- if (status) {
- unsigned int i;
+ u32 status[3];
+ unsigned int i;
- cal_write(cal, CAL_HL_IRQSTATUS(0), status);
+ for (i = 0; i < 3; ++i) {
+ status[i] = cal_read(cal, CAL_HL_IRQSTATUS(i));
+ if (status[i])
+ cal_write(cal, CAL_HL_IRQSTATUS(i), status[i]);
+ }
- if (status & CAL_HL_IRQ_OCPO_ERR_MASK)
+ if (status[0]) {
+ if (status[0] & CAL_HL_IRQ_OCPO_ERR_MASK)
dev_err_ratelimited(cal->dev, "OCPO ERROR\n");
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
- if (status & CAL_HL_IRQ_CIO_MASK(i)) {
+ if (status[0] & CAL_HL_IRQ_CIO_MASK(i)) {
u32 cio_stat = cal_read(cal,
CAL_CSI2_COMPLEXIO_IRQSTATUS(i));
@@ -688,7 +775,7 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
cio_stat);
}
- if (status & CAL_HL_IRQ_VC_MASK(i)) {
+ if (status[0] & CAL_HL_IRQ_VC_MASK(i)) {
u32 vc_stat = cal_read(cal, CAL_CSI2_VC_IRQSTATUS(i));
dev_err_ratelimited(cal->dev,
@@ -700,32 +787,12 @@ static irqreturn_t cal_irq(int irq_cal, void *data)
}
}
- /* Check which DMA just finished */
- status = cal_read(cal, CAL_HL_IRQSTATUS(1));
- if (status) {
- unsigned int i;
-
- /* Clear Interrupt status */
- cal_write(cal, CAL_HL_IRQSTATUS(1), status);
-
- for (i = 0; i < cal->num_contexts; ++i) {
- if (status & CAL_HL_IRQ_WDMA_END_MASK(i))
- cal_irq_wdma_end(cal->ctx[i]);
- }
- }
-
- /* Check which DMA just started */
- status = cal_read(cal, CAL_HL_IRQSTATUS(2));
- if (status) {
- unsigned int i;
-
- /* Clear Interrupt status */
- cal_write(cal, CAL_HL_IRQSTATUS(2), status);
+ for (i = 0; i < cal->num_contexts; ++i) {
+ bool end = !!(status[1] & CAL_HL_IRQ_WDMA_END_MASK(i));
+ bool start = !!(status[2] & CAL_HL_IRQ_WDMA_START_MASK(i));
- for (i = 0; i < cal->num_contexts; ++i) {
- if (status & CAL_HL_IRQ_WDMA_START_MASK(i))
- cal_irq_wdma_start(cal->ctx[i]);
- }
+ if (start || end)
+ cal_irq_handle_wdma(cal->ctx[i], start, end);
}
return IRQ_HANDLED;
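
The interrupt rework above derives buffer sequence numbers per virtual channel from the 16-bit CSI-2 frame counter instead of a per-context counter. Below is a minimal standalone sketch of that wrap-tolerant update, assuming the hardware register read is replaced by a plain frame_num parameter; it mirrors cal_update_seq_number(): an unchanged counter is ignored, a counter that went backwards counts as one frame, and a forward jump advances the sequence by the observed difference.

/*
 * Standalone sketch of per-virtual-channel sequence tracking from a 16-bit
 * hardware frame counter (not the driver itself).
 */
#include <stdint.h>
#include <stdio.h>

struct vc_state {
	uint16_t frame_number;
	uint32_t sequence;
};

static void update_sequence(struct vc_state *vc, uint16_t frame_num)
{
	if (vc->frame_number == frame_num)
		return;			/* same frame reported twice, nothing to do */

	if (vc->frame_number >= frame_num)
		vc->sequence += 1;	/* counter wrapped or was reset */
	else
		vc->sequence += frame_num - vc->frame_number;

	vc->frame_number = frame_num;
}

int main(void)
{
	struct vc_state vc = { 0, 0 };
	const uint16_t frames[] = { 1, 2, 3, 5, 6, 1 };
	unsigned int i;

	for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		update_sequence(&vc, frames[i]);
		printf("frame %5u -> sequence %lu\n",
		       (unsigned int)frames[i],
		       (unsigned long)vc.sequence);
	}

	return 0;
}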
diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h
index 61409ddced98..80f2c9c73c71 100644
--- a/drivers/media/platform/ti/cal/cal.h
+++ b/drivers/media/platform/ti/cal/cal.h
@@ -180,6 +180,12 @@ struct cal_camerarx {
struct media_pad pads[CAL_CAMERARX_NUM_PADS];
struct v4l2_mbus_framefmt formats[CAL_CAMERARX_NUM_PADS];
+ /* protects the vc_* fields below */
+ spinlock_t vc_lock;
+ u8 vc_enable_count[4];
+ u16 vc_frame_number[4];
+ u32 vc_sequence[4];
+
/*
* Lock for camerarx ops. Protects:
* - formats
@@ -242,7 +248,6 @@ struct cal_ctx {
const struct cal_format_info **active_fmt;
unsigned int num_active_fmt;
- unsigned int sequence;
struct vb2_queue vb_vidq;
u8 dma_ctx;
u8 cport;
diff --git a/drivers/media/platform/ti/davinci/Kconfig b/drivers/media/platform/ti/davinci/Kconfig
index c61e697aeb12..96d4bed7fe9e 100644
--- a/drivers/media/platform/ti/davinci/Kconfig
+++ b/drivers/media/platform/ti/davinci/Kconfig
@@ -32,55 +32,6 @@ config VIDEO_DAVINCI_VPIF_CAPTURE
To compile this driver as a module, choose M here. There will
be two modules called vpif.ko and vpif_capture.ko
-config VIDEO_DM6446_CCDC
- tristate "TI DM6446 CCDC video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables DaVinci CCD hw module. DaVinci CCDC hw interfaces
- with decoder modules such as TVP5146 over BT656 or
- sensor module such as MT9T001 over a raw interface. This
- module configures the interface and CCDC/ISIF to do
- video frame capture from slave decoders.
-
- To compile this driver as a module, choose M here. There will
- be three modules called vpfe_capture.ko, vpss.ko and dm644x_ccdc.ko
-
-config VIDEO_DM355_CCDC
- tristate "TI DM355 CCDC video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables DM355 CCD hw module. DM355 CCDC hw interfaces
- with decoder modules such as TVP5146 over BT656 or
- sensor module such as MT9T001 over a raw interface. This
- module configures the interface and CCDC/ISIF to do
- video frame capture from a slave decoders
-
- To compile this driver as a module, choose M here. There will
- be three modules called vpfe_capture.ko, vpss.ko and dm355_ccdc.ko
-
-config VIDEO_DM365_ISIF
- tristate "TI DM365 ISIF video capture driver"
- depends on V4L_PLATFORM_DRIVERS
- depends on VIDEO_DEV
- depends on ARCH_DAVINCI || COMPILE_TEST
- depends on I2C
- select VIDEOBUF_DMA_CONTIG
- help
- Enables ISIF hw module. This is the hardware module for
- configuring ISIF in VPFE to capture Raw Bayer RGB data from
- a image sensor or YUV data from a YUV source.
-
- To compile this driver as a module, choose M here. There will
- be three modules called vpfe_capture.ko, vpss.ko and isif.ko
-
config VIDEO_DAVINCI_VPBE_DISPLAY
tristate "TI DaVinci VPBE V4L2-Display driver"
depends on V4L_PLATFORM_DRIVERS
diff --git a/drivers/media/platform/ti/davinci/Makefile b/drivers/media/platform/ti/davinci/Makefile
index 05c45bf371aa..b20a91653162 100644
--- a/drivers/media/platform/ti/davinci/Makefile
+++ b/drivers/media/platform/ti/davinci/Makefile
@@ -8,9 +8,5 @@ obj-$(CONFIG_VIDEO_DAVINCI_VPIF_DISPLAY) += vpif.o vpif_display.o
#VPIF Capture driver
obj-$(CONFIG_VIDEO_DAVINCI_VPIF_CAPTURE) += vpif.o vpif_capture.o
-# Capture: DM6446 and DM355
-obj-$(CONFIG_VIDEO_DM6446_CCDC) += vpfe_capture.o vpss.o dm644x_ccdc.o
-obj-$(CONFIG_VIDEO_DM355_CCDC) += vpfe_capture.o vpss.o dm355_ccdc.o
-obj-$(CONFIG_VIDEO_DM365_ISIF) += vpfe_capture.o vpss.o isif.o
obj-$(CONFIG_VIDEO_DAVINCI_VPBE_DISPLAY) += vpss.o vpbe.o vpbe_osd.o \
vpbe_venc.o vpbe_display.o
diff --git a/drivers/media/platform/ti/davinci/ccdc_hw_device.h b/drivers/media/platform/ti/davinci/ccdc_hw_device.h
deleted file mode 100644
index a545052a95a9..000000000000
--- a/drivers/media/platform/ti/davinci/ccdc_hw_device.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * ccdc device API
- */
-#ifndef _CCDC_HW_DEVICE_H
-#define _CCDC_HW_DEVICE_H
-
-#ifdef __KERNEL__
-#include <linux/videodev2.h>
-#include <linux/device.h>
-#include <media/davinci/vpfe_types.h>
-#include <media/davinci/ccdc_types.h>
-
-/*
- * ccdc hw operations
- */
-struct ccdc_hw_ops {
- /* Pointer to initialize function to initialize ccdc device */
- int (*open) (struct device *dev);
- /* Pointer to deinitialize function */
- int (*close) (struct device *dev);
- /* set ccdc base address */
- void (*set_ccdc_base)(void *base, int size);
- /* Pointer to function to enable or disable ccdc */
- void (*enable) (int en);
- /* reset sbl. only for 6446 */
- void (*reset) (void);
- /* enable output to sdram */
- void (*enable_out_to_sdram) (int en);
- /* Pointer to function to set hw parameters */
- int (*set_hw_if_params) (struct vpfe_hw_if_param *param);
- /* get interface parameters */
- int (*get_hw_if_params) (struct vpfe_hw_if_param *param);
- /* Pointer to function to configure ccdc */
- int (*configure) (void);
-
- /* Pointer to function to set buffer type */
- int (*set_buftype) (enum ccdc_buftype buf_type);
- /* Pointer to function to get buffer type */
- enum ccdc_buftype (*get_buftype) (void);
- /* Pointer to function to set frame format */
- int (*set_frame_format) (enum ccdc_frmfmt frm_fmt);
- /* Pointer to function to get frame format */
- enum ccdc_frmfmt (*get_frame_format) (void);
- /* enumerate hw pix formats */
- int (*enum_pix)(u32 *hw_pix, int i);
- /* Pointer to function to set buffer type */
- u32 (*get_pixel_format) (void);
- /* Pointer to function to get pixel format. */
- int (*set_pixel_format) (u32 pixfmt);
- /* Pointer to function to set image window */
- int (*set_image_window) (struct v4l2_rect *win);
- /* Pointer to function to set image window */
- void (*get_image_window) (struct v4l2_rect *win);
- /* Pointer to function to get line length */
- unsigned int (*get_line_length) (void);
-
- /* Pointer to function to set frame buffer address */
- void (*setfbaddr) (unsigned long addr);
- /* Pointer to function to get field id */
- int (*getfid) (void);
-};
-
-struct ccdc_hw_device {
- /* ccdc device name */
- char name[32];
- /* module owner */
- struct module *owner;
- /* hw ops */
- struct ccdc_hw_ops hw_ops;
-};
-
-/* Used by CCDC module to register & unregister with vpfe capture driver */
-int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev);
-void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev);
-
-#endif
-#endif
diff --git a/drivers/media/platform/ti/davinci/dm355_ccdc.c b/drivers/media/platform/ti/davinci/dm355_ccdc.c
deleted file mode 100644
index 8fe55d1b972c..000000000000
--- a/drivers/media/platform/ti/davinci/dm355_ccdc.c
+++ /dev/null
@@ -1,934 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2005-2009 Texas Instruments Inc
- *
- * CCDC hardware module for DM355
- * ------------------------------
- *
- * This module is for configuring DM355 CCD controller of VPFE to capture
- * Raw yuv or Bayer RGB data from a decoder. CCDC has several modules
- * such as Defect Pixel Correction, Color Space Conversion etc to
- * pre-process the Bayer RGB data, before writing it to SDRAM.
- *
- * TODO: 1) Raw bayer parameter settings and bayer capture
- * 2) Split module parameter structure to module specific ioctl structs
- * 3) add support for lense shading correction
- * 4) investigate if enum used for user space type definition
- * to be replaced by #defines or integer
- */
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
-#include <linux/err.h>
-#include <linux/module.h>
-
-#include <media/davinci/dm355_ccdc.h>
-#include <media/davinci/vpss.h>
-
-#include "dm355_ccdc_regs.h"
-#include "ccdc_hw_device.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CCDC Driver for DM355");
-MODULE_AUTHOR("Texas Instruments");
-
-static struct ccdc_oper_config {
- struct device *dev;
- /* CCDC interface type */
- enum vpfe_hw_if_type if_type;
- /* Raw Bayer configuration */
- struct ccdc_params_raw bayer;
- /* YCbCr configuration */
- struct ccdc_params_ycbcr ycbcr;
- /* ccdc base address */
- void __iomem *base_addr;
-} ccdc_cfg = {
- /* Raw configurations */
- .bayer = {
- .pix_fmt = CCDC_PIXFMT_RAW,
- .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
- .win = CCDC_WIN_VGA,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .gain = {
- .r_ye = 256,
- .gb_g = 256,
- .gr_cy = 256,
- .b_mg = 256
- },
- .config_params = {
- .datasft = 2,
- .mfilt1 = CCDC_NO_MEDIAN_FILTER1,
- .mfilt2 = CCDC_NO_MEDIAN_FILTER2,
- .alaw = {
- .gamma_wd = 2,
- },
- .blk_clamp = {
- .sample_pixel = 1,
- .dc_sub = 25
- },
- .col_pat_field0 = {
- .olop = CCDC_GREEN_BLUE,
- .olep = CCDC_BLUE,
- .elop = CCDC_RED,
- .elep = CCDC_GREEN_RED
- },
- .col_pat_field1 = {
- .olop = CCDC_GREEN_BLUE,
- .olep = CCDC_BLUE,
- .elop = CCDC_RED,
- .elep = CCDC_GREEN_RED
- },
- },
- },
- /* YCbCr configuration */
- .ycbcr = {
- .win = CCDC_WIN_PAL,
- .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
- .frm_fmt = CCDC_FRMFMT_INTERLACED,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .bt656_enable = 1,
- .pix_order = CCDC_PIXORDER_CBYCRY,
- .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
- },
-};
-
-
-/* Raw Bayer formats */
-static u32 ccdc_raw_bayer_pix_formats[] =
- {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
-
-/* Raw YUV formats */
-static u32 ccdc_raw_yuv_pix_formats[] =
- {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
-
-/* register access routines */
-static inline u32 regr(u32 offset)
-{
- return __raw_readl(ccdc_cfg.base_addr + offset);
-}
-
-static inline void regw(u32 val, u32 offset)
-{
- __raw_writel(val, ccdc_cfg.base_addr + offset);
-}
-
-static void ccdc_enable(int en)
-{
- unsigned int temp;
- temp = regr(SYNCEN);
- temp &= (~CCDC_SYNCEN_VDHDEN_MASK);
- temp |= (en & CCDC_SYNCEN_VDHDEN_MASK);
- regw(temp, SYNCEN);
-}
-
-static void ccdc_enable_output_to_sdram(int en)
-{
- unsigned int temp;
- temp = regr(SYNCEN);
- temp &= (~(CCDC_SYNCEN_WEN_MASK));
- temp |= ((en << CCDC_SYNCEN_WEN_SHIFT) & CCDC_SYNCEN_WEN_MASK);
- regw(temp, SYNCEN);
-}
-
-static void ccdc_config_gain_offset(void)
-{
- /* configure gain */
- regw(ccdc_cfg.bayer.gain.r_ye, RYEGAIN);
- regw(ccdc_cfg.bayer.gain.gr_cy, GRCYGAIN);
- regw(ccdc_cfg.bayer.gain.gb_g, GBGGAIN);
- regw(ccdc_cfg.bayer.gain.b_mg, BMGGAIN);
- /* configure offset */
- regw(ccdc_cfg.bayer.ccdc_offset, OFFSET);
-}
-
-/*
- * ccdc_restore_defaults()
- * This function restore power on defaults in the ccdc registers
- */
-static int ccdc_restore_defaults(void)
-{
- int i;
-
- dev_dbg(ccdc_cfg.dev, "\nstarting ccdc_restore_defaults...");
- /* set all registers to zero */
- for (i = 0; i <= CCDC_REG_LAST; i += 4)
- regw(0, i);
-
- /* now override the values with power on defaults in registers */
- regw(MODESET_DEFAULT, MODESET);
- /* no culling support */
- regw(CULH_DEFAULT, CULH);
- regw(CULV_DEFAULT, CULV);
- /* Set default Gain and Offset */
- ccdc_cfg.bayer.gain.r_ye = GAIN_DEFAULT;
- ccdc_cfg.bayer.gain.gb_g = GAIN_DEFAULT;
- ccdc_cfg.bayer.gain.gr_cy = GAIN_DEFAULT;
- ccdc_cfg.bayer.gain.b_mg = GAIN_DEFAULT;
- ccdc_config_gain_offset();
- regw(OUTCLIP_DEFAULT, OUTCLIP);
- regw(LSCCFG2_DEFAULT, LSCCFG2);
- /* select ccdc input */
- if (vpss_select_ccdc_source(VPSS_CCDCIN)) {
- dev_dbg(ccdc_cfg.dev, "\ncouldn't select ccdc input source");
- return -EFAULT;
- }
- /* select ccdc clock */
- if (vpss_enable_clock(VPSS_CCDC_CLOCK, 1) < 0) {
- dev_dbg(ccdc_cfg.dev, "\ncouldn't enable ccdc clock");
- return -EFAULT;
- }
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_restore_defaults...");
- return 0;
-}
-
-static int ccdc_open(struct device *device)
-{
- return ccdc_restore_defaults();
-}
-
-static int ccdc_close(struct device *device)
-{
- /* disable clock */
- vpss_enable_clock(VPSS_CCDC_CLOCK, 0);
- /* do nothing for now */
- return 0;
-}
-/*
- * ccdc_setwin()
- * This function will configure the window size to
- * be capture in CCDC reg.
- */
-static void ccdc_setwin(struct v4l2_rect *image_win,
- enum ccdc_frmfmt frm_fmt, int ppc)
-{
- int horz_start, horz_nr_pixels;
- int vert_start, vert_nr_lines;
- int mid_img = 0;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
-
- /*
- * ppc - per pixel count. indicates how many pixels per cell
- * output to SDRAM. example, for ycbcr, it is one y and one c, so 2.
- * raw capture this is 1
- */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
-
- /* Writing the horizontal info into the registers */
- regw(horz_start, SPH);
- regw(horz_nr_pixels, NPH);
- vert_start = image_win->top;
-
- if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
- /* configure VDINT0 and VDINT1 */
- regw(vert_start, VDINT0);
- } else {
- /* Since first line doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /* configure VDINT0 and VDINT1 */
- mid_img = vert_start + (image_win->height / 2);
- regw(vert_start, VDINT0);
- regw(mid_img, VDINT1);
- }
- regw(vert_start & CCDC_START_VER_ONE_MASK, SLV0);
- regw(vert_start & CCDC_START_VER_TWO_MASK, SLV1);
- regw(vert_nr_lines & CCDC_NUM_LINES_VER, NLV);
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
-}
-
-/* This function will configure CCDC for YCbCr video capture */
-static void ccdc_config_ycbcr(void)
-{
- struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
- u32 temp;
-
- /* first set the CCDC power on defaults values in all registers */
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
- ccdc_restore_defaults();
-
- /* configure pixel format & video frame format */
- temp = (((params->pix_fmt & CCDC_INPUT_MODE_MASK) <<
- CCDC_INPUT_MODE_SHIFT) |
- ((params->frm_fmt & CCDC_FRM_FMT_MASK) <<
- CCDC_FRM_FMT_SHIFT));
-
- /* setup BT.656 sync mode */
- if (params->bt656_enable) {
- regw(CCDC_REC656IF_BT656_EN, REC656IF);
- /*
- * configure the FID, VD, HD pin polarity fld,hd pol positive,
- * vd negative, 8-bit pack mode
- */
- temp |= CCDC_VD_POL_NEGATIVE;
- } else { /* y/c external sync mode */
- temp |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
- CCDC_FID_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) <<
- CCDC_HD_POL_SHIFT) |
- ((params->vd_pol & CCDC_VD_POL_MASK) <<
- CCDC_VD_POL_SHIFT));
- }
-
- /* pack the data to 8-bit */
- temp |= CCDC_DATA_PACK_ENABLE;
-
- regw(temp, MODESET);
-
- /* configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, 2);
-
- /* configure the order of y cb cr in SD-RAM */
- temp = (params->pix_order << CCDC_Y8POS_SHIFT);
- temp |= CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC;
- regw(temp, CCDCFG);
-
- /*
- * configure the horizontal line offset. This is done by rounding up
- * width to a multiple of 16 pixels and multiply by two to account for
- * y:cb:cr 4:2:2 data
- */
- regw(((params->win.width * 2 + 31) >> 5), HSIZE);
-
- /* configure the memory line offset */
- if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED) {
- /* two fields are interleaved in memory */
- regw(CCDC_SDOFST_FIELD_INTERLEAVED, SDOFST);
- }
-
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
-}
-
-/*
- * ccdc_config_black_clamp()
- * configure parameters for Optical Black Clamp
- */
-static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
-{
- u32 val;
-
- if (!bclamp->b_clamp_enable) {
- /* configure DCSub */
- regw(bclamp->dc_sub & CCDC_BLK_DC_SUB_MASK, DCSUB);
- regw(0x0000, CLAMP);
- return;
- }
- /* Enable the Black clamping, set sample lines and pixels */
- val = (bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) |
- ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
- CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE;
- regw(val, CLAMP);
-
- /* If Black clamping is enable then make dcsub 0 */
- val = (bclamp->sample_ln & CCDC_NUM_LINE_CALC_MASK)
- << CCDC_NUM_LINE_CALC_SHIFT;
- regw(val, DCSUB);
-}
-
-/*
- * ccdc_config_black_compense()
- * configure parameters for Black Compensation
- */
-static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
-{
- u32 val;
-
- val = (bcomp->b & CCDC_BLK_COMP_MASK) |
- ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GB_COMP_SHIFT);
- regw(val, BLKCMP1);
-
- val = ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GR_COMP_SHIFT) |
- ((bcomp->r & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_R_COMP_SHIFT);
- regw(val, BLKCMP0);
-}
-
-/*
- * ccdc_write_dfc_entry()
- * write an entry in the dfc table.
- */
-static int ccdc_write_dfc_entry(int index, struct ccdc_vertical_dft *dfc)
-{
-/* TODO This is to be re-visited and adjusted */
-#define DFC_WRITE_WAIT_COUNT 1000
- u32 val, count = DFC_WRITE_WAIT_COUNT;
-
- regw(dfc->dft_corr_vert[index], DFCMEM0);
- regw(dfc->dft_corr_horz[index], DFCMEM1);
- regw(dfc->dft_corr_sub1[index], DFCMEM2);
- regw(dfc->dft_corr_sub2[index], DFCMEM3);
- regw(dfc->dft_corr_sub3[index], DFCMEM4);
- /* set WR bit to write */
- val = regr(DFCMEMCTL) | CCDC_DFCMEMCTL_DFCMWR_MASK;
- regw(val, DFCMEMCTL);
-
- /*
- * Assume, it is very short. If we get an error, we need to
- * adjust this value
- */
- while (regr(DFCMEMCTL) & CCDC_DFCMEMCTL_DFCMWR_MASK)
- count--;
- /*
- * TODO We expect the count to be non-zero to be successful. Adjust
- * the count if write requires more time
- */
-
- if (count) {
- dev_err(ccdc_cfg.dev, "defect table write timeout !!!\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * ccdc_config_vdfc()
- * configure parameters for Vertical Defect Correction
- */
-static int ccdc_config_vdfc(struct ccdc_vertical_dft *dfc)
-{
- u32 val;
- int i;
-
- /* Configure General Defect Correction. The table used is from IPIPE */
- val = dfc->gen_dft_en & CCDC_DFCCTL_GDFCEN_MASK;
-
- /* Configure Vertical Defect Correction if needed */
- if (!dfc->ver_dft_en) {
- /* Enable only General Defect Correction */
- regw(val, DFCCTL);
- return 0;
- }
-
- if (dfc->table_size > CCDC_DFT_TABLE_SIZE)
- return -EINVAL;
-
- val |= CCDC_DFCCTL_VDFC_DISABLE;
- val |= (dfc->dft_corr_ctl.vdfcsl & CCDC_DFCCTL_VDFCSL_MASK) <<
- CCDC_DFCCTL_VDFCSL_SHIFT;
- val |= (dfc->dft_corr_ctl.vdfcuda & CCDC_DFCCTL_VDFCUDA_MASK) <<
- CCDC_DFCCTL_VDFCUDA_SHIFT;
- val |= (dfc->dft_corr_ctl.vdflsft & CCDC_DFCCTL_VDFLSFT_MASK) <<
- CCDC_DFCCTL_VDFLSFT_SHIFT;
- regw(val , DFCCTL);
-
- /* clear address ptr to offset 0 */
- val = CCDC_DFCMEMCTL_DFCMARST_MASK << CCDC_DFCMEMCTL_DFCMARST_SHIFT;
-
- /* write defect table entries */
- for (i = 0; i < dfc->table_size; i++) {
- /* increment address for non zero index */
- if (i != 0)
- val = CCDC_DFCMEMCTL_INC_ADDR;
- regw(val, DFCMEMCTL);
- if (ccdc_write_dfc_entry(i, dfc) < 0)
- return -EFAULT;
- }
-
- /* update saturation level and enable dfc */
- regw(dfc->saturation_ctl & CCDC_VDC_DFCVSAT_MASK, DFCVSAT);
- val = regr(DFCCTL) | (CCDC_DFCCTL_VDFCEN_MASK <<
- CCDC_DFCCTL_VDFCEN_SHIFT);
- regw(val, DFCCTL);
- return 0;
-}
-
-/*
- * ccdc_config_csc()
- * configure parameters for color space conversion
- * Each register CSCM0-7 has two values in S8Q5 format.
- */
-static void ccdc_config_csc(struct ccdc_csc *csc)
-{
- u32 val1 = 0, val2;
- int i;
-
- if (!csc->enable)
- return;
-
- /* Enable the CSC sub-module */
- regw(CCDC_CSC_ENABLE, CSCCTL);
-
- /* Converting the co-eff as per the format of the register */
- for (i = 0; i < CCDC_CSC_COEFF_TABLE_SIZE; i++) {
- if ((i % 2) == 0) {
- /* CSCM - LSB */
- val1 = (csc->coeff[i].integer &
- CCDC_CSC_COEF_INTEG_MASK)
- << CCDC_CSC_COEF_INTEG_SHIFT;
- /*
- * convert decimal part to binary. Use 2 decimal
- * precision, user values range from .00 - 0.99
- */
- val1 |= (((csc->coeff[i].decimal &
- CCDC_CSC_COEF_DECIMAL_MASK) *
- CCDC_CSC_DEC_MAX) / 100);
- } else {
-
- /* CSCM - MSB */
- val2 = (csc->coeff[i].integer &
- CCDC_CSC_COEF_INTEG_MASK)
- << CCDC_CSC_COEF_INTEG_SHIFT;
- val2 |= (((csc->coeff[i].decimal &
- CCDC_CSC_COEF_DECIMAL_MASK) *
- CCDC_CSC_DEC_MAX) / 100);
- val2 <<= CCDC_CSCM_MSB_SHIFT;
- val2 |= val1;
- regw(val2, (CSCM0 + ((i - 1) << 1)));
- }
- }
-}
-
-/*
- * ccdc_config_color_patterns()
- * configure parameters for color patterns
- */
-static void ccdc_config_color_patterns(struct ccdc_col_pat *pat0,
- struct ccdc_col_pat *pat1)
-{
- u32 val;
-
- val = (pat0->olop | (pat0->olep << 2) | (pat0->elop << 4) |
- (pat0->elep << 6) | (pat1->olop << 8) | (pat1->olep << 10) |
- (pat1->elop << 12) | (pat1->elep << 14));
- regw(val, COLPTN);
-}
-
-/* This function will configure CCDC for Raw mode image capture */
-static int ccdc_config_raw(void)
-{
- struct ccdc_params_raw *params = &ccdc_cfg.bayer;
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int val;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
-
- /* restore power on defaults to register */
- ccdc_restore_defaults();
-
- /* CCDCFG register:
- * set CCD Not to swap input since input is RAW data
- * set FID detection function to Latch at V-Sync
- * set WENLOG - ccdc valid area to AND
- * set TRGSEL to WENBIT
- * set EXTRG to DISABLE
- * disable latching function on VSYNC - shadowed registers
- */
- regw(CCDC_YCINSWP_RAW | CCDC_CCDCFG_FIDMD_LATCH_VSYNC |
- CCDC_CCDCFG_WENLOG_AND | CCDC_CCDCFG_TRGSEL_WEN |
- CCDC_CCDCFG_EXTRG_DISABLE | CCDC_LATCH_ON_VSYNC_DISABLE, CCDCFG);
-
- /*
- * Set VDHD direction to input, input type to raw input
- * normal data polarity, do not use external WEN
- */
- val = (CCDC_VDHDOUT_INPUT | CCDC_RAW_IP_MODE | CCDC_DATAPOL_NORMAL |
- CCDC_EXWEN_DISABLE);
-
- /*
- * Configure the vertical sync polarity (MODESET.VDPOL), horizontal
- * sync polarity (MODESET.HDPOL), field id polarity (MODESET.FLDPOL),
- * frame format(progressive or interlace), & pixel format (Input mode)
- */
- val |= (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
- ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
- ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
- ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT));
-
- /* set pack for alaw compression */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable)
- val |= CCDC_DATA_PACK_ENABLE;
-
- /* Configure for LPF */
- if (config_params->lpf_enable)
- val |= (config_params->lpf_enable & CCDC_LPF_MASK) <<
- CCDC_LPF_SHIFT;
-
- /* Configure the data shift */
- val |= (config_params->datasft & CCDC_DATASFT_MASK) <<
- CCDC_DATASFT_SHIFT;
- regw(val , MODESET);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to MODESET...\n", val);
-
- /* Configure the Median Filter threshold */
- regw((config_params->med_filt_thres) & CCDC_MED_FILT_THRESH, MEDFILT);
-
- /* Configure GAMMAWD register. defaur 11-2, and Mosaic cfa pattern */
- val = CCDC_GAMMA_BITS_11_2 << CCDC_GAMMAWD_INPUT_SHIFT |
- CCDC_CFA_MOSAIC;
-
- /* Enable and configure aLaw register if needed */
- if (config_params->alaw.enable) {
- val |= (CCDC_ALAW_ENABLE |
- ((config_params->alaw.gamma_wd &
- CCDC_ALAW_GAMMA_WD_MASK) <<
- CCDC_GAMMAWD_INPUT_SHIFT));
- }
-
- /* Configure Median filter1 & filter2 */
- val |= ((config_params->mfilt1 << CCDC_MFILT1_SHIFT) |
- (config_params->mfilt2 << CCDC_MFILT2_SHIFT));
-
- regw(val, GAMMAWD);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to GAMMAWD...\n", val);
-
- /* configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, 1);
-
- /* Optical Clamp Averaging */
- ccdc_config_black_clamp(&config_params->blk_clamp);
-
- /* Black level compensation */
- ccdc_config_black_compense(&config_params->blk_comp);
-
- /* Vertical Defect Correction if needed */
- if (ccdc_config_vdfc(&config_params->vertical_dft) < 0)
- return -EFAULT;
-
- /* color space conversion */
- ccdc_config_csc(&config_params->csc);
-
- /* color pattern */
- ccdc_config_color_patterns(&config_params->col_pat_field0,
- &config_params->col_pat_field1);
-
- /* Configure the Gain & offset control */
- ccdc_config_gain_offset();
-
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to COLPTN...\n", val);
-
- /* Configure DATAOFST register */
- val = (config_params->data_offset.horz_offset & CCDC_DATAOFST_MASK) <<
- CCDC_DATAOFST_H_SHIFT;
- val |= (config_params->data_offset.vert_offset & CCDC_DATAOFST_MASK) <<
- CCDC_DATAOFST_V_SHIFT;
- regw(val, DATAOFST);
-
- /* configuring HSIZE register */
- val = (params->horz_flip_enable & CCDC_HSIZE_FLIP_MASK) <<
- CCDC_HSIZE_FLIP_SHIFT;
-
- /* If pack 8 is enable then 1 pixel will take 1 byte */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable) {
- val |= (((params->win.width) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK;
-
- /* adjust to multiple of 32 */
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
- (((params->win.width) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK);
- } else {
- /* else one pixel will take 2 byte */
- val |= (((params->win.width * 2) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK;
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to HSIZE...\n",
- (((params->win.width * 2) + 31) >> 5) &
- CCDC_HSIZE_VAL_MASK);
- }
- regw(val, HSIZE);
-
- /* Configure SDOFST register */
- if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (params->image_invert_enable) {
- /* For interlace inverse mode */
- regw(CCDC_SDOFST_INTERLACE_INVERSE, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_INTERLACE_INVERSE);
- } else {
- /* For interlace non inverse mode */
- regw(CCDC_SDOFST_INTERLACE_NORMAL, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_INTERLACE_NORMAL);
- }
- } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
- if (params->image_invert_enable) {
- /* For progessive inverse mode */
- regw(CCDC_SDOFST_PROGRESSIVE_INVERSE, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_PROGRESSIVE_INVERSE);
- } else {
- /* For progessive non inverse mode */
- regw(CCDC_SDOFST_PROGRESSIVE_NORMAL, SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting %x to SDOFST...\n",
- CCDC_SDOFST_PROGRESSIVE_NORMAL);
- }
- }
- dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
- return 0;
-}
-
-static int ccdc_configure(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_config_raw();
- else
- ccdc_config_ycbcr();
- return 0;
-}
-
-static int ccdc_set_buftype(enum ccdc_buftype buf_type)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.buf_type = buf_type;
- else
- ccdc_cfg.ycbcr.buf_type = buf_type;
- return 0;
-}
-static enum ccdc_buftype ccdc_get_buftype(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.buf_type;
- return ccdc_cfg.ycbcr.buf_type;
-}
-
-static int ccdc_enum_pix(u32 *pix, int i)
-{
- int ret = -EINVAL;
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
- *pix = ccdc_raw_bayer_pix_formats[i];
- ret = 0;
- }
- } else {
- if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
- *pix = ccdc_raw_yuv_pix_formats[i];
- ret = 0;
- }
- }
- return ret;
-}
-
-static int ccdc_set_pixel_format(u32 pixfmt)
-{
- struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- if (pixfmt == V4L2_PIX_FMT_SBGGR8)
- alaw->enable = 1;
- else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
- return -EINVAL;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- else
- return -EINVAL;
- }
- return 0;
-}
-static u32 ccdc_get_pixel_format(void)
-{
- struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
- u32 pixfmt;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- if (alaw->enable)
- pixfmt = V4L2_PIX_FMT_SBGGR8;
- else
- pixfmt = V4L2_PIX_FMT_SBGGR16;
- else {
- if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
- pixfmt = V4L2_PIX_FMT_YUYV;
- else
- pixfmt = V4L2_PIX_FMT_UYVY;
- }
- return pixfmt;
-}
-static int ccdc_set_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.win = *win;
- else
- ccdc_cfg.ycbcr.win = *win;
- return 0;
-}
-
-static void ccdc_get_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- *win = ccdc_cfg.bayer.win;
- else
- *win = ccdc_cfg.ycbcr.win;
-}
-
-static unsigned int ccdc_get_line_length(void)
-{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int len;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if ((config_params->alaw.enable) ||
- (config_params->data_sz == CCDC_DATA_8BITS))
- len = ccdc_cfg.bayer.win.width;
- else
- len = ccdc_cfg.bayer.win.width * 2;
- } else
- len = ccdc_cfg.ycbcr.win.width * 2;
- return ALIGN(len, 32);
-}
-
-static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.frm_fmt = frm_fmt;
- else
- ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
- return 0;
-}
-
-static enum ccdc_frmfmt ccdc_get_frame_format(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.frm_fmt;
- else
- return ccdc_cfg.ycbcr.frm_fmt;
-}
-
-static int ccdc_getfid(void)
-{
- return (regr(MODESET) >> 15) & 1;
-}
-
-/* misc operations */
-static inline void ccdc_setfbaddr(unsigned long addr)
-{
- regw((addr >> 21) & 0x007f, STADRH);
- regw((addr >> 5) & 0x0ffff, STADRL);
-}
-
-static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
-{
- ccdc_cfg.if_type = params->if_type;
-
- switch (params->if_type) {
- case VPFE_BT656:
- case VPFE_YCBCR_SYNC_16:
- case VPFE_YCBCR_SYNC_8:
- ccdc_cfg.ycbcr.vd_pol = params->vdpol;
- ccdc_cfg.ycbcr.hd_pol = params->hdpol;
- break;
- default:
- /* TODO add support for raw bayer here */
- return -EINVAL;
- }
- return 0;
-}
-
-static const struct ccdc_hw_device ccdc_hw_dev = {
- .name = "DM355 CCDC",
- .owner = THIS_MODULE,
- .hw_ops = {
- .open = ccdc_open,
- .close = ccdc_close,
- .enable = ccdc_enable,
- .enable_out_to_sdram = ccdc_enable_output_to_sdram,
- .set_hw_if_params = ccdc_set_hw_if_params,
- .configure = ccdc_configure,
- .set_buftype = ccdc_set_buftype,
- .get_buftype = ccdc_get_buftype,
- .enum_pix = ccdc_enum_pix,
- .set_pixel_format = ccdc_set_pixel_format,
- .get_pixel_format = ccdc_get_pixel_format,
- .set_frame_format = ccdc_set_frame_format,
- .get_frame_format = ccdc_get_frame_format,
- .set_image_window = ccdc_set_image_window,
- .get_image_window = ccdc_get_image_window,
- .get_line_length = ccdc_get_line_length,
- .setfbaddr = ccdc_setfbaddr,
- .getfid = ccdc_getfid,
- },
-};
-
-static int dm355_ccdc_probe(struct platform_device *pdev)
-{
- void (*setup_pinmux)(void);
- struct resource *res;
- int status = 0;
-
- /*
- * first try to register with vpfe. If not correct platform, then we
- * don't have to iomap
- */
- status = vpfe_register_ccdc_device(&ccdc_hw_dev);
- if (status < 0)
- return status;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- status = -ENODEV;
- goto fail_nores;
- }
-
- res = request_mem_region(res->start, resource_size(res), res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nores;
- }
-
- ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
- if (!ccdc_cfg.base_addr) {
- status = -ENOMEM;
- goto fail_nomem;
- }
-
- /* Platform data holds setup_pinmux function ptr */
- if (NULL == pdev->dev.platform_data) {
- status = -ENODEV;
- goto fail_nomap;
- }
- setup_pinmux = pdev->dev.platform_data;
- /*
- * setup Mux configuration for ccdc which may be different for
- * different SoCs using this CCDC
- */
- setup_pinmux();
- ccdc_cfg.dev = &pdev->dev;
- printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
- return 0;
-fail_nomap:
- iounmap(ccdc_cfg.base_addr);
-fail_nomem:
- release_mem_region(res->start, resource_size(res));
-fail_nores:
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return status;
-}
-
-static int dm355_ccdc_remove(struct platform_device *pdev)
-{
- struct resource *res;
-
- iounmap(ccdc_cfg.base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return 0;
-}
-
-static struct platform_driver dm355_ccdc_driver = {
- .driver = {
- .name = "dm355_ccdc",
- },
- .remove = dm355_ccdc_remove,
- .probe = dm355_ccdc_probe,
-};
-
-module_platform_driver(dm355_ccdc_driver);
diff --git a/drivers/media/platform/ti/davinci/dm355_ccdc_regs.h b/drivers/media/platform/ti/davinci/dm355_ccdc_regs.h
deleted file mode 100644
index eb381f075245..000000000000
--- a/drivers/media/platform/ti/davinci/dm355_ccdc_regs.h
+++ /dev/null
@@ -1,297 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2005-2009 Texas Instruments Inc
- */
-#ifndef _DM355_CCDC_REGS_H
-#define _DM355_CCDC_REGS_H
-
-/**************************************************************************\
-* Register OFFSET Definitions
-\**************************************************************************/
-#define SYNCEN 0x00
-#define MODESET 0x04
-#define HDWIDTH 0x08
-#define VDWIDTH 0x0c
-#define PPLN 0x10
-#define LPFR 0x14
-#define SPH 0x18
-#define NPH 0x1c
-#define SLV0 0x20
-#define SLV1 0x24
-#define NLV 0x28
-#define CULH 0x2c
-#define CULV 0x30
-#define HSIZE 0x34
-#define SDOFST 0x38
-#define STADRH 0x3c
-#define STADRL 0x40
-#define CLAMP 0x44
-#define DCSUB 0x48
-#define COLPTN 0x4c
-#define BLKCMP0 0x50
-#define BLKCMP1 0x54
-#define MEDFILT 0x58
-#define RYEGAIN 0x5c
-#define GRCYGAIN 0x60
-#define GBGGAIN 0x64
-#define BMGGAIN 0x68
-#define OFFSET 0x6c
-#define OUTCLIP 0x70
-#define VDINT0 0x74
-#define VDINT1 0x78
-#define RSV0 0x7c
-#define GAMMAWD 0x80
-#define REC656IF 0x84
-#define CCDCFG 0x88
-#define FMTCFG 0x8c
-#define FMTPLEN 0x90
-#define FMTSPH 0x94
-#define FMTLNH 0x98
-#define FMTSLV 0x9c
-#define FMTLNV 0xa0
-#define FMTRLEN 0xa4
-#define FMTHCNT 0xa8
-#define FMT_ADDR_PTR_B 0xac
-#define FMT_ADDR_PTR(i) (FMT_ADDR_PTR_B + (i * 4))
-#define FMTPGM_VF0 0xcc
-#define FMTPGM_VF1 0xd0
-#define FMTPGM_AP0 0xd4
-#define FMTPGM_AP1 0xd8
-#define FMTPGM_AP2 0xdc
-#define FMTPGM_AP3 0xe0
-#define FMTPGM_AP4 0xe4
-#define FMTPGM_AP5 0xe8
-#define FMTPGM_AP6 0xec
-#define FMTPGM_AP7 0xf0
-#define LSCCFG1 0xf4
-#define LSCCFG2 0xf8
-#define LSCH0 0xfc
-#define LSCV0 0x100
-#define LSCKH 0x104
-#define LSCKV 0x108
-#define LSCMEMCTL 0x10c
-#define LSCMEMD 0x110
-#define LSCMEMQ 0x114
-#define DFCCTL 0x118
-#define DFCVSAT 0x11c
-#define DFCMEMCTL 0x120
-#define DFCMEM0 0x124
-#define DFCMEM1 0x128
-#define DFCMEM2 0x12c
-#define DFCMEM3 0x130
-#define DFCMEM4 0x134
-#define CSCCTL 0x138
-#define CSCM0 0x13c
-#define CSCM1 0x140
-#define CSCM2 0x144
-#define CSCM3 0x148
-#define CSCM4 0x14c
-#define CSCM5 0x150
-#define CSCM6 0x154
-#define CSCM7 0x158
-#define DATAOFST 0x15c
-#define CCDC_REG_LAST DATAOFST
-/**************************************************************
-* Define for various register bit mask and shifts for CCDC
-*
-**************************************************************/
-#define CCDC_RAW_IP_MODE 0
-#define CCDC_VDHDOUT_INPUT 0
-#define CCDC_YCINSWP_RAW (0 << 4)
-#define CCDC_EXWEN_DISABLE 0
-#define CCDC_DATAPOL_NORMAL 0
-#define CCDC_CCDCFG_FIDMD_LATCH_VSYNC 0
-#define CCDC_CCDCFG_FIDMD_NO_LATCH_VSYNC (1 << 6)
-#define CCDC_CCDCFG_WENLOG_AND 0
-#define CCDC_CCDCFG_TRGSEL_WEN 0
-#define CCDC_CCDCFG_EXTRG_DISABLE 0
-#define CCDC_CFA_MOSAIC 0
-#define CCDC_Y8POS_SHIFT 11
-
-#define CCDC_VDC_DFCVSAT_MASK 0x3fff
-#define CCDC_DATAOFST_MASK 0x0ff
-#define CCDC_DATAOFST_H_SHIFT 0
-#define CCDC_DATAOFST_V_SHIFT 8
-#define CCDC_GAMMAWD_CFA_MASK 1
-#define CCDC_GAMMAWD_CFA_SHIFT 5
-#define CCDC_GAMMAWD_INPUT_SHIFT 2
-#define CCDC_FID_POL_MASK 1
-#define CCDC_FID_POL_SHIFT 4
-#define CCDC_HD_POL_MASK 1
-#define CCDC_HD_POL_SHIFT 3
-#define CCDC_VD_POL_MASK 1
-#define CCDC_VD_POL_SHIFT 2
-#define CCDC_VD_POL_NEGATIVE (1 << 2)
-#define CCDC_FRM_FMT_MASK 1
-#define CCDC_FRM_FMT_SHIFT 7
-#define CCDC_DATA_SZ_MASK 7
-#define CCDC_DATA_SZ_SHIFT 8
-#define CCDC_VDHDOUT_MASK 1
-#define CCDC_VDHDOUT_SHIFT 0
-#define CCDC_EXWEN_MASK 1
-#define CCDC_EXWEN_SHIFT 5
-#define CCDC_INPUT_MODE_MASK 3
-#define CCDC_INPUT_MODE_SHIFT 12
-#define CCDC_PIX_FMT_MASK 3
-#define CCDC_PIX_FMT_SHIFT 12
-#define CCDC_DATAPOL_MASK 1
-#define CCDC_DATAPOL_SHIFT 6
-#define CCDC_WEN_ENABLE (1 << 1)
-#define CCDC_VDHDEN_ENABLE (1 << 16)
-#define CCDC_LPF_ENABLE (1 << 14)
-#define CCDC_ALAW_ENABLE 1
-#define CCDC_ALAW_GAMMA_WD_MASK 7
-#define CCDC_REC656IF_BT656_EN 3
-
-#define CCDC_FMTCFG_FMTMODE_MASK 3
-#define CCDC_FMTCFG_FMTMODE_SHIFT 1
-#define CCDC_FMTCFG_LNUM_MASK 3
-#define CCDC_FMTCFG_LNUM_SHIFT 4
-#define CCDC_FMTCFG_ADDRINC_MASK 7
-#define CCDC_FMTCFG_ADDRINC_SHIFT 8
-
-#define CCDC_CCDCFG_FIDMD_SHIFT 6
-#define CCDC_CCDCFG_WENLOG_SHIFT 8
-#define CCDC_CCDCFG_TRGSEL_SHIFT 9
-#define CCDC_CCDCFG_EXTRG_SHIFT 10
-#define CCDC_CCDCFG_MSBINVI_SHIFT 13
-
-#define CCDC_HSIZE_FLIP_SHIFT 12
-#define CCDC_HSIZE_FLIP_MASK 1
-#define CCDC_HSIZE_VAL_MASK 0xFFF
-#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
-#define CCDC_SDOFST_INTERLACE_INVERSE 0x4B6D
-#define CCDC_SDOFST_INTERLACE_NORMAL 0x0B6D
-#define CCDC_SDOFST_PROGRESSIVE_INVERSE 0x4000
-#define CCDC_SDOFST_PROGRESSIVE_NORMAL 0
-#define CCDC_START_PX_HOR_MASK 0x7FFF
-#define CCDC_NUM_PX_HOR_MASK 0x7FFF
-#define CCDC_START_VER_ONE_MASK 0x7FFF
-#define CCDC_START_VER_TWO_MASK 0x7FFF
-#define CCDC_NUM_LINES_VER 0x7FFF
-
-#define CCDC_BLK_CLAMP_ENABLE (1 << 15)
-#define CCDC_BLK_SGAIN_MASK 0x1F
-#define CCDC_BLK_ST_PXL_MASK 0x1FFF
-#define CCDC_BLK_SAMPLE_LN_MASK 3
-#define CCDC_BLK_SAMPLE_LN_SHIFT 13
-
-#define CCDC_NUM_LINE_CALC_MASK 3
-#define CCDC_NUM_LINE_CALC_SHIFT 14
-
-#define CCDC_BLK_DC_SUB_MASK 0x3FFF
-#define CCDC_BLK_COMP_MASK 0xFF
-#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
-#define CCDC_BLK_COMP_GR_COMP_SHIFT 0
-#define CCDC_BLK_COMP_R_COMP_SHIFT 8
-#define CCDC_LATCH_ON_VSYNC_DISABLE (1 << 15)
-#define CCDC_LATCH_ON_VSYNC_ENABLE (0 << 15)
-#define CCDC_FPC_ENABLE (1 << 15)
-#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
-#define CCDC_DATA_PACK_ENABLE (1 << 11)
-#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
-#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
-#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
-#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
-#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
-#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
-#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
-
-#define CCDC_CSC_COEF_INTEG_MASK 7
-#define CCDC_CSC_COEF_DECIMAL_MASK 0x1f
-#define CCDC_CSC_COEF_INTEG_SHIFT 5
-#define CCDC_CSCM_MSB_SHIFT 8
-#define CCDC_CSC_ENABLE 1
-#define CCDC_CSC_DEC_MAX 32
-
-#define CCDC_MFILT1_SHIFT 10
-#define CCDC_MFILT2_SHIFT 8
-#define CCDC_MED_FILT_THRESH 0x3FFF
-#define CCDC_LPF_MASK 1
-#define CCDC_LPF_SHIFT 14
-#define CCDC_OFFSET_MASK 0x3FF
-#define CCDC_DATASFT_MASK 7
-#define CCDC_DATASFT_SHIFT 8
-
-#define CCDC_DF_ENABLE 1
-
-#define CCDC_FMTPLEN_P0_MASK 0xF
-#define CCDC_FMTPLEN_P1_MASK 0xF
-#define CCDC_FMTPLEN_P2_MASK 7
-#define CCDC_FMTPLEN_P3_MASK 7
-#define CCDC_FMTPLEN_P0_SHIFT 0
-#define CCDC_FMTPLEN_P1_SHIFT 4
-#define CCDC_FMTPLEN_P2_SHIFT 8
-#define CCDC_FMTPLEN_P3_SHIFT 12
-
-#define CCDC_FMTSPH_MASK 0x1FFF
-#define CCDC_FMTLNH_MASK 0x1FFF
-#define CCDC_FMTSLV_MASK 0x1FFF
-#define CCDC_FMTLNV_MASK 0x7FFF
-#define CCDC_FMTRLEN_MASK 0x1FFF
-#define CCDC_FMTHCNT_MASK 0x1FFF
-
-#define CCDC_ADP_INIT_MASK 0x1FFF
-#define CCDC_ADP_LINE_SHIFT 13
-#define CCDC_ADP_LINE_MASK 3
-#define CCDC_FMTPGN_APTR_MASK 7
-
-#define CCDC_DFCCTL_GDFCEN_MASK 1
-#define CCDC_DFCCTL_VDFCEN_MASK 1
-#define CCDC_DFCCTL_VDFC_DISABLE (0 << 4)
-#define CCDC_DFCCTL_VDFCEN_SHIFT 4
-#define CCDC_DFCCTL_VDFCSL_MASK 3
-#define CCDC_DFCCTL_VDFCSL_SHIFT 5
-#define CCDC_DFCCTL_VDFCUDA_MASK 1
-#define CCDC_DFCCTL_VDFCUDA_SHIFT 7
-#define CCDC_DFCCTL_VDFLSFT_MASK 3
-#define CCDC_DFCCTL_VDFLSFT_SHIFT 8
-#define CCDC_DFCMEMCTL_DFCMARST_MASK 1
-#define CCDC_DFCMEMCTL_DFCMARST_SHIFT 2
-#define CCDC_DFCMEMCTL_DFCMWR_MASK 1
-#define CCDC_DFCMEMCTL_DFCMWR_SHIFT 0
-#define CCDC_DFCMEMCTL_INC_ADDR (0 << 2)
-
-#define CCDC_LSCCFG_GFTSF_MASK 7
-#define CCDC_LSCCFG_GFTSF_SHIFT 1
-#define CCDC_LSCCFG_GFTINV_MASK 0xf
-#define CCDC_LSCCFG_GFTINV_SHIFT 4
-#define CCDC_LSC_GFTABLE_SEL_MASK 3
-#define CCDC_LSC_GFTABLE_EPEL_SHIFT 8
-#define CCDC_LSC_GFTABLE_OPEL_SHIFT 10
-#define CCDC_LSC_GFTABLE_EPOL_SHIFT 12
-#define CCDC_LSC_GFTABLE_OPOL_SHIFT 14
-#define CCDC_LSC_GFMODE_MASK 3
-#define CCDC_LSC_GFMODE_SHIFT 4
-#define CCDC_LSC_DISABLE 0
-#define CCDC_LSC_ENABLE 1
-#define CCDC_LSC_TABLE1_SLC 0
-#define CCDC_LSC_TABLE2_SLC 1
-#define CCDC_LSC_TABLE3_SLC 2
-#define CCDC_LSC_MEMADDR_RESET (1 << 2)
-#define CCDC_LSC_MEMADDR_INCR (0 << 2)
-#define CCDC_LSC_FRAC_MASK_T1 0xFF
-#define CCDC_LSC_INT_MASK 3
-#define CCDC_LSC_FRAC_MASK 0x3FFF
-#define CCDC_LSC_CENTRE_MASK 0x3FFF
-#define CCDC_LSC_COEF_MASK 0xff
-#define CCDC_LSC_COEFL_SHIFT 0
-#define CCDC_LSC_COEFU_SHIFT 8
-#define CCDC_GAIN_MASK 0x7FF
-#define CCDC_SYNCEN_VDHDEN_MASK (1 << 0)
-#define CCDC_SYNCEN_WEN_MASK (1 << 1)
-#define CCDC_SYNCEN_WEN_SHIFT 1
-
-/* Power on Defaults in hardware */
-#define MODESET_DEFAULT 0x200
-#define CULH_DEFAULT 0xFFFF
-#define CULV_DEFAULT 0xFF
-#define GAIN_DEFAULT 256
-#define OUTCLIP_DEFAULT 0x3FFF
-#define LSCCFG2_DEFAULT 0xE
-
-#endif
diff --git a/drivers/media/platform/ti/davinci/dm644x_ccdc.c b/drivers/media/platform/ti/davinci/dm644x_ccdc.c
deleted file mode 100644
index e4073e99914c..000000000000
--- a/drivers/media/platform/ti/davinci/dm644x_ccdc.c
+++ /dev/null
@@ -1,879 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2006-2009 Texas Instruments Inc
- *
- * CCDC hardware module for DM6446
- * ------------------------------
- *
- * This module configures the CCD controller of the DM6446 VPFE to capture
- * raw YUV or Bayer RGB data from a decoder. The CCDC has several modules,
- * such as Defect Pixel Correction and Color Space Conversion, to
- * pre-process the raw Bayer RGB data before writing it to SDRAM.
- * This file is named DM644x so that other variants such as DM6443
- * may be supported using the same module.
- *
- * TODO: Test Raw bayer parameter settings and bayer capture
- * Split module parameter structure to module specific ioctl structs
- * investigate whether the enums used for user space type definitions
- * should be replaced by #defines or integers
- */
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/videodev2.h>
-#include <linux/gfp.h>
-#include <linux/err.h>
-#include <linux/module.h>
-
-#include <media/davinci/dm644x_ccdc.h>
-#include <media/davinci/vpss.h>
-
-#include "dm644x_ccdc_regs.h"
-#include "ccdc_hw_device.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("CCDC Driver for DM6446");
-MODULE_AUTHOR("Texas Instruments");
-
-static struct ccdc_oper_config {
- struct device *dev;
- /* CCDC interface type */
- enum vpfe_hw_if_type if_type;
- /* Raw Bayer configuration */
- struct ccdc_params_raw bayer;
- /* YCbCr configuration */
- struct ccdc_params_ycbcr ycbcr;
- /* ccdc base address */
- void __iomem *base_addr;
-} ccdc_cfg = {
- /* Raw configurations */
- .bayer = {
- .pix_fmt = CCDC_PIXFMT_RAW,
- .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
- .win = CCDC_WIN_VGA,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .config_params = {
- .data_sz = CCDC_DATA_10BITS,
- },
- },
- .ycbcr = {
- .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
- .frm_fmt = CCDC_FRMFMT_INTERLACED,
- .win = CCDC_WIN_PAL,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .bt656_enable = 1,
- .pix_order = CCDC_PIXORDER_CBYCRY,
- .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED
- },
-};
-
-#define CCDC_MAX_RAW_YUV_FORMATS 2
-
-/* Raw Bayer formats */
-static u32 ccdc_raw_bayer_pix_formats[] =
- {V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
-
-/* Raw YUV formats */
-static u32 ccdc_raw_yuv_pix_formats[] =
- {V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
-
-/* CCDC Save/Restore context */
-static u32 ccdc_ctx[CCDC_REG_END / sizeof(u32)];
-
-/* register access routines */
-static inline u32 regr(u32 offset)
-{
- return __raw_readl(ccdc_cfg.base_addr + offset);
-}
-
-static inline void regw(u32 val, u32 offset)
-{
- __raw_writel(val, ccdc_cfg.base_addr + offset);
-}
-
-static void ccdc_enable(int flag)
-{
- regw(flag, CCDC_PCR);
-}
-
-static void ccdc_enable_vport(int flag)
-{
- if (flag)
- /* enable video port */
- regw(CCDC_ENABLE_VIDEO_PORT, CCDC_FMTCFG);
- else
- regw(CCDC_DISABLE_VIDEO_PORT, CCDC_FMTCFG);
-}
-
-/*
- * ccdc_setwin()
- * This function will configure the window size
- * to be captured in the CCDC registers
- */
-static void ccdc_setwin(struct v4l2_rect *image_win,
- enum ccdc_frmfmt frm_fmt,
- int ppc)
-{
- int horz_start, horz_nr_pixels;
- int vert_start, vert_nr_lines;
- int val = 0, mid_img = 0;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_setwin...");
- /*
- * ppc - per pixel count. Indicates how many pixels per cell are
- * output to SDRAM. For example, for YCbCr it is one Y and one C, so 2.
- * For raw capture this is 1.
- */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = (image_win->width << (ppc - 1)) - 1;
- regw((horz_start << CCDC_HORZ_INFO_SPH_SHIFT) | horz_nr_pixels,
- CCDC_HORZ_INFO);
-
- vert_start = image_win->top;
-
- if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* Since first line doesn't have any data */
- vert_start += 1;
- /* configure VDINT0 */
- val = (vert_start << CCDC_VDINT_VDINT0_SHIFT);
- regw(val, CCDC_VDINT);
-
- } else {
- /* Since first line doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /*
- * configure VDINT0 and VDINT1. VDINT1 will be at half
- * of image height
- */
- mid_img = vert_start + (image_win->height / 2);
- val = (vert_start << CCDC_VDINT_VDINT0_SHIFT) |
- (mid_img & CCDC_VDINT_VDINT1_MASK);
- regw(val, CCDC_VDINT);
-
- }
- regw((vert_start << CCDC_VERT_START_SLV0_SHIFT) | vert_start,
- CCDC_VERT_START);
- regw(vert_nr_lines, CCDC_VERT_LINES);
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_setwin...");
-}
-
-static void ccdc_readregs(void)
-{
- unsigned int val = 0;
-
- val = regr(CCDC_ALAW);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to ALAW...\n", val);
- val = regr(CCDC_CLAMP);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to CLAMP...\n", val);
- val = regr(CCDC_DCSUB);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to DCSUB...\n", val);
- val = regr(CCDC_BLKCMP);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to BLKCMP...\n", val);
- val = regr(CCDC_FPC_ADDR);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC_ADDR...\n", val);
- val = regr(CCDC_FPC);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FPC...\n", val);
- val = regr(CCDC_FMTCFG);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMTCFG...\n", val);
- val = regr(CCDC_COLPTN);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to COLPTN...\n", val);
- val = regr(CCDC_FMT_HORZ);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_HORZ...\n", val);
- val = regr(CCDC_FMT_VERT);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to FMT_VERT...\n", val);
- val = regr(CCDC_HSIZE_OFF);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HSIZE_OFF...\n", val);
- val = regr(CCDC_SDOFST);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SDOFST...\n", val);
- val = regr(CCDC_VP_OUT);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VP_OUT...\n", val);
- val = regr(CCDC_SYN_MODE);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to SYN_MODE...\n", val);
- val = regr(CCDC_HORZ_INFO);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to HORZ_INFO...\n", val);
- val = regr(CCDC_VERT_START);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_START...\n", val);
- val = regr(CCDC_VERT_LINES);
- dev_notice(ccdc_cfg.dev, "\nReading 0x%x to VERT_LINES...\n", val);
-}
-
-static int ccdc_close(struct device *dev)
-{
- return 0;
-}
-
-/*
- * ccdc_restore_defaults()
- * This function will write defaults to all CCDC registers
- */
-static void ccdc_restore_defaults(void)
-{
- int i;
-
- /* disable CCDC */
- ccdc_enable(0);
- /* set all registers to default value */
- for (i = 4; i <= 0x94; i += 4)
- regw(0, i);
- regw(CCDC_NO_CULLING, CCDC_CULLING);
- regw(CCDC_GAMMA_BITS_11_2, CCDC_ALAW);
-}
-
-static int ccdc_open(struct device *device)
-{
- ccdc_restore_defaults();
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_enable_vport(1);
- return 0;
-}
-
-static void ccdc_sbl_reset(void)
-{
- vpss_clear_wbl_overflow(VPSS_PCR_CCDC_WBL_O);
-}
-
-/*
- * ccdc_config_ycbcr()
- * This function will configure CCDC for YCbCr video capture
- */
-static void ccdc_config_ycbcr(void)
-{
- struct ccdc_params_ycbcr *params = &ccdc_cfg.ycbcr;
- u32 syn_mode;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_ycbcr...");
- /*
- * first restore the CCDC registers to default values
- * This is important since we assume default values to be set in
- * a lot of registers that we didn't touch
- */
- ccdc_restore_defaults();
-
- /*
- * configure pixel format, frame format, configure video frame
- * format, enable output to SDRAM, enable internal timing generator
- * and 8bit pack mode
- */
- syn_mode = (((params->pix_fmt & CCDC_SYN_MODE_INPMOD_MASK) <<
- CCDC_SYN_MODE_INPMOD_SHIFT) |
- ((params->frm_fmt & CCDC_SYN_FLDMODE_MASK) <<
- CCDC_SYN_FLDMODE_SHIFT) | CCDC_VDHDEN_ENABLE |
- CCDC_WEN_ENABLE | CCDC_DATA_PACK_ENABLE);
-
- /* setup BT.656 sync mode */
- if (params->bt656_enable) {
- regw(CCDC_REC656IF_BT656_EN, CCDC_REC656IF);
-
- /*
- * configure the FID, VD, HD pin polarity,
- * fld,hd pol positive, vd negative, 8-bit data
- */
- syn_mode |= CCDC_SYN_MODE_VD_POL_NEGATIVE;
- if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
- syn_mode |= CCDC_SYN_MODE_10BITS;
- else
- syn_mode |= CCDC_SYN_MODE_8BITS;
- } else {
- /* y/c external sync mode */
- syn_mode |= (((params->fid_pol & CCDC_FID_POL_MASK) <<
- CCDC_FID_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) <<
- CCDC_HD_POL_SHIFT) |
- ((params->vd_pol & CCDC_VD_POL_MASK) <<
- CCDC_VD_POL_SHIFT));
- }
- regw(syn_mode, CCDC_SYN_MODE);
-
- /* configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, 2);
-
- /*
- * configure the order of Y/Cb/Cr in SDRAM, and disable latching of
- * internal registers on VSYNC
- */
- if (ccdc_cfg.if_type == VPFE_BT656_10BIT)
- regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
- CCDC_LATCH_ON_VSYNC_DISABLE | CCDC_CCDCFG_BW656_10BIT,
- CCDC_CCDCFG);
- else
- regw((params->pix_order << CCDC_CCDCFG_Y8POS_SHIFT) |
- CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
-
- /*
- * configure the horizontal line offset. This should be on a
- * 32 byte boundary. So clear the 5 LSBs
- */
- regw(((params->win.width * 2 + 31) & ~0x1f), CCDC_HSIZE_OFF);
-
- /* configure the memory line offset */
- if (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED)
- /* two fields are interleaved in memory */
- regw(CCDC_SDOFST_FIELD_INTERLEAVED, CCDC_SDOFST);
-
- ccdc_sbl_reset();
- dev_dbg(ccdc_cfg.dev, "\nEnd of ccdc_config_ycbcr...\n");
-}
-
-static void ccdc_config_black_clamp(struct ccdc_black_clamp *bclamp)
-{
- u32 val;
-
- if (!bclamp->enable) {
- /* configure DCSub */
- val = (bclamp->dc_sub) & CCDC_BLK_DC_SUB_MASK;
- regw(val, CCDC_DCSUB);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to DCSUB...\n", val);
- regw(CCDC_CLAMP_DEFAULT_VAL, CCDC_CLAMP);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to CLAMP...\n");
- return;
- }
- /*
- * Configure gain, start pixel, number of lines to be averaged,
- * number of pixels/line to be averaged, and enable black clamping
- */
- val = ((bclamp->sgain & CCDC_BLK_SGAIN_MASK) |
- ((bclamp->start_pixel & CCDC_BLK_ST_PXL_MASK) <<
- CCDC_BLK_ST_PXL_SHIFT) |
- ((bclamp->sample_ln & CCDC_BLK_SAMPLE_LINE_MASK) <<
- CCDC_BLK_SAMPLE_LINE_SHIFT) |
- ((bclamp->sample_pixel & CCDC_BLK_SAMPLE_LN_MASK) <<
- CCDC_BLK_SAMPLE_LN_SHIFT) | CCDC_BLK_CLAMP_ENABLE);
- regw(val, CCDC_CLAMP);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to CLAMP...\n", val);
- /* If black clamping is enabled then make dcsub 0 */
- regw(CCDC_DCSUB_DEFAULT_VAL, CCDC_DCSUB);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x00000000 to DCSUB...\n");
-}
-
-static void ccdc_config_black_compense(struct ccdc_black_compensation *bcomp)
-{
- u32 val;
-
- val = ((bcomp->b & CCDC_BLK_COMP_MASK) |
- ((bcomp->gb & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GB_COMP_SHIFT) |
- ((bcomp->gr & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_GR_COMP_SHIFT) |
- ((bcomp->r & CCDC_BLK_COMP_MASK) <<
- CCDC_BLK_COMP_R_COMP_SHIFT));
- regw(val, CCDC_BLKCMP);
-}
-
-/*
- * ccdc_config_raw()
- * This function will configure CCDC for Raw capture mode
- */
-static void ccdc_config_raw(void)
-{
- struct ccdc_params_raw *params = &ccdc_cfg.bayer;
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int syn_mode = 0;
- unsigned int val;
-
- dev_dbg(ccdc_cfg.dev, "\nStarting ccdc_config_raw...");
-
- /* Reset CCDC */
- ccdc_restore_defaults();
-
- /* Disable latching function registers on VSYNC */
- regw(CCDC_LATCH_ON_VSYNC_DISABLE, CCDC_CCDCFG);
-
- /*
- * Configure the vertical sync polarity (SYN_MODE.VDPOL),
- * horizontal sync polarity (SYN_MODE.HDPOL), frame id polarity
- * (SYN_MODE.FLDPOL), frame format (progressive or interlaced),
- * data size (SYNMODE.DATSIZ), and pixel format (input mode), output to
- * SDRAM, and enable the internal timing generator
- */
- syn_mode =
- (((params->vd_pol & CCDC_VD_POL_MASK) << CCDC_VD_POL_SHIFT) |
- ((params->hd_pol & CCDC_HD_POL_MASK) << CCDC_HD_POL_SHIFT) |
- ((params->fid_pol & CCDC_FID_POL_MASK) << CCDC_FID_POL_SHIFT) |
- ((params->frm_fmt & CCDC_FRM_FMT_MASK) << CCDC_FRM_FMT_SHIFT) |
- ((config_params->data_sz & CCDC_DATA_SZ_MASK) <<
- CCDC_DATA_SZ_SHIFT) |
- ((params->pix_fmt & CCDC_PIX_FMT_MASK) << CCDC_PIX_FMT_SHIFT) |
- CCDC_WEN_ENABLE | CCDC_VDHDEN_ENABLE);
-
- /* Enable and configure aLaw register if needed */
- if (config_params->alaw.enable) {
- val = ((config_params->alaw.gamma_wd &
- CCDC_ALAW_GAMMA_WD_MASK) | CCDC_ALAW_ENABLE);
- regw(val, CCDC_ALAW);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to ALAW...\n", val);
- }
-
- /* Configure video window */
- ccdc_setwin(&params->win, params->frm_fmt, CCDC_PPC_RAW);
-
- /* Configure Black Clamp */
- ccdc_config_black_clamp(&config_params->blk_clamp);
-
- /* Configure Black level compensation */
- ccdc_config_black_compense(&config_params->blk_comp);
-
- /* If data size is 8 bit then pack the data */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable)
- syn_mode |= CCDC_DATA_PACK_ENABLE;
-
- /* disable video port */
- val = CCDC_DISABLE_VIDEO_PORT;
-
- if (config_params->data_sz == CCDC_DATA_8BITS)
- val |= (CCDC_DATA_10BITS & CCDC_FMTCFG_VPIN_MASK)
- << CCDC_FMTCFG_VPIN_SHIFT;
- else
- val |= (config_params->data_sz & CCDC_FMTCFG_VPIN_MASK)
- << CCDC_FMTCFG_VPIN_SHIFT;
- /* Write value in FMTCFG */
- regw(val, CCDC_FMTCFG);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMTCFG...\n", val);
- /* Configure the color pattern according to mt9t001 sensor */
- regw(CCDC_COLPTN_VAL, CCDC_COLPTN);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0xBB11BB11 to COLPTN...\n");
- /*
- * Configure Data formatter (Video port) pixel selection
- * (FMT_HORZ, FMT_VERT)
- */
- val = ((params->win.left & CCDC_FMT_HORZ_FMTSPH_MASK) <<
- CCDC_FMT_HORZ_FMTSPH_SHIFT) |
- (params->win.width & CCDC_FMT_HORZ_FMTLNH_MASK);
- regw(val, CCDC_FMT_HORZ);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_HORZ...\n", val);
- val = (params->win.top & CCDC_FMT_VERT_FMTSLV_MASK)
- << CCDC_FMT_VERT_FMTSLV_SHIFT;
- if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
- val |= (params->win.height) & CCDC_FMT_VERT_FMTLNV_MASK;
- else
- val |= (params->win.height >> 1) & CCDC_FMT_VERT_FMTLNV_MASK;
-
- dev_dbg(ccdc_cfg.dev, "\nparams->win.height 0x%x ...\n",
- params->win.height);
- regw(val, CCDC_FMT_VERT);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to FMT_VERT...\n", val);
-
- dev_dbg(ccdc_cfg.dev, "\nbelow regw(val, FMT_VERT)...");
-
- /*
- * Configure Horizontal offset register. If pack 8 is enabled then
- * 1 pixel will take 1 byte
- */
- if ((config_params->data_sz == CCDC_DATA_8BITS) ||
- config_params->alaw.enable)
- regw((params->win.width + CCDC_32BYTE_ALIGN_VAL) &
- CCDC_HSIZE_OFF_MASK, CCDC_HSIZE_OFF);
- else
- /* else one pixel will take 2 bytes */
- regw(((params->win.width * CCDC_TWO_BYTES_PER_PIXEL) +
- CCDC_32BYTE_ALIGN_VAL) & CCDC_HSIZE_OFF_MASK,
- CCDC_HSIZE_OFF);
-
- /* Set value for SDOFST */
- if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (params->image_invert_enable) {
- /* For interlace inverse mode */
- regw(CCDC_INTERLACED_IMAGE_INVERT, CCDC_SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x4B6D to SDOFST..\n");
- }
-
- else {
- /* For interlace non inverse mode */
- regw(CCDC_INTERLACED_NO_IMAGE_INVERT, CCDC_SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x0249 to SDOFST..\n");
- }
- } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
- regw(CCDC_PROGRESSIVE_NO_IMAGE_INVERT, CCDC_SDOFST);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x0000 to SDOFST...\n");
- }
-
- /*
- * Configure video port pixel selection (VPOUT)
- * Here -1 is to make the height value less than FMT_VERT.FMTLNV
- */
- if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE)
- val = (((params->win.height - 1) & CCDC_VP_OUT_VERT_NUM_MASK))
- << CCDC_VP_OUT_VERT_NUM_SHIFT;
- else
- val =
- ((((params->win.height >> CCDC_INTERLACED_HEIGHT_SHIFT) -
- 1) & CCDC_VP_OUT_VERT_NUM_MASK)) <<
- CCDC_VP_OUT_VERT_NUM_SHIFT;
-
- val |= ((((params->win.width))) & CCDC_VP_OUT_HORZ_NUM_MASK)
- << CCDC_VP_OUT_HORZ_NUM_SHIFT;
- val |= (params->win.left) & CCDC_VP_OUT_HORZ_ST_MASK;
- regw(val, CCDC_VP_OUT);
-
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to VP_OUT...\n", val);
- regw(syn_mode, CCDC_SYN_MODE);
- dev_dbg(ccdc_cfg.dev, "\nWriting 0x%x to SYN_MODE...\n", syn_mode);
-
- ccdc_sbl_reset();
- dev_dbg(ccdc_cfg.dev, "\nend of ccdc_config_raw...");
- ccdc_readregs();
-}
-
-static int ccdc_configure(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_config_raw();
- else
- ccdc_config_ycbcr();
- return 0;
-}
-
-static int ccdc_set_buftype(enum ccdc_buftype buf_type)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.buf_type = buf_type;
- else
- ccdc_cfg.ycbcr.buf_type = buf_type;
- return 0;
-}
-
-static enum ccdc_buftype ccdc_get_buftype(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.buf_type;
- return ccdc_cfg.ycbcr.buf_type;
-}
-
-static int ccdc_enum_pix(u32 *pix, int i)
-{
- int ret = -EINVAL;
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if (i < ARRAY_SIZE(ccdc_raw_bayer_pix_formats)) {
- *pix = ccdc_raw_bayer_pix_formats[i];
- ret = 0;
- }
- } else {
- if (i < ARRAY_SIZE(ccdc_raw_yuv_pix_formats)) {
- *pix = ccdc_raw_yuv_pix_formats[i];
- ret = 0;
- }
- }
- return ret;
-}
-
-static int ccdc_set_pixel_format(u32 pixfmt)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- ccdc_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- if (pixfmt == V4L2_PIX_FMT_SBGGR8)
- ccdc_cfg.bayer.config_params.alaw.enable = 1;
- else if (pixfmt != V4L2_PIX_FMT_SBGGR16)
- return -EINVAL;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- ccdc_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- else
- return -EINVAL;
- }
- return 0;
-}
-
-static u32 ccdc_get_pixel_format(void)
-{
- struct ccdc_a_law *alaw = &ccdc_cfg.bayer.config_params.alaw;
- u32 pixfmt;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- if (alaw->enable)
- pixfmt = V4L2_PIX_FMT_SBGGR8;
- else
- pixfmt = V4L2_PIX_FMT_SBGGR16;
- else {
- if (ccdc_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
- pixfmt = V4L2_PIX_FMT_YUYV;
- else
- pixfmt = V4L2_PIX_FMT_UYVY;
- }
- return pixfmt;
-}
-
-static int ccdc_set_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.win = *win;
- else
- ccdc_cfg.ycbcr.win = *win;
- return 0;
-}
-
-static void ccdc_get_image_window(struct v4l2_rect *win)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- *win = ccdc_cfg.bayer.win;
- else
- *win = ccdc_cfg.ycbcr.win;
-}
-
-static unsigned int ccdc_get_line_length(void)
-{
- struct ccdc_config_params_raw *config_params =
- &ccdc_cfg.bayer.config_params;
- unsigned int len;
-
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER) {
- if ((config_params->alaw.enable) ||
- (config_params->data_sz == CCDC_DATA_8BITS))
- len = ccdc_cfg.bayer.win.width;
- else
- len = ccdc_cfg.bayer.win.width * 2;
- } else
- len = ccdc_cfg.ycbcr.win.width * 2;
- return ALIGN(len, 32);
-}
-
-static int ccdc_set_frame_format(enum ccdc_frmfmt frm_fmt)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- ccdc_cfg.bayer.frm_fmt = frm_fmt;
- else
- ccdc_cfg.ycbcr.frm_fmt = frm_fmt;
- return 0;
-}
-
-static enum ccdc_frmfmt ccdc_get_frame_format(void)
-{
- if (ccdc_cfg.if_type == VPFE_RAW_BAYER)
- return ccdc_cfg.bayer.frm_fmt;
- else
- return ccdc_cfg.ycbcr.frm_fmt;
-}
-
-static int ccdc_getfid(void)
-{
- return (regr(CCDC_SYN_MODE) >> 15) & 1;
-}
-
-/* misc operations */
-static inline void ccdc_setfbaddr(unsigned long addr)
-{
- regw(addr & 0xffffffe0, CCDC_SDR_ADDR);
-}
-
-static int ccdc_set_hw_if_params(struct vpfe_hw_if_param *params)
-{
- ccdc_cfg.if_type = params->if_type;
-
- switch (params->if_type) {
- case VPFE_BT656:
- case VPFE_YCBCR_SYNC_16:
- case VPFE_YCBCR_SYNC_8:
- case VPFE_BT656_10BIT:
- ccdc_cfg.ycbcr.vd_pol = params->vdpol;
- ccdc_cfg.ycbcr.hd_pol = params->hdpol;
- break;
- default:
- /* TODO add support for raw bayer here */
- return -EINVAL;
- }
- return 0;
-}
-
-static void ccdc_save_context(void)
-{
- ccdc_ctx[CCDC_PCR >> 2] = regr(CCDC_PCR);
- ccdc_ctx[CCDC_SYN_MODE >> 2] = regr(CCDC_SYN_MODE);
- ccdc_ctx[CCDC_HD_VD_WID >> 2] = regr(CCDC_HD_VD_WID);
- ccdc_ctx[CCDC_PIX_LINES >> 2] = regr(CCDC_PIX_LINES);
- ccdc_ctx[CCDC_HORZ_INFO >> 2] = regr(CCDC_HORZ_INFO);
- ccdc_ctx[CCDC_VERT_START >> 2] = regr(CCDC_VERT_START);
- ccdc_ctx[CCDC_VERT_LINES >> 2] = regr(CCDC_VERT_LINES);
- ccdc_ctx[CCDC_CULLING >> 2] = regr(CCDC_CULLING);
- ccdc_ctx[CCDC_HSIZE_OFF >> 2] = regr(CCDC_HSIZE_OFF);
- ccdc_ctx[CCDC_SDOFST >> 2] = regr(CCDC_SDOFST);
- ccdc_ctx[CCDC_SDR_ADDR >> 2] = regr(CCDC_SDR_ADDR);
- ccdc_ctx[CCDC_CLAMP >> 2] = regr(CCDC_CLAMP);
- ccdc_ctx[CCDC_DCSUB >> 2] = regr(CCDC_DCSUB);
- ccdc_ctx[CCDC_COLPTN >> 2] = regr(CCDC_COLPTN);
- ccdc_ctx[CCDC_BLKCMP >> 2] = regr(CCDC_BLKCMP);
- ccdc_ctx[CCDC_FPC >> 2] = regr(CCDC_FPC);
- ccdc_ctx[CCDC_FPC_ADDR >> 2] = regr(CCDC_FPC_ADDR);
- ccdc_ctx[CCDC_VDINT >> 2] = regr(CCDC_VDINT);
- ccdc_ctx[CCDC_ALAW >> 2] = regr(CCDC_ALAW);
- ccdc_ctx[CCDC_REC656IF >> 2] = regr(CCDC_REC656IF);
- ccdc_ctx[CCDC_CCDCFG >> 2] = regr(CCDC_CCDCFG);
- ccdc_ctx[CCDC_FMTCFG >> 2] = regr(CCDC_FMTCFG);
- ccdc_ctx[CCDC_FMT_HORZ >> 2] = regr(CCDC_FMT_HORZ);
- ccdc_ctx[CCDC_FMT_VERT >> 2] = regr(CCDC_FMT_VERT);
- ccdc_ctx[CCDC_FMT_ADDR0 >> 2] = regr(CCDC_FMT_ADDR0);
- ccdc_ctx[CCDC_FMT_ADDR1 >> 2] = regr(CCDC_FMT_ADDR1);
- ccdc_ctx[CCDC_FMT_ADDR2 >> 2] = regr(CCDC_FMT_ADDR2);
- ccdc_ctx[CCDC_FMT_ADDR3 >> 2] = regr(CCDC_FMT_ADDR3);
- ccdc_ctx[CCDC_FMT_ADDR4 >> 2] = regr(CCDC_FMT_ADDR4);
- ccdc_ctx[CCDC_FMT_ADDR5 >> 2] = regr(CCDC_FMT_ADDR5);
- ccdc_ctx[CCDC_FMT_ADDR6 >> 2] = regr(CCDC_FMT_ADDR6);
- ccdc_ctx[CCDC_FMT_ADDR7 >> 2] = regr(CCDC_FMT_ADDR7);
- ccdc_ctx[CCDC_PRGEVEN_0 >> 2] = regr(CCDC_PRGEVEN_0);
- ccdc_ctx[CCDC_PRGEVEN_1 >> 2] = regr(CCDC_PRGEVEN_1);
- ccdc_ctx[CCDC_PRGODD_0 >> 2] = regr(CCDC_PRGODD_0);
- ccdc_ctx[CCDC_PRGODD_1 >> 2] = regr(CCDC_PRGODD_1);
- ccdc_ctx[CCDC_VP_OUT >> 2] = regr(CCDC_VP_OUT);
-}
-
-static void ccdc_restore_context(void)
-{
- regw(ccdc_ctx[CCDC_SYN_MODE >> 2], CCDC_SYN_MODE);
- regw(ccdc_ctx[CCDC_HD_VD_WID >> 2], CCDC_HD_VD_WID);
- regw(ccdc_ctx[CCDC_PIX_LINES >> 2], CCDC_PIX_LINES);
- regw(ccdc_ctx[CCDC_HORZ_INFO >> 2], CCDC_HORZ_INFO);
- regw(ccdc_ctx[CCDC_VERT_START >> 2], CCDC_VERT_START);
- regw(ccdc_ctx[CCDC_VERT_LINES >> 2], CCDC_VERT_LINES);
- regw(ccdc_ctx[CCDC_CULLING >> 2], CCDC_CULLING);
- regw(ccdc_ctx[CCDC_HSIZE_OFF >> 2], CCDC_HSIZE_OFF);
- regw(ccdc_ctx[CCDC_SDOFST >> 2], CCDC_SDOFST);
- regw(ccdc_ctx[CCDC_SDR_ADDR >> 2], CCDC_SDR_ADDR);
- regw(ccdc_ctx[CCDC_CLAMP >> 2], CCDC_CLAMP);
- regw(ccdc_ctx[CCDC_DCSUB >> 2], CCDC_DCSUB);
- regw(ccdc_ctx[CCDC_COLPTN >> 2], CCDC_COLPTN);
- regw(ccdc_ctx[CCDC_BLKCMP >> 2], CCDC_BLKCMP);
- regw(ccdc_ctx[CCDC_FPC >> 2], CCDC_FPC);
- regw(ccdc_ctx[CCDC_FPC_ADDR >> 2], CCDC_FPC_ADDR);
- regw(ccdc_ctx[CCDC_VDINT >> 2], CCDC_VDINT);
- regw(ccdc_ctx[CCDC_ALAW >> 2], CCDC_ALAW);
- regw(ccdc_ctx[CCDC_REC656IF >> 2], CCDC_REC656IF);
- regw(ccdc_ctx[CCDC_CCDCFG >> 2], CCDC_CCDCFG);
- regw(ccdc_ctx[CCDC_FMTCFG >> 2], CCDC_FMTCFG);
- regw(ccdc_ctx[CCDC_FMT_HORZ >> 2], CCDC_FMT_HORZ);
- regw(ccdc_ctx[CCDC_FMT_VERT >> 2], CCDC_FMT_VERT);
- regw(ccdc_ctx[CCDC_FMT_ADDR0 >> 2], CCDC_FMT_ADDR0);
- regw(ccdc_ctx[CCDC_FMT_ADDR1 >> 2], CCDC_FMT_ADDR1);
- regw(ccdc_ctx[CCDC_FMT_ADDR2 >> 2], CCDC_FMT_ADDR2);
- regw(ccdc_ctx[CCDC_FMT_ADDR3 >> 2], CCDC_FMT_ADDR3);
- regw(ccdc_ctx[CCDC_FMT_ADDR4 >> 2], CCDC_FMT_ADDR4);
- regw(ccdc_ctx[CCDC_FMT_ADDR5 >> 2], CCDC_FMT_ADDR5);
- regw(ccdc_ctx[CCDC_FMT_ADDR6 >> 2], CCDC_FMT_ADDR6);
- regw(ccdc_ctx[CCDC_FMT_ADDR7 >> 2], CCDC_FMT_ADDR7);
- regw(ccdc_ctx[CCDC_PRGEVEN_0 >> 2], CCDC_PRGEVEN_0);
- regw(ccdc_ctx[CCDC_PRGEVEN_1 >> 2], CCDC_PRGEVEN_1);
- regw(ccdc_ctx[CCDC_PRGODD_0 >> 2], CCDC_PRGODD_0);
- regw(ccdc_ctx[CCDC_PRGODD_1 >> 2], CCDC_PRGODD_1);
- regw(ccdc_ctx[CCDC_VP_OUT >> 2], CCDC_VP_OUT);
- regw(ccdc_ctx[CCDC_PCR >> 2], CCDC_PCR);
-}
-static const struct ccdc_hw_device ccdc_hw_dev = {
- .name = "DM6446 CCDC",
- .owner = THIS_MODULE,
- .hw_ops = {
- .open = ccdc_open,
- .close = ccdc_close,
- .reset = ccdc_sbl_reset,
- .enable = ccdc_enable,
- .set_hw_if_params = ccdc_set_hw_if_params,
- .configure = ccdc_configure,
- .set_buftype = ccdc_set_buftype,
- .get_buftype = ccdc_get_buftype,
- .enum_pix = ccdc_enum_pix,
- .set_pixel_format = ccdc_set_pixel_format,
- .get_pixel_format = ccdc_get_pixel_format,
- .set_frame_format = ccdc_set_frame_format,
- .get_frame_format = ccdc_get_frame_format,
- .set_image_window = ccdc_set_image_window,
- .get_image_window = ccdc_get_image_window,
- .get_line_length = ccdc_get_line_length,
- .setfbaddr = ccdc_setfbaddr,
- .getfid = ccdc_getfid,
- },
-};
-
-static int dm644x_ccdc_probe(struct platform_device *pdev)
-{
- struct resource *res;
- int status = 0;
-
- /*
- * first try to register with vpfe. If this is not the correct platform,
- * then we don't have to iomap
- */
- status = vpfe_register_ccdc_device(&ccdc_hw_dev);
- if (status < 0)
- return status;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- status = -ENODEV;
- goto fail_nores;
- }
-
- res = request_mem_region(res->start, resource_size(res), res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nores;
- }
-
- ccdc_cfg.base_addr = ioremap(res->start, resource_size(res));
- if (!ccdc_cfg.base_addr) {
- status = -ENOMEM;
- goto fail_nomem;
- }
-
- ccdc_cfg.dev = &pdev->dev;
- printk(KERN_NOTICE "%s is registered with vpfe.\n", ccdc_hw_dev.name);
- return 0;
-fail_nomem:
- release_mem_region(res->start, resource_size(res));
-fail_nores:
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return status;
-}
-
-static int dm644x_ccdc_remove(struct platform_device *pdev)
-{
- struct resource *res;
-
- iounmap(ccdc_cfg.base_addr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- vpfe_unregister_ccdc_device(&ccdc_hw_dev);
- return 0;
-}
-
-static int dm644x_ccdc_suspend(struct device *dev)
-{
- /* Save CCDC context */
- ccdc_save_context();
- /* Disable CCDC */
- ccdc_enable(0);
-
- return 0;
-}
-
-static int dm644x_ccdc_resume(struct device *dev)
-{
- /* Restore CCDC context */
- ccdc_restore_context();
-
- return 0;
-}
-
-static const struct dev_pm_ops dm644x_ccdc_pm_ops = {
- .suspend = dm644x_ccdc_suspend,
- .resume = dm644x_ccdc_resume,
-};
-
-static struct platform_driver dm644x_ccdc_driver = {
- .driver = {
- .name = "dm644x_ccdc",
- .pm = &dm644x_ccdc_pm_ops,
- },
- .remove = dm644x_ccdc_remove,
- .probe = dm644x_ccdc_probe,
-};
-
-module_platform_driver(dm644x_ccdc_driver);
diff --git a/drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h b/drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h
deleted file mode 100644
index c4894f6a254e..000000000000
--- a/drivers/media/platform/ti/davinci/dm644x_ccdc_regs.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006-2009 Texas Instruments Inc
- */
-#ifndef _DM644X_CCDC_REGS_H
-#define _DM644X_CCDC_REGS_H
-
-/**************************************************************************\
-* Register OFFSET Definitions
-\**************************************************************************/
-#define CCDC_PID 0x0
-#define CCDC_PCR 0x4
-#define CCDC_SYN_MODE 0x8
-#define CCDC_HD_VD_WID 0xc
-#define CCDC_PIX_LINES 0x10
-#define CCDC_HORZ_INFO 0x14
-#define CCDC_VERT_START 0x18
-#define CCDC_VERT_LINES 0x1c
-#define CCDC_CULLING 0x20
-#define CCDC_HSIZE_OFF 0x24
-#define CCDC_SDOFST 0x28
-#define CCDC_SDR_ADDR 0x2c
-#define CCDC_CLAMP 0x30
-#define CCDC_DCSUB 0x34
-#define CCDC_COLPTN 0x38
-#define CCDC_BLKCMP 0x3c
-#define CCDC_FPC 0x40
-#define CCDC_FPC_ADDR 0x44
-#define CCDC_VDINT 0x48
-#define CCDC_ALAW 0x4c
-#define CCDC_REC656IF 0x50
-#define CCDC_CCDCFG 0x54
-#define CCDC_FMTCFG 0x58
-#define CCDC_FMT_HORZ 0x5c
-#define CCDC_FMT_VERT 0x60
-#define CCDC_FMT_ADDR0 0x64
-#define CCDC_FMT_ADDR1 0x68
-#define CCDC_FMT_ADDR2 0x6c
-#define CCDC_FMT_ADDR3 0x70
-#define CCDC_FMT_ADDR4 0x74
-#define CCDC_FMT_ADDR5 0x78
-#define CCDC_FMT_ADDR6 0x7c
-#define CCDC_FMT_ADDR7 0x80
-#define CCDC_PRGEVEN_0 0x84
-#define CCDC_PRGEVEN_1 0x88
-#define CCDC_PRGODD_0 0x8c
-#define CCDC_PRGODD_1 0x90
-#define CCDC_VP_OUT 0x94
-#define CCDC_REG_END 0x98
-
-/***************************************************************
-* Defines for various register bit masks and shifts for the CCDC
-****************************************************************/
-#define CCDC_FID_POL_MASK 1
-#define CCDC_FID_POL_SHIFT 4
-#define CCDC_HD_POL_MASK 1
-#define CCDC_HD_POL_SHIFT 3
-#define CCDC_VD_POL_MASK 1
-#define CCDC_VD_POL_SHIFT 2
-#define CCDC_HSIZE_OFF_MASK 0xffffffe0
-#define CCDC_32BYTE_ALIGN_VAL 31
-#define CCDC_FRM_FMT_MASK 0x1
-#define CCDC_FRM_FMT_SHIFT 7
-#define CCDC_DATA_SZ_MASK 7
-#define CCDC_DATA_SZ_SHIFT 8
-#define CCDC_PIX_FMT_MASK 3
-#define CCDC_PIX_FMT_SHIFT 12
-#define CCDC_VP2SDR_DISABLE 0xFFFBFFFF
-#define CCDC_WEN_ENABLE BIT(17)
-#define CCDC_SDR2RSZ_DISABLE 0xFFF7FFFF
-#define CCDC_VDHDEN_ENABLE BIT(16)
-#define CCDC_LPF_ENABLE BIT(14)
-#define CCDC_ALAW_ENABLE BIT(3)
-#define CCDC_ALAW_GAMMA_WD_MASK 7
-#define CCDC_BLK_CLAMP_ENABLE BIT(31)
-#define CCDC_BLK_SGAIN_MASK 0x1F
-#define CCDC_BLK_ST_PXL_MASK 0x7FFF
-#define CCDC_BLK_ST_PXL_SHIFT 10
-#define CCDC_BLK_SAMPLE_LN_MASK 7
-#define CCDC_BLK_SAMPLE_LN_SHIFT 28
-#define CCDC_BLK_SAMPLE_LINE_MASK 7
-#define CCDC_BLK_SAMPLE_LINE_SHIFT 25
-#define CCDC_BLK_DC_SUB_MASK 0x03FFF
-#define CCDC_BLK_COMP_MASK 0xFF
-#define CCDC_BLK_COMP_GB_COMP_SHIFT 8
-#define CCDC_BLK_COMP_GR_COMP_SHIFT 16
-#define CCDC_BLK_COMP_R_COMP_SHIFT 24
-#define CCDC_LATCH_ON_VSYNC_DISABLE BIT(15)
-#define CCDC_FPC_ENABLE BIT(15)
-#define CCDC_FPC_DISABLE 0
-#define CCDC_FPC_FPC_NUM_MASK 0x7FFF
-#define CCDC_DATA_PACK_ENABLE BIT(11)
-#define CCDC_FMTCFG_VPIN_MASK 7
-#define CCDC_FMTCFG_VPIN_SHIFT 12
-#define CCDC_FMT_HORZ_FMTLNH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_MASK 0x1FFF
-#define CCDC_FMT_HORZ_FMTSPH_SHIFT 16
-#define CCDC_FMT_VERT_FMTLNV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_MASK 0x1FFF
-#define CCDC_FMT_VERT_FMTSLV_SHIFT 16
-#define CCDC_VP_OUT_VERT_NUM_MASK 0x3FFF
-#define CCDC_VP_OUT_VERT_NUM_SHIFT 17
-#define CCDC_VP_OUT_HORZ_NUM_MASK 0x1FFF
-#define CCDC_VP_OUT_HORZ_NUM_SHIFT 4
-#define CCDC_VP_OUT_HORZ_ST_MASK 0xF
-#define CCDC_HORZ_INFO_SPH_SHIFT 16
-#define CCDC_VERT_START_SLV0_SHIFT 16
-#define CCDC_VDINT_VDINT0_SHIFT 16
-#define CCDC_VDINT_VDINT1_MASK 0xFFFF
-#define CCDC_PPC_RAW 1
-#define CCDC_DCSUB_DEFAULT_VAL 0
-#define CCDC_CLAMP_DEFAULT_VAL 0
-#define CCDC_ENABLE_VIDEO_PORT 0x8000
-#define CCDC_DISABLE_VIDEO_PORT 0
-#define CCDC_COLPTN_VAL 0xBB11BB11
-#define CCDC_TWO_BYTES_PER_PIXEL 2
-#define CCDC_INTERLACED_IMAGE_INVERT 0x4B6D
-#define CCDC_INTERLACED_NO_IMAGE_INVERT 0x0249
-#define CCDC_PROGRESSIVE_IMAGE_INVERT 0x4000
-#define CCDC_PROGRESSIVE_NO_IMAGE_INVERT 0
-#define CCDC_INTERLACED_HEIGHT_SHIFT 1
-#define CCDC_SYN_MODE_INPMOD_SHIFT 12
-#define CCDC_SYN_MODE_INPMOD_MASK 3
-#define CCDC_SYN_MODE_8BITS (7 << 8)
-#define CCDC_SYN_MODE_10BITS (6 << 8)
-#define CCDC_SYN_MODE_11BITS (5 << 8)
-#define CCDC_SYN_MODE_12BITS (4 << 8)
-#define CCDC_SYN_MODE_13BITS (3 << 8)
-#define CCDC_SYN_MODE_14BITS (2 << 8)
-#define CCDC_SYN_MODE_15BITS (1 << 8)
-#define CCDC_SYN_MODE_16BITS (0 << 8)
-#define CCDC_SYN_FLDMODE_MASK 1
-#define CCDC_SYN_FLDMODE_SHIFT 7
-#define CCDC_REC656IF_BT656_EN 3
-#define CCDC_SYN_MODE_VD_POL_NEGATIVE BIT(2)
-#define CCDC_CCDCFG_Y8POS_SHIFT 11
-#define CCDC_CCDCFG_BW656_10BIT BIT(5)
-#define CCDC_SDOFST_FIELD_INTERLEAVED 0x249
-#define CCDC_NO_CULLING 0xffff00ff
-#endif
diff --git a/drivers/media/platform/ti/davinci/isif.c b/drivers/media/platform/ti/davinci/isif.c
deleted file mode 100644
index 69e862de014f..000000000000
--- a/drivers/media/platform/ti/davinci/isif.c
+++ /dev/null
@@ -1,1127 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * Image Sensor Interface (ISIF) driver
- *
- * This driver configures the ISIF IP available on the DM365 or other
- * TI SoCs. It is used for capturing YUV or Bayer video or image data
- * from a decoder or sensor. This IP is similar to the CCDC IP on the DM355
- * and DM6446, but with enhanced or additional IP blocks. The driver
- * configures the ISIF upon commands from the vpfe bridge driver through
- * the ccdc_hw_device interface.
- *
- * TODO: 1) Raw bayer parameter settings and bayer capture
- * 2) Add support for control ioctl
- */
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/videodev2.h>
-#include <linux/err.h>
-#include <linux/module.h>
-
-#include <media/davinci/isif.h>
-#include <media/davinci/vpss.h>
-
-#include "isif_regs.h"
-#include "ccdc_hw_device.h"
-
-/* Defaults for module configuration parameters */
-static const struct isif_config_params_raw isif_config_defaults = {
- .linearize = {
- .en = 0,
- .corr_shft = ISIF_NO_SHIFT,
- .scale_fact = {1, 0},
- },
- .df_csc = {
- .df_or_csc = 0,
- .csc = {
- .en = 0,
- },
- },
- .dfc = {
- .en = 0,
- },
- .bclamp = {
- .en = 0,
- },
- .gain_offset = {
- .gain = {
- .r_ye = {1, 0},
- .gr_cy = {1, 0},
- .gb_g = {1, 0},
- .b_mg = {1, 0},
- },
- },
- .culling = {
- .hcpat_odd = 0xff,
- .hcpat_even = 0xff,
- .vcpat = 0xff,
- },
- .compress = {
- .alg = ISIF_ALAW,
- },
-};
-
-/* ISIF operation configuration */
-static struct isif_oper_config {
- struct device *dev;
- enum vpfe_hw_if_type if_type;
- struct isif_ycbcr_config ycbcr;
- struct isif_params_raw bayer;
- enum isif_data_pack data_pack;
- /* ISIF base address */
- void __iomem *base_addr;
- /* ISIF Linear Table 0 */
- void __iomem *linear_tbl0_addr;
- /* ISIF Linear Table 1 */
- void __iomem *linear_tbl1_addr;
-} isif_cfg = {
- .ycbcr = {
- .pix_fmt = CCDC_PIXFMT_YCBCR_8BIT,
- .frm_fmt = CCDC_FRMFMT_INTERLACED,
- .win = ISIF_WIN_NTSC,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .pix_order = CCDC_PIXORDER_CBYCRY,
- .buf_type = CCDC_BUFTYPE_FLD_INTERLEAVED,
- },
- .bayer = {
- .pix_fmt = CCDC_PIXFMT_RAW,
- .frm_fmt = CCDC_FRMFMT_PROGRESSIVE,
- .win = ISIF_WIN_VGA,
- .fid_pol = VPFE_PINPOL_POSITIVE,
- .vd_pol = VPFE_PINPOL_POSITIVE,
- .hd_pol = VPFE_PINPOL_POSITIVE,
- .gain = {
- .r_ye = {1, 0},
- .gr_cy = {1, 0},
- .gb_g = {1, 0},
- .b_mg = {1, 0},
- },
- .cfa_pat = ISIF_CFA_PAT_MOSAIC,
- .data_msb = ISIF_BIT_MSB_11,
- .config_params = {
- .data_shift = ISIF_NO_SHIFT,
- .col_pat_field0 = {
- .olop = ISIF_GREEN_BLUE,
- .olep = ISIF_BLUE,
- .elop = ISIF_RED,
- .elep = ISIF_GREEN_RED,
- },
- .col_pat_field1 = {
- .olop = ISIF_GREEN_BLUE,
- .olep = ISIF_BLUE,
- .elop = ISIF_RED,
- .elep = ISIF_GREEN_RED,
- },
- .test_pat_gen = 0,
- },
- },
- .data_pack = ISIF_DATA_PACK8,
-};
-
-/* Raw Bayer formats */
-static const u32 isif_raw_bayer_pix_formats[] = {
- V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SBGGR16};
-
-/* Raw YUV formats */
-static const u32 isif_raw_yuv_pix_formats[] = {
- V4L2_PIX_FMT_UYVY, V4L2_PIX_FMT_YUYV};
-
-/* register access routines */
-static inline u32 regr(u32 offset)
-{
- return __raw_readl(isif_cfg.base_addr + offset);
-}
-
-static inline void regw(u32 val, u32 offset)
-{
- __raw_writel(val, isif_cfg.base_addr + offset);
-}
-
-/* reg_modify() - read, modify and write register */
-static inline u32 reg_modify(u32 mask, u32 val, u32 offset)
-{
- u32 new_val = (regr(offset) & ~mask) | (val & mask);
-
- regw(new_val, offset);
- return new_val;
-}
-
-static inline void regw_lin_tbl(u32 val, u32 offset, int i)
-{
- if (!i)
- __raw_writel(val, isif_cfg.linear_tbl0_addr + offset);
- else
- __raw_writel(val, isif_cfg.linear_tbl1_addr + offset);
-}
-
-static void isif_disable_all_modules(void)
-{
- /* disable BC */
- regw(0, CLAMPCFG);
- /* disable vdfc */
- regw(0, DFCCTL);
- /* disable CSC */
- regw(0, CSCCTL);
- /* disable linearization */
- regw(0, LINCFG0);
- /* disable other modules here once they are supported */
-}
-
-static void isif_enable(int en)
-{
- if (!en) {
- /* Before disabling the ISIF, disable all ISIF modules */
- isif_disable_all_modules();
- /*
- * wait for the next VD. Assume the lowest scan rate is 12 Hz, so
- * a 100 msec delay is good enough
- */
- msleep(100);
- }
- reg_modify(ISIF_SYNCEN_VDHDEN_MASK, en, SYNCEN);
-}
-
-static void isif_enable_output_to_sdram(int en)
-{
- reg_modify(ISIF_SYNCEN_WEN_MASK, en << ISIF_SYNCEN_WEN_SHIFT, SYNCEN);
-}
-
-static void isif_config_culling(struct isif_cul *cul)
-{
- u32 val;
-
- /* Horizontal pattern */
- val = (cul->hcpat_even << CULL_PAT_EVEN_LINE_SHIFT) | cul->hcpat_odd;
- regw(val, CULH);
-
- /* vertical pattern */
- regw(cul->vcpat, CULV);
-
- /* LPF */
- reg_modify(ISIF_LPF_MASK << ISIF_LPF_SHIFT,
- cul->en_lpf << ISIF_LPF_SHIFT, MODESET);
-}
-
-static void isif_config_gain_offset(void)
-{
- struct isif_gain_offsets_adj *gain_off_p =
- &isif_cfg.bayer.config_params.gain_offset;
- u32 val;
-
- val = (!!gain_off_p->gain_sdram_en << GAIN_SDRAM_EN_SHIFT) |
- (!!gain_off_p->gain_ipipe_en << GAIN_IPIPE_EN_SHIFT) |
- (!!gain_off_p->gain_h3a_en << GAIN_H3A_EN_SHIFT) |
- (!!gain_off_p->offset_sdram_en << OFST_SDRAM_EN_SHIFT) |
- (!!gain_off_p->offset_ipipe_en << OFST_IPIPE_EN_SHIFT) |
- (!!gain_off_p->offset_h3a_en << OFST_H3A_EN_SHIFT);
-
- reg_modify(GAIN_OFFSET_EN_MASK, val, CGAMMAWD);
-
- val = (gain_off_p->gain.r_ye.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.r_ye.decimal;
- regw(val, CRGAIN);
-
- val = (gain_off_p->gain.gr_cy.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.gr_cy.decimal;
- regw(val, CGRGAIN);
-
- val = (gain_off_p->gain.gb_g.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.gb_g.decimal;
- regw(val, CGBGAIN);
-
- val = (gain_off_p->gain.b_mg.integer << GAIN_INTEGER_SHIFT) |
- gain_off_p->gain.b_mg.decimal;
- regw(val, CBGAIN);
-
- regw(gain_off_p->offset, COFSTA);
-}
-
-static void isif_restore_defaults(void)
-{
- enum vpss_ccdc_source_sel source = VPSS_CCDCIN;
-
- dev_dbg(isif_cfg.dev, "\nstarting isif_restore_defaults...");
- isif_cfg.bayer.config_params = isif_config_defaults;
- /* Enable clock to ISIF, IPIPEIF and BL */
- vpss_enable_clock(VPSS_CCDC_CLOCK, 1);
- vpss_enable_clock(VPSS_IPIPEIF_CLOCK, 1);
- vpss_enable_clock(VPSS_BL_CLOCK, 1);
- /* Set default offset and gain */
- isif_config_gain_offset();
- vpss_select_ccdc_source(source);
- dev_dbg(isif_cfg.dev, "\nEnd of isif_restore_defaults...");
-}
-
-static int isif_open(struct device *device)
-{
- isif_restore_defaults();
- return 0;
-}
-
-/* This function will configure the window size to be capture in ISIF reg */
-static void isif_setwin(struct v4l2_rect *image_win,
- enum ccdc_frmfmt frm_fmt, int ppc)
-{
- int horz_start, horz_nr_pixels;
- int vert_start, vert_nr_lines;
- int mid_img = 0;
-
- dev_dbg(isif_cfg.dev, "\nStarting isif_setwin...");
- /*
- * ppc - per pixel count. Indicates how many pixels per cell are
- * output to SDRAM. For example, for YCbCr it is one Y and one C, so 2.
- * For raw capture this is 1.
- */
- horz_start = image_win->left << (ppc - 1);
- horz_nr_pixels = ((image_win->width) << (ppc - 1)) - 1;
-
- /* Writing the horizontal info into the registers */
- regw(horz_start & START_PX_HOR_MASK, SPH);
- regw(horz_nr_pixels & NUM_PX_HOR_MASK, LNH);
- vert_start = image_win->top;
-
- if (frm_fmt == CCDC_FRMFMT_INTERLACED) {
- vert_nr_lines = (image_win->height >> 1) - 1;
- vert_start >>= 1;
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- } else {
- /* To account for VD since line 0 doesn't have any data */
- vert_start += 1;
- vert_nr_lines = image_win->height - 1;
- /* configure VDINT0 and VDINT1 */
- mid_img = vert_start + (image_win->height / 2);
- regw(mid_img, VDINT1);
- }
-
- regw(0, VDINT0);
- regw(vert_start & START_VER_ONE_MASK, SLV0);
- regw(vert_start & START_VER_TWO_MASK, SLV1);
- regw(vert_nr_lines & NUM_LINES_VER, LNV);
-}
-
-static void isif_config_bclamp(struct isif_black_clamp *bc)
-{
- u32 val;
-
- /*
- * DC Offset is always added to image data irrespective of bc enable
- * status
- */
- regw(bc->dc_offset, CLDCOFST);
-
- if (bc->en) {
- val = bc->bc_mode_color << ISIF_BC_MODE_COLOR_SHIFT;
-
- /* Enable BC and horizontal clamp calculation parameters */
- val = val | 1 | (bc->horz.mode << ISIF_HORZ_BC_MODE_SHIFT);
-
- regw(val, CLAMPCFG);
-
- if (bc->horz.mode != ISIF_HORZ_BC_DISABLE) {
- /*
- * Window count for calculation
- * Base window selection
- * pixel limit
- * Horizontal size of window
- * vertical size of the window
- * Horizontal start position of the window
- * Vertical start position of the window
- */
- val = bc->horz.win_count_calc |
- ((!!bc->horz.base_win_sel_calc) <<
- ISIF_HORZ_BC_WIN_SEL_SHIFT) |
- ((!!bc->horz.clamp_pix_limit) <<
- ISIF_HORZ_BC_PIX_LIMIT_SHIFT) |
- (bc->horz.win_h_sz_calc <<
- ISIF_HORZ_BC_WIN_H_SIZE_SHIFT) |
- (bc->horz.win_v_sz_calc <<
- ISIF_HORZ_BC_WIN_V_SIZE_SHIFT);
- regw(val, CLHWIN0);
-
- regw(bc->horz.win_start_h_calc, CLHWIN1);
- regw(bc->horz.win_start_v_calc, CLHWIN2);
- }
-
- /* vertical clamp calculation parameters */
-
- /* Reset clamp value sel for previous line */
- val |=
- (bc->vert.reset_val_sel << ISIF_VERT_BC_RST_VAL_SEL_SHIFT) |
- (bc->vert.line_ave_coef << ISIF_VERT_BC_LINE_AVE_COEF_SHIFT);
- regw(val, CLVWIN0);
-
- /* Optical Black horizontal start position */
- regw(bc->vert.ob_start_h, CLVWIN1);
- /* Optical Black vertical start position */
- regw(bc->vert.ob_start_v, CLVWIN2);
- /* Optical Black vertical size for calculation */
- regw(bc->vert.ob_v_sz_calc, CLVWIN3);
- /* Vertical start position for BC subtraction */
- regw(bc->vert_start_sub, CLSV);
- }
-}
-
-static void isif_config_linearization(struct isif_linearize *linearize)
-{
- u32 val, i;
-
- if (!linearize->en) {
- regw(0, LINCFG0);
- return;
- }
-
- /* shift value for correction & enable linearization (set lsb) */
- val = (linearize->corr_shft << ISIF_LIN_CORRSFT_SHIFT) | 1;
- regw(val, LINCFG0);
-
- /* Scale factor */
- val = ((!!linearize->scale_fact.integer) <<
- ISIF_LIN_SCALE_FACT_INTEG_SHIFT) |
- linearize->scale_fact.decimal;
- regw(val, LINCFG1);
-
- for (i = 0; i < ISIF_LINEAR_TAB_SIZE; i++) {
- if (i % 2)
- regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 1);
- else
- regw_lin_tbl(linearize->table[i], ((i >> 1) << 2), 0);
- }
-}
-
-static int isif_config_dfc(struct isif_dfc *vdfc)
-{
- /* initialize retries to loop for max ~ 250 usec */
- u32 val, count, retries = loops_per_jiffy / (4000/HZ);
- int i;
-
- if (!vdfc->en)
- return 0;
-
- /* Correction mode */
- val = (vdfc->corr_mode << ISIF_VDFC_CORR_MOD_SHIFT);
-
- /* Correct whole line or partial */
- if (vdfc->corr_whole_line)
- val |= 1 << ISIF_VDFC_CORR_WHOLE_LN_SHIFT;
-
- /* level shift value */
- val |= vdfc->def_level_shift << ISIF_VDFC_LEVEL_SHFT_SHIFT;
-
- regw(val, DFCCTL);
-
- /* Defect saturation level */
- regw(vdfc->def_sat_level, VDFSATLV);
-
- regw(vdfc->table[0].pos_vert, DFCMEM0);
- regw(vdfc->table[0].pos_horz, DFCMEM1);
- if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- regw(vdfc->table[0].level_at_pos, DFCMEM2);
- regw(vdfc->table[0].level_up_pixels, DFCMEM3);
- regw(vdfc->table[0].level_low_pixels, DFCMEM4);
- }
-
- /* set DFCMARST and set DFCMWR */
- val = regr(DFCMEMCTL) | (1 << ISIF_DFCMEMCTL_DFCMARST_SHIFT) | 1;
- regw(val, DFCMEMCTL);
-
- count = retries;
- while (count && (regr(DFCMEMCTL) & 0x1))
- count--;
-
- if (!count) {
- dev_dbg(isif_cfg.dev, "defect table write timeout !!!\n");
- return -1;
- }
-
- for (i = 1; i < vdfc->num_vdefects; i++) {
- regw(vdfc->table[i].pos_vert, DFCMEM0);
- regw(vdfc->table[i].pos_horz, DFCMEM1);
- if (vdfc->corr_mode == ISIF_VDFC_NORMAL ||
- vdfc->corr_mode == ISIF_VDFC_HORZ_INTERPOL_IF_SAT) {
- regw(vdfc->table[i].level_at_pos, DFCMEM2);
- regw(vdfc->table[i].level_up_pixels, DFCMEM3);
- regw(vdfc->table[i].level_low_pixels, DFCMEM4);
- }
- val = regr(DFCMEMCTL);
- /* clear DFCMARST and set DFCMWR */
- val &= ~BIT(ISIF_DFCMEMCTL_DFCMARST_SHIFT);
- val |= 1;
- regw(val, DFCMEMCTL);
-
- count = retries;
- while (count && (regr(DFCMEMCTL) & 0x1))
- count--;
-
- if (!count) {
- dev_err(isif_cfg.dev,
- "defect table write timeout !!!\n");
- return -1;
- }
- }
- if (vdfc->num_vdefects < ISIF_VDFC_TABLE_SIZE) {
- /* Extra cycle needed */
- regw(0, DFCMEM0);
- regw(0x1FFF, DFCMEM1);
- regw(1, DFCMEMCTL);
- }
-
- /* enable VDFC */
- reg_modify((1 << ISIF_VDFC_EN_SHIFT), (1 << ISIF_VDFC_EN_SHIFT),
- DFCCTL);
- return 0;
-}
-
-static void isif_config_csc(struct isif_df_csc *df_csc)
-{
- u32 val1 = 0, val2 = 0, i;
-
- if (!df_csc->csc.en) {
- regw(0, CSCCTL);
- return;
- }
- for (i = 0; i < ISIF_CSC_NUM_COEFF; i++) {
- if ((i % 2) == 0) {
- /* CSCM - LSB */
- val1 = (df_csc->csc.coeff[i].integer <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- df_csc->csc.coeff[i].decimal;
- } else {
-
- /* CSCM - MSB */
- val2 = (df_csc->csc.coeff[i].integer <<
- ISIF_CSC_COEF_INTEG_SHIFT) |
- df_csc->csc.coeff[i].decimal;
- val2 <<= ISIF_CSCM_MSB_SHIFT;
- val2 |= val1;
- regw(val2, (CSCM0 + ((i - 1) << 1)));
- }
- }
-
- /* program the active area */
- regw(df_csc->start_pix, FMTSPH);
- /*
- * one extra pixel as required for CSC. Actually the number of
- * pixels - 1 should be configured in this register. So we
- * would need to subtract 1 before writing to FMTSPH, but we do
- * not do this since CSC requires one extra pixel
- */
- regw(df_csc->num_pixels, FMTLNH);
- regw(df_csc->start_line, FMTSLV);
- /*
- * one extra line as required for CSC. See reason documented for
- * num_pixels
- */
- regw(df_csc->num_lines, FMTLNV);
-
- /* Enable CSC */
- regw(1, CSCCTL);
-}
-
-static int isif_config_raw(void)
-{
- struct isif_params_raw *params = &isif_cfg.bayer;
- struct isif_config_params_raw *module_params =
- &isif_cfg.bayer.config_params;
- struct vpss_pg_frame_size frame_size;
- struct vpss_sync_pol sync;
- u32 val;
-
- dev_dbg(isif_cfg.dev, "\nStarting isif_config_raw..\n");
-
- /*
- * Configure CCDCFG register:-
- * Set CCD Not to swap input since input is RAW data
- * Set FID detection function to Latch at V-Sync
- * Set WENLOG - isif valid area
- * Set TRGSEL
- * Set EXTRG
- * Packed to 8 or 16 bits
- */
-
- val = ISIF_YCINSWP_RAW | ISIF_CCDCFG_FIDMD_LATCH_VSYNC |
- ISIF_CCDCFG_WENLOG_AND | ISIF_CCDCFG_TRGSEL_WEN |
- ISIF_CCDCFG_EXTRG_DISABLE | isif_cfg.data_pack;
-
- dev_dbg(isif_cfg.dev, "Writing 0x%x to ...CCDCFG \n", val);
- regw(val, CCDCFG);
-
- /*
- * Configure the vertical sync polarity(MODESET.VDPOL)
- * Configure the horizontal sync polarity (MODESET.HDPOL)
- * Configure frame id polarity (MODESET.FLDPOL)
- * Configure data polarity
- * Configure External WEN Selection
- * Configure frame format(progressive or interlace)
- * Configure pixel format (Input mode)
- * Configure the data shift
- */
-
- val = ISIF_VDHDOUT_INPUT | (params->vd_pol << ISIF_VD_POL_SHIFT) |
- (params->hd_pol << ISIF_HD_POL_SHIFT) |
- (params->fid_pol << ISIF_FID_POL_SHIFT) |
- (ISIF_DATAPOL_NORMAL << ISIF_DATAPOL_SHIFT) |
- (ISIF_EXWEN_DISABLE << ISIF_EXWEN_SHIFT) |
- (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
- (params->pix_fmt << ISIF_INPUT_SHIFT) |
- (params->config_params.data_shift << ISIF_DATASFT_SHIFT);
-
- regw(val, MODESET);
- dev_dbg(isif_cfg.dev, "Writing 0x%x to MODESET...\n", val);
-
- /*
- * Configure GAMMAWD register
- * CFA pattern setting
- */
- val = params->cfa_pat << ISIF_GAMMAWD_CFA_SHIFT;
-
- /* Gamma msb */
- if (module_params->compress.alg == ISIF_ALAW)
- val |= ISIF_ALAW_ENABLE;
-
- val |= (params->data_msb << ISIF_ALAW_GAMMA_WD_SHIFT);
- regw(val, CGAMMAWD);
-
- /* Configure DPCM compression settings */
- if (module_params->compress.alg == ISIF_DPCM) {
- val = BIT(ISIF_DPCM_EN_SHIFT) |
- (module_params->compress.pred <<
- ISIF_DPCM_PREDICTOR_SHIFT);
- }
-
- regw(val, MISC);
-
- /* Configure Gain & Offset */
- isif_config_gain_offset();
-
- /* Configure Color pattern */
- val = (params->config_params.col_pat_field0.olop) |
- (params->config_params.col_pat_field0.olep << 2) |
- (params->config_params.col_pat_field0.elop << 4) |
- (params->config_params.col_pat_field0.elep << 6) |
- (params->config_params.col_pat_field1.olop << 8) |
- (params->config_params.col_pat_field1.olep << 10) |
- (params->config_params.col_pat_field1.elop << 12) |
- (params->config_params.col_pat_field1.elep << 14);
- regw(val, CCOLP);
- dev_dbg(isif_cfg.dev, "Writing %x to CCOLP ...\n", val);
-
- /* Configure HSIZE register */
- val = (!!params->horz_flip_en) << ISIF_HSIZE_FLIP_SHIFT;
-
- /* calculate line offset in units of 32 bytes based on pack value */
- if (isif_cfg.data_pack == ISIF_PACK_8BIT)
- val |= ((params->win.width + 31) >> 5);
- else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
- val |= (((params->win.width +
- (params->win.width >> 2)) + 31) >> 5);
- else
- val |= (((params->win.width * 2) + 31) >> 5);
- regw(val, HSIZE);
-
- /* Configure SDOFST register */
- if (params->frm_fmt == CCDC_FRMFMT_INTERLACED) {
- if (params->image_invert_en) {
- /* For interlace inverse mode */
- regw(0x4B6D, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x4B6D to SDOFST...\n");
- } else {
- /* For interlace non inverse mode */
- regw(0x0B6D, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x0B6D to SDOFST...\n");
- }
- } else if (params->frm_fmt == CCDC_FRMFMT_PROGRESSIVE) {
- if (params->image_invert_en) {
- /* For progressive inverse mode */
- regw(0x4000, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x4000 to SDOFST...\n");
- } else {
- /* For progressive non inverse mode */
- regw(0x0000, SDOFST);
- dev_dbg(isif_cfg.dev, "Writing 0x0000 to SDOFST...\n");
- }
- }
-
- /* Configure video window */
- isif_setwin(&params->win, params->frm_fmt, 1);
-
- /* Configure Black Clamp */
- isif_config_bclamp(&module_params->bclamp);
-
- /* Configure Vertical Defect Pixel Correction */
- if (isif_config_dfc(&module_params->dfc) < 0)
- return -EFAULT;
-
- if (!module_params->df_csc.df_or_csc)
- /* Configure Color Space Conversion */
- isif_config_csc(&module_params->df_csc);
-
- isif_config_linearization(&module_params->linearize);
-
- /* Configure Culling */
- isif_config_culling(&module_params->culling);
-
- /* Configure horizontal and vertical offsets(DFC,LSC,Gain) */
- regw(module_params->horz_offset, DATAHOFST);
- regw(module_params->vert_offset, DATAVOFST);
-
- /* Setup test pattern if enabled */
- if (params->config_params.test_pat_gen) {
- /* Use the HD/VD pol settings from user */
- sync.ccdpg_hdpol = params->hd_pol;
- sync.ccdpg_vdpol = params->vd_pol;
- dm365_vpss_set_sync_pol(sync);
- frame_size.hlpfr = isif_cfg.bayer.win.width;
- frame_size.pplen = isif_cfg.bayer.win.height;
- dm365_vpss_set_pg_frame_size(frame_size);
- vpss_select_ccdc_source(VPSS_PGLPBK);
- }
-
- dev_dbg(isif_cfg.dev, "\nEnd of isif_config_ycbcr...\n");
- return 0;
-}
-
-static int isif_set_buftype(enum ccdc_buftype buf_type)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- isif_cfg.bayer.buf_type = buf_type;
- else
- isif_cfg.ycbcr.buf_type = buf_type;
-
- return 0;
-}
-
-static enum ccdc_buftype isif_get_buftype(void)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- return isif_cfg.bayer.buf_type;
-
- return isif_cfg.ycbcr.buf_type;
-}
-
-static int isif_enum_pix(u32 *pix, int i)
-{
- int ret = -EINVAL;
-
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- if (i < ARRAY_SIZE(isif_raw_bayer_pix_formats)) {
- *pix = isif_raw_bayer_pix_formats[i];
- ret = 0;
- }
- } else {
- if (i < ARRAY_SIZE(isif_raw_yuv_pix_formats)) {
- *pix = isif_raw_yuv_pix_formats[i];
- ret = 0;
- }
- }
-
- return ret;
-}
-
-static int isif_set_pixel_format(unsigned int pixfmt)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- if (pixfmt == V4L2_PIX_FMT_SBGGR8) {
- if ((isif_cfg.bayer.config_params.compress.alg !=
- ISIF_ALAW) &&
- (isif_cfg.bayer.config_params.compress.alg !=
- ISIF_DPCM)) {
- dev_dbg(isif_cfg.dev,
- "Either configure A-Law or DPCM\n");
- return -EINVAL;
- }
- isif_cfg.data_pack = ISIF_PACK_8BIT;
- } else if (pixfmt == V4L2_PIX_FMT_SBGGR16) {
- isif_cfg.bayer.config_params.compress.alg =
- ISIF_NO_COMPRESSION;
- isif_cfg.data_pack = ISIF_PACK_16BIT;
- } else
- return -EINVAL;
- isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- } else {
- if (pixfmt == V4L2_PIX_FMT_YUYV)
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_YCBYCR;
- else if (pixfmt == V4L2_PIX_FMT_UYVY)
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- else
- return -EINVAL;
- isif_cfg.data_pack = ISIF_PACK_8BIT;
- }
- return 0;
-}
-
-static u32 isif_get_pixel_format(void)
-{
- u32 pixfmt;
-
-	if (isif_cfg.if_type == VPFE_RAW_BAYER) {
-		if (isif_cfg.bayer.config_params.compress.alg == ISIF_ALAW ||
-		    isif_cfg.bayer.config_params.compress.alg == ISIF_DPCM)
-			pixfmt = V4L2_PIX_FMT_SBGGR8;
-		else
-			pixfmt = V4L2_PIX_FMT_SBGGR16;
-	} else {
- if (isif_cfg.ycbcr.pix_order == CCDC_PIXORDER_YCBYCR)
- pixfmt = V4L2_PIX_FMT_YUYV;
- else
- pixfmt = V4L2_PIX_FMT_UYVY;
- }
- return pixfmt;
-}
-
-static int isif_set_image_window(struct v4l2_rect *win)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- isif_cfg.bayer.win.top = win->top;
- isif_cfg.bayer.win.left = win->left;
- isif_cfg.bayer.win.width = win->width;
- isif_cfg.bayer.win.height = win->height;
- } else {
- isif_cfg.ycbcr.win.top = win->top;
- isif_cfg.ycbcr.win.left = win->left;
- isif_cfg.ycbcr.win.width = win->width;
- isif_cfg.ycbcr.win.height = win->height;
- }
- return 0;
-}
-
-static void isif_get_image_window(struct v4l2_rect *win)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- *win = isif_cfg.bayer.win;
- else
- *win = isif_cfg.ycbcr.win;
-}
-
-static unsigned int isif_get_line_length(void)
-{
- unsigned int len;
-
- if (isif_cfg.if_type == VPFE_RAW_BAYER) {
- if (isif_cfg.data_pack == ISIF_PACK_8BIT)
-			len = isif_cfg.bayer.win.width;
-		else if (isif_cfg.data_pack == ISIF_PACK_12BIT)
-			len = (isif_cfg.bayer.win.width * 2) +
-			      (isif_cfg.bayer.win.width >> 2);
-		else
-			len = isif_cfg.bayer.win.width * 2;
-	} else
-		len = isif_cfg.ycbcr.win.width * 2;
- return ALIGN(len, 32);
-}
-
-static int isif_set_frame_format(enum ccdc_frmfmt frm_fmt)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- isif_cfg.bayer.frm_fmt = frm_fmt;
- else
- isif_cfg.ycbcr.frm_fmt = frm_fmt;
- return 0;
-}
-
-static enum ccdc_frmfmt isif_get_frame_format(void)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- return isif_cfg.bayer.frm_fmt;
- return isif_cfg.ycbcr.frm_fmt;
-}
-
-static int isif_getfid(void)
-{
- return (regr(MODESET) >> 15) & 0x1;
-}
-
-/* misc operations */
-static void isif_setfbaddr(unsigned long addr)
-{
- regw((addr >> 21) & 0x07ff, CADU);
- regw((addr >> 5) & 0x0ffff, CADL);
-}
-
-static int isif_set_hw_if_params(struct vpfe_hw_if_param *params)
-{
- isif_cfg.if_type = params->if_type;
-
- switch (params->if_type) {
- case VPFE_BT656:
- case VPFE_BT656_10BIT:
- case VPFE_YCBCR_SYNC_8:
- isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_8BIT;
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- break;
- case VPFE_BT1120:
- case VPFE_YCBCR_SYNC_16:
- isif_cfg.ycbcr.pix_fmt = CCDC_PIXFMT_YCBCR_16BIT;
- isif_cfg.ycbcr.pix_order = CCDC_PIXORDER_CBYCRY;
- break;
- case VPFE_RAW_BAYER:
- isif_cfg.bayer.pix_fmt = CCDC_PIXFMT_RAW;
- break;
- default:
- dev_dbg(isif_cfg.dev, "Invalid interface type\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* This function will configure ISIF for YCbCr parameters. */
-static int isif_config_ycbcr(void)
-{
- struct isif_ycbcr_config *params = &isif_cfg.ycbcr;
- u32 modeset = 0, ccdcfg = 0;
-
- dev_dbg(isif_cfg.dev, "\nStarting isif_config_ycbcr...");
-
- /* configure pixel format or input mode */
- modeset = modeset | (params->pix_fmt << ISIF_INPUT_SHIFT) |
- (params->frm_fmt << ISIF_FRM_FMT_SHIFT) |
- (params->fid_pol << ISIF_FID_POL_SHIFT) |
- (params->hd_pol << ISIF_HD_POL_SHIFT) |
- (params->vd_pol << ISIF_VD_POL_SHIFT);
-
- /* pack the data to 8-bit ISIFCFG */
- switch (isif_cfg.if_type) {
- case VPFE_BT656:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- modeset |= (VPFE_PINPOL_NEGATIVE << ISIF_VD_POL_SHIFT);
- regw(3, REC656IF);
- ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR;
- break;
- case VPFE_BT656_10BIT:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- /* setup BT.656, embedded sync */
- regw(3, REC656IF);
- /* enable 10 bit mode in ccdcfg */
- ccdcfg = ccdcfg | ISIF_DATA_PACK8 | ISIF_YCINSWP_YCBCR |
- ISIF_BW656_ENABLE;
- break;
- case VPFE_BT1120:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- regw(3, REC656IF);
- break;
-
- case VPFE_YCBCR_SYNC_8:
- ccdcfg |= ISIF_DATA_PACK8;
- ccdcfg |= ISIF_YCINSWP_YCBCR;
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_8BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
- case VPFE_YCBCR_SYNC_16:
- if (params->pix_fmt != CCDC_PIXFMT_YCBCR_16BIT) {
- dev_dbg(isif_cfg.dev, "Invalid pix_fmt(input mode)\n");
- return -EINVAL;
- }
- break;
- default:
- /* should never come here */
- dev_dbg(isif_cfg.dev, "Invalid interface type\n");
- return -EINVAL;
- }
-
- regw(modeset, MODESET);
-
- /* Set up pix order */
- ccdcfg |= params->pix_order << ISIF_PIX_ORDER_SHIFT;
-
- regw(ccdcfg, CCDCFG);
-
- /* configure video window */
- if ((isif_cfg.if_type == VPFE_BT1120) ||
- (isif_cfg.if_type == VPFE_YCBCR_SYNC_16))
- isif_setwin(&params->win, params->frm_fmt, 1);
- else
- isif_setwin(&params->win, params->frm_fmt, 2);
-
- /*
-	 * configure the horizontal line offset:
-	 * this is done by rounding the width up to a multiple of 16 pixels
-	 * and multiplying by two to account for Y:Cb:Cr 4:2:2 data
- */
- regw(((((params->win.width * 2) + 31) & 0xffffffe0) >> 5), HSIZE);
-
- /* configure the memory line offset */
- if ((params->frm_fmt == CCDC_FRMFMT_INTERLACED) &&
- (params->buf_type == CCDC_BUFTYPE_FLD_INTERLEAVED))
- /* two fields are interleaved in memory */
- regw(0x00000249, SDOFST);
-
- return 0;
-}
-
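As a concrete check of the HSIZE computation above: for a 720-pixel-wide YCbCr window, ((720 * 2 + 31) & ~31) >> 5 = 45, i.e. the 1440-byte line rounded up to a multiple of 32 bytes and expressed in 32-byte units.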
-static int isif_configure(void)
-{
- if (isif_cfg.if_type == VPFE_RAW_BAYER)
- return isif_config_raw();
- return isif_config_ycbcr();
-}
-
-static int isif_close(struct device *device)
-{
- /* copy defaults to module params */
- isif_cfg.bayer.config_params = isif_config_defaults;
- return 0;
-}
-
-static const struct ccdc_hw_device isif_hw_dev = {
- .name = "ISIF",
- .owner = THIS_MODULE,
- .hw_ops = {
- .open = isif_open,
- .close = isif_close,
- .enable = isif_enable,
- .enable_out_to_sdram = isif_enable_output_to_sdram,
- .set_hw_if_params = isif_set_hw_if_params,
- .configure = isif_configure,
- .set_buftype = isif_set_buftype,
- .get_buftype = isif_get_buftype,
- .enum_pix = isif_enum_pix,
- .set_pixel_format = isif_set_pixel_format,
- .get_pixel_format = isif_get_pixel_format,
- .set_frame_format = isif_set_frame_format,
- .get_frame_format = isif_get_frame_format,
- .set_image_window = isif_set_image_window,
- .get_image_window = isif_get_image_window,
- .get_line_length = isif_get_line_length,
- .setfbaddr = isif_setfbaddr,
- .getfid = isif_getfid,
- },
-};
-
-static int isif_probe(struct platform_device *pdev)
-{
- void (*setup_pinmux)(void);
- struct resource *res;
- void __iomem *addr;
- int status = 0, i;
-
- /* Platform data holds setup_pinmux function ptr */
- if (!pdev->dev.platform_data)
- return -ENODEV;
-
- /*
- * first try to register with vpfe. If not correct platform, then we
- * don't have to iomap
- */
- status = vpfe_register_ccdc_device(&isif_hw_dev);
- if (status < 0)
- return status;
-
- setup_pinmux = pdev->dev.platform_data;
- /*
- * setup Mux configuration for ccdc which may be different for
- * different SoCs using this CCDC
- */
- setup_pinmux();
-
- i = 0;
- /* Get the ISIF base address, linearization table0 and table1 addr. */
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- status = -ENODEV;
- goto fail_nobase_res;
- }
- res = request_mem_region(res->start, resource_size(res),
- res->name);
- if (!res) {
- status = -EBUSY;
- goto fail_nobase_res;
- }
- addr = ioremap(res->start, resource_size(res));
- if (!addr) {
- status = -ENOMEM;
- goto fail_base_iomap;
- }
- switch (i) {
- case 0:
- /* ISIF base address */
- isif_cfg.base_addr = addr;
- break;
- case 1:
- /* ISIF linear tbl0 address */
- isif_cfg.linear_tbl0_addr = addr;
- break;
- default:
-			/* ISIF linear tbl1 address */
- isif_cfg.linear_tbl1_addr = addr;
- break;
- }
- i++;
- }
- isif_cfg.dev = &pdev->dev;
-
- printk(KERN_NOTICE "%s is registered with vpfe.\n",
- isif_hw_dev.name);
- return 0;
-fail_base_iomap:
- release_mem_region(res->start, resource_size(res));
- i--;
-fail_nobase_res:
- if (isif_cfg.base_addr) {
- iounmap(isif_cfg.base_addr);
- isif_cfg.base_addr = NULL;
- }
- if (isif_cfg.linear_tbl0_addr) {
- iounmap(isif_cfg.linear_tbl0_addr);
- isif_cfg.linear_tbl0_addr = NULL;
- }
-
- while (i >= 0) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (res)
- release_mem_region(res->start, resource_size(res));
- i--;
- }
- vpfe_unregister_ccdc_device(&isif_hw_dev);
- return status;
-}
-
-static int isif_remove(struct platform_device *pdev)
-{
- struct resource *res;
- int i = 0;
-
- iounmap(isif_cfg.base_addr);
- isif_cfg.base_addr = NULL;
- iounmap(isif_cfg.linear_tbl0_addr);
- isif_cfg.linear_tbl0_addr = NULL;
- iounmap(isif_cfg.linear_tbl1_addr);
- isif_cfg.linear_tbl1_addr = NULL;
- while (i < 3) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- release_mem_region(res->start, resource_size(res));
- i++;
- }
- vpfe_unregister_ccdc_device(&isif_hw_dev);
- return 0;
-}
-
-static struct platform_driver isif_driver = {
- .driver = {
- .name = "isif",
- },
- .remove = isif_remove,
- .probe = isif_probe,
-};
-
-module_platform_driver(isif_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/ti/davinci/isif_regs.h b/drivers/media/platform/ti/davinci/isif_regs.h
deleted file mode 100644
index d68d38841ae7..000000000000
--- a/drivers/media/platform/ti/davinci/isif_regs.h
+++ /dev/null
@@ -1,256 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- */
-#ifndef _ISIF_REGS_H
-#define _ISIF_REGS_H
-
-/* ISIF registers relative offsets */
-#define SYNCEN 0x00
-#define MODESET 0x04
-#define HDW 0x08
-#define VDW 0x0c
-#define PPLN 0x10
-#define LPFR 0x14
-#define SPH 0x18
-#define LNH 0x1c
-#define SLV0 0x20
-#define SLV1 0x24
-#define LNV 0x28
-#define CULH 0x2c
-#define CULV 0x30
-#define HSIZE 0x34
-#define SDOFST 0x38
-#define CADU 0x3c
-#define CADL 0x40
-#define LINCFG0 0x44
-#define LINCFG1 0x48
-#define CCOLP 0x4c
-#define CRGAIN 0x50
-#define CGRGAIN 0x54
-#define CGBGAIN 0x58
-#define CBGAIN 0x5c
-#define COFSTA 0x60
-#define FLSHCFG0 0x64
-#define FLSHCFG1 0x68
-#define FLSHCFG2 0x6c
-#define VDINT0 0x70
-#define VDINT1 0x74
-#define VDINT2 0x78
-#define MISC 0x7c
-#define CGAMMAWD 0x80
-#define REC656IF 0x84
-#define CCDCFG 0x88
-/*****************************************************
-* Defect Correction registers
-*****************************************************/
-#define DFCCTL 0x8c
-#define VDFSATLV 0x90
-#define DFCMEMCTL 0x94
-#define DFCMEM0 0x98
-#define DFCMEM1 0x9c
-#define DFCMEM2 0xa0
-#define DFCMEM3 0xa4
-#define DFCMEM4 0xa8
-/****************************************************
-* Black Clamp registers
-****************************************************/
-#define CLAMPCFG 0xac
-#define CLDCOFST 0xb0
-#define CLSV 0xb4
-#define CLHWIN0 0xb8
-#define CLHWIN1 0xbc
-#define CLHWIN2 0xc0
-#define CLVRV 0xc4
-#define CLVWIN0 0xc8
-#define CLVWIN1 0xcc
-#define CLVWIN2 0xd0
-#define CLVWIN3 0xd4
-/****************************************************
-* Lens Shading Correction
-****************************************************/
-#define DATAHOFST 0xd8
-#define DATAVOFST 0xdc
-#define LSCHVAL 0xe0
-#define LSCVVAL 0xe4
-#define TWODLSCCFG 0xe8
-#define TWODLSCOFST 0xec
-#define TWODLSCINI 0xf0
-#define TWODLSCGRBU 0xf4
-#define TWODLSCGRBL 0xf8
-#define TWODLSCGROF 0xfc
-#define TWODLSCORBU 0x100
-#define TWODLSCORBL 0x104
-#define TWODLSCOROF 0x108
-#define TWODLSCIRQEN 0x10c
-#define TWODLSCIRQST 0x110
-/****************************************************
-* Data formatter
-****************************************************/
-#define FMTCFG 0x114
-#define FMTPLEN 0x118
-#define FMTSPH 0x11c
-#define FMTLNH 0x120
-#define FMTSLV 0x124
-#define FMTLNV 0x128
-#define FMTRLEN 0x12c
-#define FMTHCNT 0x130
-#define FMTAPTR_BASE 0x134
-/* Below macro for addresses FMTAPTR0 - FMTAPTR15 */
-#define FMTAPTR(i) (FMTAPTR_BASE + (i * 4))
-#define FMTPGMVF0 0x174
-#define FMTPGMVF1 0x178
-#define FMTPGMAPU0 0x17c
-#define FMTPGMAPU1 0x180
-#define FMTPGMAPS0 0x184
-#define FMTPGMAPS1 0x188
-#define FMTPGMAPS2 0x18c
-#define FMTPGMAPS3 0x190
-#define FMTPGMAPS4 0x194
-#define FMTPGMAPS5 0x198
-#define FMTPGMAPS6 0x19c
-#define FMTPGMAPS7 0x1a0
-/************************************************
-* Color Space Converter
-************************************************/
-#define CSCCTL 0x1a4
-#define CSCM0 0x1a8
-#define CSCM1 0x1ac
-#define CSCM2 0x1b0
-#define CSCM3 0x1b4
-#define CSCM4 0x1b8
-#define CSCM5 0x1bc
-#define CSCM6 0x1c0
-#define CSCM7 0x1c4
-#define OBWIN0 0x1c8
-#define OBWIN1 0x1cc
-#define OBWIN2 0x1d0
-#define OBWIN3 0x1d4
-#define OBVAL0 0x1d8
-#define OBVAL1 0x1dc
-#define OBVAL2 0x1e0
-#define OBVAL3 0x1e4
-#define OBVAL4 0x1e8
-#define OBVAL5 0x1ec
-#define OBVAL6 0x1f0
-#define OBVAL7 0x1f4
-#define CLKCTL 0x1f8
-
-/* Masks & Shifts below */
-#define START_PX_HOR_MASK 0x7FFF
-#define NUM_PX_HOR_MASK 0x7FFF
-#define START_VER_ONE_MASK 0x7FFF
-#define START_VER_TWO_MASK 0x7FFF
-#define NUM_LINES_VER 0x7FFF
-
-/* gain - offset masks */
-#define GAIN_INTEGER_SHIFT 9
-#define OFFSET_MASK 0xFFF
-#define GAIN_SDRAM_EN_SHIFT 12
-#define GAIN_IPIPE_EN_SHIFT 13
-#define GAIN_H3A_EN_SHIFT 14
-#define OFST_SDRAM_EN_SHIFT 8
-#define OFST_IPIPE_EN_SHIFT 9
-#define OFST_H3A_EN_SHIFT 10
-#define GAIN_OFFSET_EN_MASK 0x7700
-
-/* Culling */
-#define CULL_PAT_EVEN_LINE_SHIFT 8
-
-/* CCDCFG register */
-#define ISIF_YCINSWP_RAW (0x00 << 4)
-#define ISIF_YCINSWP_YCBCR (0x01 << 4)
-#define ISIF_CCDCFG_FIDMD_LATCH_VSYNC (0x00 << 6)
-#define ISIF_CCDCFG_WENLOG_AND (0x00 << 8)
-#define ISIF_CCDCFG_TRGSEL_WEN (0x00 << 9)
-#define ISIF_CCDCFG_EXTRG_DISABLE (0x00 << 10)
-#define ISIF_LATCH_ON_VSYNC_DISABLE (0x01 << 15)
-#define ISIF_LATCH_ON_VSYNC_ENABLE (0x00 << 15)
-#define ISIF_DATA_PACK_MASK 3
-#define ISIF_DATA_PACK16 0
-#define ISIF_DATA_PACK12 1
-#define ISIF_DATA_PACK8 2
-#define ISIF_PIX_ORDER_SHIFT 11
-#define ISIF_BW656_ENABLE (0x01 << 5)
-
-/* MODESET registers */
-#define ISIF_VDHDOUT_INPUT (0x00 << 0)
-#define ISIF_INPUT_SHIFT 12
-#define ISIF_RAW_INPUT_MODE 0
-#define ISIF_FID_POL_SHIFT 4
-#define ISIF_HD_POL_SHIFT 3
-#define ISIF_VD_POL_SHIFT 2
-#define ISIF_DATAPOL_NORMAL 0
-#define ISIF_DATAPOL_SHIFT 6
-#define ISIF_EXWEN_DISABLE 0
-#define ISIF_EXWEN_SHIFT 5
-#define ISIF_FRM_FMT_SHIFT 7
-#define ISIF_DATASFT_SHIFT 8
-#define ISIF_LPF_SHIFT 14
-#define ISIF_LPF_MASK 1
-
-/* GAMMAWD registers */
-#define ISIF_ALAW_GAMMA_WD_MASK 0xF
-#define ISIF_ALAW_GAMMA_WD_SHIFT 1
-#define ISIF_ALAW_ENABLE 1
-#define ISIF_GAMMAWD_CFA_SHIFT 5
-
-/* HSIZE registers */
-#define ISIF_HSIZE_FLIP_MASK 1
-#define ISIF_HSIZE_FLIP_SHIFT 12
-
-/* MISC registers */
-#define ISIF_DPCM_EN_SHIFT 12
-#define ISIF_DPCM_PREDICTOR_SHIFT 13
-
-/* Black clamp related */
-#define ISIF_BC_MODE_COLOR_SHIFT 4
-#define ISIF_HORZ_BC_MODE_SHIFT 1
-#define ISIF_HORZ_BC_WIN_SEL_SHIFT 5
-#define ISIF_HORZ_BC_PIX_LIMIT_SHIFT 6
-#define ISIF_HORZ_BC_WIN_H_SIZE_SHIFT 8
-#define ISIF_HORZ_BC_WIN_V_SIZE_SHIFT 12
-#define ISIF_VERT_BC_RST_VAL_SEL_SHIFT 4
-#define ISIF_VERT_BC_LINE_AVE_COEF_SHIFT 8
-
-/* VDFC registers */
-#define ISIF_VDFC_EN_SHIFT 4
-#define ISIF_VDFC_CORR_MOD_SHIFT 5
-#define ISIF_VDFC_CORR_WHOLE_LN_SHIFT 7
-#define ISIF_VDFC_LEVEL_SHFT_SHIFT 8
-#define ISIF_VDFC_POS_MASK 0x1FFF
-#define ISIF_DFCMEMCTL_DFCMARST_SHIFT 2
-
-/* CSC registers */
-#define ISIF_CSC_COEF_INTEG_MASK 7
-#define ISIF_CSC_COEF_DECIMAL_MASK 0x1f
-#define ISIF_CSC_COEF_INTEG_SHIFT 5
-#define ISIF_CSCM_MSB_SHIFT 8
-#define ISIF_DF_CSC_SPH_MASK 0x1FFF
-#define ISIF_DF_CSC_LNH_MASK 0x1FFF
-#define ISIF_DF_CSC_SLV_MASK 0x1FFF
-#define ISIF_DF_CSC_LNV_MASK 0x1FFF
-#define ISIF_DF_NUMLINES 0x7FFF
-#define ISIF_DF_NUMPIX 0x1FFF
-
-/* Offsets for LSC/DFC/Gain */
-#define ISIF_DATA_H_OFFSET_MASK 0x1FFF
-#define ISIF_DATA_V_OFFSET_MASK 0x1FFF
-
-/* Linearization */
-#define ISIF_LIN_CORRSFT_SHIFT 4
-#define ISIF_LIN_SCALE_FACT_INTEG_SHIFT 10
-
-
-/* Pattern registers */
-#define ISIF_PG_EN (1 << 3)
-#define ISIF_SEL_PG_SRC (3 << 4)
-#define ISIF_PG_VD_POL_SHIFT 0
-#define ISIF_PG_HD_POL_SHIFT 1
-
-/* SYNCEN register */
-#define ISIF_SYNCEN_VDHDEN_MASK (1 << 0)
-#define ISIF_SYNCEN_WEN_MASK (1 << 1)
-#define ISIF_SYNCEN_WEN_SHIFT 1
-
-#endif
diff --git a/drivers/media/platform/ti/davinci/vpbe.c b/drivers/media/platform/ti/davinci/vpbe.c
index 5f0aeb744e81..509ecc84624e 100644
--- a/drivers/media/platform/ti/davinci/vpbe.c
+++ b/drivers/media/platform/ti/davinci/vpbe.c
@@ -280,7 +280,7 @@ static int vpbe_set_default_output(struct vpbe_device *vpbe_dev)
* vpbe_get_output - Get output
* @vpbe_dev: vpbe device ptr
*
- * return current vpbe output to the the index
+ * return current vpbe output to the index
*/
static unsigned int vpbe_get_output(struct vpbe_device *vpbe_dev)
{
diff --git a/drivers/media/platform/ti/davinci/vpfe_capture.c b/drivers/media/platform/ti/davinci/vpfe_capture.c
deleted file mode 100644
index 0a2226b321d7..000000000000
--- a/drivers/media/platform/ti/davinci/vpfe_capture.c
+++ /dev/null
@@ -1,1902 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2008-2009 Texas Instruments Inc
- *
- * Driver name : VPFE Capture driver
- * The VPFE Capture driver allows applications to capture and stream video
- * frames on DaVinci SoCs (DM6446, DM355 etc.) from a YUV source such as the
- * TVP5146, or Raw Bayer RGB image data from an image sensor
- * such as Micron's MT9T001, MT9T031 etc.
- *
- * These SoCs have, in common, a Video Processing Subsystem (VPSS) that
- * consists of a Video Processing Front End (VPFE) for capturing
- * video/raw image data and Video Processing Back End (VPBE) for displaying
- * YUV data through an in-built analog encoder or Digital LCD port. This
- * driver is for capture through VPFE. A typical EVM using these SoCs has
- * the following high-level configuration.
- *
- * decoder(TVP5146/ YUV/
- * MT9T001) --> Raw Bayer RGB ---> MUX -> VPFE (CCDC/ISIF)
- * data input | |
- * V |
- * SDRAM |
- * V
- * Image Processor
- * |
- * V
- * SDRAM
- * The data flow happens from a decoder connected to the VPFE over a
- * YUV embedded (BT.656/BT.1120), separate sync, or raw Bayer RGB interface
- * and to the input of the VPFE through an optional MUX (if more inputs are
- * to be interfaced on the EVM). The input data is first passed through the
- * CCDC (CCD Controller, a.k.a. Image Sensor Interface, ISIF). The CCDC
- * does little or no processing on YUV data, and pre-processes Raw
- * Bayer RGB data through modules such as Defect Pixel Correction (DFC),
- * Color Space Conversion (CSC), data gain/offset, etc. After this, data
- * can be written to SDRAM or can be connected to an image processing
- * block such as IPIPE (on DM355 only).
- *
- * Features supported
- * - MMAP IO
- * - Capture using TVP5146 over BT.656
- * - support for interfacing decoders using sub device model
- * - Work with DM355 or DM6446 CCDC to do Raw Bayer RGB/YUV
- * data capture to SDRAM.
- * TODO list
- * - Support multiple REQBUF after open
- * - Support for de-allocating buffers through REQBUF
- * - Support for Raw Bayer RGB capture
- * - Support for chaining Image Processor
- * - Support for static allocation of buffers
- * - Support for USERPTR IO
- * - Support for STREAMON before QBUF
- * - Support for control ioctls
- */
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <media/v4l2-common.h>
-#include <linux/io.h>
-#include <media/davinci/vpfe_capture.h>
-#include "ccdc_hw_device.h"
-
-static int debug;
-static u32 numbuffers = 3;
-static u32 bufsize = (720 * 576 * 2);
-
-module_param(numbuffers, uint, S_IRUGO);
-module_param(bufsize, uint, S_IRUGO);
-module_param(debug, int, 0644);
-
-MODULE_PARM_DESC(numbuffers, "buffer count (default:3)");
-MODULE_PARM_DESC(bufsize, "buffer size in bytes (default:720 x 576 x 2)");
-MODULE_PARM_DESC(debug, "Debug level 0-1");
-
-MODULE_DESCRIPTION("VPFE Video for Linux Capture Driver");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Texas Instruments");
-
-/* standard information */
-struct vpfe_standard {
- v4l2_std_id std_id;
- unsigned int width;
- unsigned int height;
- struct v4l2_fract pixelaspect;
- /* 0 - progressive, 1 - interlaced */
- int frame_format;
-};
-
-/* ccdc configuration */
-struct ccdc_config {
-	/* This makes sure vpfe is probed and ready to go */
- int vpfe_probed;
- /* name of ccdc device */
- char name[32];
-};
-
-/* data structures */
-static struct vpfe_config_params config_params = {
- .min_numbuffers = 3,
- .numbuffers = 3,
- .min_bufsize = 720 * 480 * 2,
- .device_bufsize = 720 * 576 * 2,
-};
-
-/* ccdc device registered */
-static const struct ccdc_hw_device *ccdc_dev;
-/* lock for accessing ccdc information */
-static DEFINE_MUTEX(ccdc_lock);
-/* ccdc configuration */
-static struct ccdc_config *ccdc_cfg;
-
-static const struct vpfe_standard vpfe_standards[] = {
- {V4L2_STD_525_60, 720, 480, {11, 10}, 1},
- {V4L2_STD_625_50, 720, 576, {54, 59}, 1},
-};
-
-/* Used when raw Bayer image from ccdc is directly captured to SDRAM */
-static const struct vpfe_pixel_format vpfe_pix_fmts[] = {
- {
- .pixelformat = V4L2_PIX_FMT_SBGGR8,
- .bpp = 1,
- },
- {
- .pixelformat = V4L2_PIX_FMT_SBGGR16,
- .bpp = 2,
- },
- {
- .pixelformat = V4L2_PIX_FMT_SGRBG10DPCM8,
- .bpp = 1,
- },
- {
- .pixelformat = V4L2_PIX_FMT_UYVY,
- .bpp = 2,
- },
- {
- .pixelformat = V4L2_PIX_FMT_YUYV,
- .bpp = 2,
- },
- {
- .pixelformat = V4L2_PIX_FMT_NV12,
- .bpp = 1,
- },
-};
-
-/*
- * vpfe_lookup_pix_format()
- * lookup an entry in the vpfe pix format table based on pix_format
- */
-static const struct vpfe_pixel_format *vpfe_lookup_pix_format(u32 pix_format)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(vpfe_pix_fmts); i++) {
- if (pix_format == vpfe_pix_fmts[i].pixelformat)
- return &vpfe_pix_fmts[i];
- }
- return NULL;
-}
-
-/*
- * vpfe_register_ccdc_device. CCDC module calls this to
- * register with vpfe capture
- */
-int vpfe_register_ccdc_device(const struct ccdc_hw_device *dev)
-{
- int ret = 0;
- printk(KERN_NOTICE "vpfe_register_ccdc_device: %s\n", dev->name);
-
- if (!dev->hw_ops.open ||
- !dev->hw_ops.enable ||
- !dev->hw_ops.set_hw_if_params ||
- !dev->hw_ops.configure ||
- !dev->hw_ops.set_buftype ||
- !dev->hw_ops.get_buftype ||
- !dev->hw_ops.enum_pix ||
- !dev->hw_ops.set_frame_format ||
- !dev->hw_ops.get_frame_format ||
- !dev->hw_ops.get_pixel_format ||
- !dev->hw_ops.set_pixel_format ||
- !dev->hw_ops.set_image_window ||
- !dev->hw_ops.get_image_window ||
- !dev->hw_ops.get_line_length ||
- !dev->hw_ops.getfid)
- return -EINVAL;
-
- mutex_lock(&ccdc_lock);
- if (!ccdc_cfg) {
- /*
-		 * TODO. Will this ever happen? If so, we need to fix it.
- * Probably we need to add the request to a linked list and
- * walk through it during vpfe probe
- */
- printk(KERN_ERR "vpfe capture not initialized\n");
- ret = -EFAULT;
- goto unlock;
- }
-
- if (strcmp(dev->name, ccdc_cfg->name)) {
- /* ignore this ccdc */
- ret = -EINVAL;
- goto unlock;
- }
-
- if (ccdc_dev) {
- printk(KERN_ERR "ccdc already registered\n");
- ret = -EINVAL;
- goto unlock;
- }
-
- ccdc_dev = dev;
-unlock:
- mutex_unlock(&ccdc_lock);
- return ret;
-}
-EXPORT_SYMBOL(vpfe_register_ccdc_device);
-
-/*
- * vpfe_unregister_ccdc_device. CCDC module calls this to
- * unregister with vpfe capture
- */
-void vpfe_unregister_ccdc_device(const struct ccdc_hw_device *dev)
-{
- if (!dev) {
- printk(KERN_ERR "invalid ccdc device ptr\n");
- return;
- }
-
- printk(KERN_NOTICE "vpfe_unregister_ccdc_device, dev->name = %s\n",
- dev->name);
-
- if (strcmp(dev->name, ccdc_cfg->name)) {
- /* ignore this ccdc */
- return;
- }
-
- mutex_lock(&ccdc_lock);
- ccdc_dev = NULL;
- mutex_unlock(&ccdc_lock);
-}
-EXPORT_SYMBOL(vpfe_unregister_ccdc_device);
-
-/*
- * vpfe_config_ccdc_image_format()
- * For a pix format, configure the ccdc to set up the capture
- */
-static int vpfe_config_ccdc_image_format(struct vpfe_device *vpfe_dev)
-{
- enum ccdc_frmfmt frm_fmt = CCDC_FRMFMT_INTERLACED;
- int ret = 0;
-
- if (ccdc_dev->hw_ops.set_pixel_format(
- vpfe_dev->fmt.fmt.pix.pixelformat) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "couldn't set pix format in ccdc\n");
- return -EINVAL;
- }
- /* configure the image window */
- ccdc_dev->hw_ops.set_image_window(&vpfe_dev->crop);
-
- switch (vpfe_dev->fmt.fmt.pix.field) {
- case V4L2_FIELD_INTERLACED:
- /* do nothing, since it is default */
- ret = ccdc_dev->hw_ops.set_buftype(
- CCDC_BUFTYPE_FLD_INTERLEAVED);
- break;
- case V4L2_FIELD_NONE:
- frm_fmt = CCDC_FRMFMT_PROGRESSIVE;
- /* buffer type only applicable for interlaced scan */
- break;
- case V4L2_FIELD_SEQ_TB:
- ret = ccdc_dev->hw_ops.set_buftype(
- CCDC_BUFTYPE_FLD_SEPARATED);
- break;
- default:
- return -EINVAL;
- }
-
- /* set the frame format */
- if (!ret)
- ret = ccdc_dev->hw_ops.set_frame_format(frm_fmt);
- return ret;
-}
-
-/*
- * vpfe_config_image_format()
- * For a given standard, this function sets up the default
- * pix format & crop values in the vpfe device and ccdc. It
- * starts with default values based on the standard table.
- * It then checks if the sub device supports get_fmt and overrides the
- * values based on that. Crop values are set to match the scan resolution
- * starting at 0,0. It calls vpfe_config_ccdc_image_format() to set the
- * values in the ccdc.
- */
-static int vpfe_config_image_format(struct vpfe_device *vpfe_dev,
- v4l2_std_id std_id)
-{
- struct vpfe_subdev_info *sdinfo = vpfe_dev->current_subdev;
- struct v4l2_subdev_format fmt = {
- .which = V4L2_SUBDEV_FORMAT_ACTIVE,
- };
- struct v4l2_mbus_framefmt *mbus_fmt = &fmt.format;
- struct v4l2_pix_format *pix = &vpfe_dev->fmt.fmt.pix;
- int i, ret;
-
- for (i = 0; i < ARRAY_SIZE(vpfe_standards); i++) {
- if (vpfe_standards[i].std_id & std_id) {
- vpfe_dev->std_info.active_pixels =
- vpfe_standards[i].width;
- vpfe_dev->std_info.active_lines =
- vpfe_standards[i].height;
- vpfe_dev->std_info.frame_format =
- vpfe_standards[i].frame_format;
- vpfe_dev->std_index = i;
- break;
- }
- }
-
- if (i == ARRAY_SIZE(vpfe_standards)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "standard not supported\n");
- return -EINVAL;
- }
-
- vpfe_dev->crop.top = 0;
- vpfe_dev->crop.left = 0;
- vpfe_dev->crop.width = vpfe_dev->std_info.active_pixels;
- vpfe_dev->crop.height = vpfe_dev->std_info.active_lines;
- pix->width = vpfe_dev->crop.width;
- pix->height = vpfe_dev->crop.height;
-
- /* first field and frame format based on standard frame format */
- if (vpfe_dev->std_info.frame_format) {
- pix->field = V4L2_FIELD_INTERLACED;
- /* assume V4L2_PIX_FMT_UYVY as default */
- pix->pixelformat = V4L2_PIX_FMT_UYVY;
- v4l2_fill_mbus_format(mbus_fmt, pix,
- MEDIA_BUS_FMT_YUYV10_2X10);
- } else {
- pix->field = V4L2_FIELD_NONE;
- /* assume V4L2_PIX_FMT_SBGGR8 */
- pix->pixelformat = V4L2_PIX_FMT_SBGGR8;
- v4l2_fill_mbus_format(mbus_fmt, pix,
- MEDIA_BUS_FMT_SBGGR8_1X8);
- }
-
- /* if sub device supports get_fmt, override the defaults */
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id, pad, get_fmt, NULL, &fmt);
-
- if (ret && ret != -ENOIOCTLCMD) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "error in getting get_fmt from sub device\n");
- return ret;
- }
- v4l2_fill_pix_format(pix, mbus_fmt);
- pix->bytesperline = pix->width * 2;
- pix->sizeimage = pix->bytesperline * pix->height;
-
- /* Sets the values in CCDC */
- ret = vpfe_config_ccdc_image_format(vpfe_dev);
- if (ret)
- return ret;
-
- /* Update the values of sizeimage and bytesperline */
- pix->bytesperline = ccdc_dev->hw_ops.get_line_length();
- pix->sizeimage = pix->bytesperline * pix->height;
-
- return 0;
-}
-
-static int vpfe_initialize_device(struct vpfe_device *vpfe_dev)
-{
- int ret;
-
- /* set first input of current subdevice as the current input */
- vpfe_dev->current_input = 0;
-
- /* set default standard */
- vpfe_dev->std_index = 0;
-
- /* Configure the default format information */
- ret = vpfe_config_image_format(vpfe_dev,
- vpfe_standards[vpfe_dev->std_index].std_id);
- if (ret)
- return ret;
-
- /* now open the ccdc device to initialize it */
- mutex_lock(&ccdc_lock);
- if (!ccdc_dev) {
- v4l2_err(&vpfe_dev->v4l2_dev, "ccdc device not registered\n");
- ret = -ENODEV;
- goto unlock;
- }
-
- if (!try_module_get(ccdc_dev->owner)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Couldn't lock ccdc module\n");
- ret = -ENODEV;
- goto unlock;
- }
- ret = ccdc_dev->hw_ops.open(vpfe_dev->pdev);
- if (!ret)
- vpfe_dev->initialized = 1;
-
- /* Clear all VPFE/CCDC interrupts */
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(-1);
-
-unlock:
- mutex_unlock(&ccdc_lock);
- return ret;
-}
-
-/*
- * vpfe_open : It creates an object of the file handle structure and
- * stores it in the private_data member of the file pointer
- */
-static int vpfe_open(struct file *file)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct video_device *vdev = video_devdata(file);
- struct vpfe_fh *fh;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_open\n");
-
- if (!vpfe_dev->cfg->num_subdevs) {
- v4l2_err(&vpfe_dev->v4l2_dev, "No decoder registered\n");
- return -ENODEV;
- }
-
- /* Allocate memory for the file handle object */
- fh = kmalloc(sizeof(*fh), GFP_KERNEL);
- if (!fh)
- return -ENOMEM;
-
- /* store pointer to fh in private_data member of file */
- file->private_data = fh;
- fh->vpfe_dev = vpfe_dev;
- v4l2_fh_init(&fh->fh, vdev);
- mutex_lock(&vpfe_dev->lock);
-	/* If decoder is not initialized, initialize it */
- if (!vpfe_dev->initialized) {
- if (vpfe_initialize_device(vpfe_dev)) {
- mutex_unlock(&vpfe_dev->lock);
- v4l2_fh_exit(&fh->fh);
- kfree(fh);
- return -ENODEV;
- }
- }
- /* Increment device usrs counter */
- vpfe_dev->usrs++;
- /* Set io_allowed member to false */
- fh->io_allowed = 0;
- v4l2_fh_add(&fh->fh);
- mutex_unlock(&vpfe_dev->lock);
- return 0;
-}
-
-static void vpfe_schedule_next_buffer(struct vpfe_device *vpfe_dev)
-{
- unsigned long addr;
-
- vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
- struct videobuf_buffer, queue);
- list_del(&vpfe_dev->next_frm->queue);
- vpfe_dev->next_frm->state = VIDEOBUF_ACTIVE;
- addr = videobuf_to_dma_contig(vpfe_dev->next_frm);
-
- ccdc_dev->hw_ops.setfbaddr(addr);
-}
-
-static void vpfe_schedule_bottom_field(struct vpfe_device *vpfe_dev)
-{
- unsigned long addr;
-
- addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
- addr += vpfe_dev->field_off;
- ccdc_dev->hw_ops.setfbaddr(addr);
-}
-
-static void vpfe_process_buffer_complete(struct vpfe_device *vpfe_dev)
-{
- vpfe_dev->cur_frm->ts = ktime_get_ns();
- vpfe_dev->cur_frm->state = VIDEOBUF_DONE;
- vpfe_dev->cur_frm->size = vpfe_dev->fmt.fmt.pix.sizeimage;
- wake_up_interruptible(&vpfe_dev->cur_frm->done);
- vpfe_dev->cur_frm = vpfe_dev->next_frm;
-}
-
-/* ISR for VINT0*/
-static irqreturn_t vpfe_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
- enum v4l2_field field;
- int fid;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nStarting vpfe_isr...\n");
- field = vpfe_dev->fmt.fmt.pix.field;
-
- /* if streaming not started, don't do anything */
- if (!vpfe_dev->started)
- goto clear_intr;
-
-	/* this is applicable only for DM6446 */
- if (ccdc_dev->hw_ops.reset)
- ccdc_dev->hw_ops.reset();
-
- if (field == V4L2_FIELD_NONE) {
- /* handle progressive frame capture */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "frame format is progressive...\n");
- if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
- vpfe_process_buffer_complete(vpfe_dev);
- goto clear_intr;
- }
-
- /* interlaced or TB capture check which field we are in hardware */
- fid = ccdc_dev->hw_ops.getfid();
-
- /* switch the software maintained field id */
- vpfe_dev->field_id ^= 1;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "field id = %x:%x.\n",
- fid, vpfe_dev->field_id);
- if (fid == vpfe_dev->field_id) {
-		/* we are in sync here, continue */
- if (fid == 0) {
- /*
- * One frame is just being captured. If the next frame
- * is available, release the current frame and move on
- */
- if (vpfe_dev->cur_frm != vpfe_dev->next_frm)
- vpfe_process_buffer_complete(vpfe_dev);
- /*
- * based on whether the two fields are stored
-			 * interleaved or separately in memory, reconfigure
- * the CCDC memory address
- */
- if (field == V4L2_FIELD_SEQ_TB)
- vpfe_schedule_bottom_field(vpfe_dev);
- goto clear_intr;
- }
- /*
-		 * if one field has just been captured, configure
-		 * the next frame: get the next frame from the DMA
-		 * queue; if no frame is available, hold on to the
-		 * current buffer
- */
- spin_lock(&vpfe_dev->dma_queue_lock);
- if (!list_empty(&vpfe_dev->dma_queue) &&
- vpfe_dev->cur_frm == vpfe_dev->next_frm)
- vpfe_schedule_next_buffer(vpfe_dev);
- spin_unlock(&vpfe_dev->dma_queue_lock);
- } else if (fid == 0) {
- /*
- * out of sync. Recover from any hardware out-of-sync.
-		 * May lose one frame
- */
- vpfe_dev->field_id = fid;
- }
-clear_intr:
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(irq);
-
- return IRQ_HANDLED;
-}
-
-/* vdint1_isr - isr handler for VINT1 interrupt */
-static irqreturn_t vdint1_isr(int irq, void *dev_id)
-{
- struct vpfe_device *vpfe_dev = dev_id;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "\nInside vdint1_isr...\n");
-
- /* if streaming not started, don't do anything */
- if (!vpfe_dev->started) {
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(irq);
- return IRQ_HANDLED;
- }
-
- spin_lock(&vpfe_dev->dma_queue_lock);
- if ((vpfe_dev->fmt.fmt.pix.field == V4L2_FIELD_NONE) &&
- !list_empty(&vpfe_dev->dma_queue) &&
- vpfe_dev->cur_frm == vpfe_dev->next_frm)
- vpfe_schedule_next_buffer(vpfe_dev);
- spin_unlock(&vpfe_dev->dma_queue_lock);
-
- if (vpfe_dev->cfg->clr_intr)
- vpfe_dev->cfg->clr_intr(irq);
-
- return IRQ_HANDLED;
-}
-
-static void vpfe_detach_irq(struct vpfe_device *vpfe_dev)
-{
- enum ccdc_frmfmt frame_format;
-
- frame_format = ccdc_dev->hw_ops.get_frame_format();
- if (frame_format == CCDC_FRMFMT_PROGRESSIVE)
- free_irq(vpfe_dev->ccdc_irq1, vpfe_dev);
-}
-
-static int vpfe_attach_irq(struct vpfe_device *vpfe_dev)
-{
- enum ccdc_frmfmt frame_format;
-
- frame_format = ccdc_dev->hw_ops.get_frame_format();
- if (frame_format == CCDC_FRMFMT_PROGRESSIVE) {
- return request_irq(vpfe_dev->ccdc_irq1, vdint1_isr,
- 0, "vpfe_capture1",
- vpfe_dev);
- }
- return 0;
-}
-
-/* vpfe_stop_ccdc_capture: stop streaming in ccdc/isif */
-static void vpfe_stop_ccdc_capture(struct vpfe_device *vpfe_dev)
-{
- vpfe_dev->started = 0;
- ccdc_dev->hw_ops.enable(0);
- if (ccdc_dev->hw_ops.enable_out_to_sdram)
- ccdc_dev->hw_ops.enable_out_to_sdram(0);
-}
-
-/*
- * vpfe_release : This function deletes the buffer queue, frees the
- * buffers and the vpfe file handle
- */
-static int vpfe_release(struct file *file)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_release\n");
-
- /* Get the device lock */
- mutex_lock(&vpfe_dev->lock);
- /* if this instance is doing IO */
- if (fh->io_allowed) {
- if (vpfe_dev->started) {
- sdinfo = vpfe_dev->current_subdev;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev,
- sdinfo->grp_id,
- video, s_stream, 0);
- if (ret && (ret != -ENOIOCTLCMD))
- v4l2_err(&vpfe_dev->v4l2_dev,
- "stream off failed in subdev\n");
- vpfe_stop_ccdc_capture(vpfe_dev);
- vpfe_detach_irq(vpfe_dev);
- videobuf_streamoff(&vpfe_dev->buffer_queue);
- }
- vpfe_dev->io_usrs = 0;
- vpfe_dev->numbuffers = config_params.numbuffers;
- videobuf_stop(&vpfe_dev->buffer_queue);
- videobuf_mmap_free(&vpfe_dev->buffer_queue);
- }
-
- /* Decrement device usrs counter */
- vpfe_dev->usrs--;
- v4l2_fh_del(&fh->fh);
- v4l2_fh_exit(&fh->fh);
- /* If this is the last file handle */
- if (!vpfe_dev->usrs) {
- vpfe_dev->initialized = 0;
- if (ccdc_dev->hw_ops.close)
- ccdc_dev->hw_ops.close(vpfe_dev->pdev);
- module_put(ccdc_dev->owner);
- }
- mutex_unlock(&vpfe_dev->lock);
- file->private_data = NULL;
- /* Free memory allocated to file handle object */
- kfree(fh);
- return 0;
-}
-
-/*
- * vpfe_mmap : It is used to map kernel space buffers
- * into user space
- */
-static int vpfe_mmap(struct file *file, struct vm_area_struct *vma)
-{
- /* Get the device object and file handle object */
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_mmap\n");
-
- return videobuf_mmap_mapper(&vpfe_dev->buffer_queue, vma);
-}
-
-/*
- * vpfe_poll: It is used for select/poll system call
- */
-static __poll_t vpfe_poll(struct file *file, poll_table *wait)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_poll\n");
-
- if (vpfe_dev->started)
- return videobuf_poll_stream(file,
- &vpfe_dev->buffer_queue, wait);
- return 0;
-}
-
-/* vpfe capture driver file operations */
-static const struct v4l2_file_operations vpfe_fops = {
- .owner = THIS_MODULE,
- .open = vpfe_open,
- .release = vpfe_release,
- .unlocked_ioctl = video_ioctl2,
- .mmap = vpfe_mmap,
- .poll = vpfe_poll
-};
-
-/*
- * vpfe_check_format()
- * This function adjusts the input pixel format as per hardware
- * capabilities and updates the same in pixfmt.
- * The following algorithm is used:
- *
- * If the given pixformat is not in the vpfe list of pix formats or not
- * supported by the hardware, the current value of pixformat in the device
- * is used.
- * If the given field is not supported, then the current field is used. If the
- * field differs from the current one, it is matched with that of the sub device.
- * Minimum height is 2 lines for an interlaced or TB field and 1 line for
- * progressive. Maximum height is clamped to the active lines of the scan.
- * Minimum width is 32 bytes in memory and width is clamped to the active
- * pixels of the scan.
- * bytesperline is a multiple of 32.
- */
-static const struct vpfe_pixel_format *
- vpfe_check_format(struct vpfe_device *vpfe_dev,
- struct v4l2_pix_format *pixfmt)
-{
- u32 min_height = 1, min_width = 32, max_width, max_height;
- const struct vpfe_pixel_format *vpfe_pix_fmt;
- u32 pix;
- int temp, found;
-
- vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- if (!vpfe_pix_fmt) {
- /*
- * use current pixel format in the vpfe device. We
- * will find this pix format in the table
- */
- pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
- vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- }
-
- /* check if hw supports it */
- temp = 0;
- found = 0;
- while (ccdc_dev->hw_ops.enum_pix(&pix, temp) >= 0) {
- if (vpfe_pix_fmt->pixelformat == pix) {
- found = 1;
- break;
- }
- temp++;
- }
-
- if (!found) {
- /* use current pixel format */
- pixfmt->pixelformat = vpfe_dev->fmt.fmt.pix.pixelformat;
- /*
- * Since this is currently used in the vpfe device, we
- * will find this pix format in the table
- */
- vpfe_pix_fmt = vpfe_lookup_pix_format(pixfmt->pixelformat);
- }
-
- /* check what field format is supported */
- if (pixfmt->field == V4L2_FIELD_ANY) {
- /* if field is any, use current value as default */
- pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
- }
-
- /*
- * if field is not same as current field in the vpfe device
- * try matching the field with the sub device field
- */
- if (vpfe_dev->fmt.fmt.pix.field != pixfmt->field) {
- /*
- * If field value is not in the supported fields, use current
- * field used in the device as default
- */
- switch (pixfmt->field) {
- case V4L2_FIELD_INTERLACED:
- case V4L2_FIELD_SEQ_TB:
- /* if sub device is supporting progressive, use that */
- if (!vpfe_dev->std_info.frame_format)
- pixfmt->field = V4L2_FIELD_NONE;
- break;
- case V4L2_FIELD_NONE:
- if (vpfe_dev->std_info.frame_format)
- pixfmt->field = V4L2_FIELD_INTERLACED;
- break;
-
- default:
- /* use current field as default */
- pixfmt->field = vpfe_dev->fmt.fmt.pix.field;
- break;
- }
- }
-
- /* Now adjust image resolutions supported */
- if (pixfmt->field == V4L2_FIELD_INTERLACED ||
- pixfmt->field == V4L2_FIELD_SEQ_TB)
- min_height = 2;
-
- max_width = vpfe_dev->std_info.active_pixels;
- max_height = vpfe_dev->std_info.active_lines;
- min_width /= vpfe_pix_fmt->bpp;
-
- v4l2_info(&vpfe_dev->v4l2_dev, "width = %d, height = %d, bpp = %d\n",
- pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp);
-
-	pixfmt->width = clamp(pixfmt->width, min_width, max_width);
-	pixfmt->height = clamp(pixfmt->height, min_height, max_height);
-
- /* If interlaced, adjust height to be a multiple of 2 */
- if (pixfmt->field == V4L2_FIELD_INTERLACED)
- pixfmt->height &= (~1);
- /*
- * recalculate bytesperline and sizeimage since width
- * and height might have changed
- */
- pixfmt->bytesperline = (((pixfmt->width * vpfe_pix_fmt->bpp) + 31)
- & ~31);
- if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12)
- pixfmt->sizeimage =
- pixfmt->bytesperline * pixfmt->height +
- ((pixfmt->bytesperline * pixfmt->height) >> 1);
- else
- pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height;
-
- v4l2_info(&vpfe_dev->v4l2_dev, "adjusted width = %d, height = %d, bpp = %d, bytesperline = %d, sizeimage = %d\n",
- pixfmt->width, pixfmt->height, vpfe_pix_fmt->bpp,
- pixfmt->bytesperline, pixfmt->sizeimage);
- return vpfe_pix_fmt;
-}
-
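The bytesperline/sizeimage adjustment at the end of vpfe_check_format() can be mirrored in a small standalone sketch; the helper name and parameters are illustrative, not part of the driver:

/* bytesperline is rounded up to a multiple of 32 bytes; NV12 adds a
 * half-size chroma plane on top of the luma plane.
 */
static void calc_buffer_sizes(unsigned int width, unsigned int height,
			      unsigned int bpp, int is_nv12,
			      unsigned int *bytesperline,
			      unsigned int *sizeimage)
{
	*bytesperline = ((width * bpp) + 31) & ~31;
	*sizeimage = *bytesperline * height;
	if (is_nv12)
		*sizeimage += *sizeimage >> 1;
}

For 720x480 UYVY (bpp = 2) this yields bytesperline = 1440 and sizeimage = 691200; for 720x480 NV12 (bpp = 1) it yields bytesperline = 736 and sizeimage = 529920.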
-static int vpfe_querycap(struct file *file, void *priv,
- struct v4l2_capability *cap)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querycap\n");
-
- strscpy(cap->driver, CAPTURE_DRV_NAME, sizeof(cap->driver));
- strscpy(cap->bus_info, "VPFE", sizeof(cap->bus_info));
- strscpy(cap->card, vpfe_dev->cfg->card_name, sizeof(cap->card));
- return 0;
-}
-
-static int vpfe_g_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_fmt_vid_cap\n");
- /* Fill in the information about format */
- *fmt = vpfe_dev->fmt;
- return 0;
-}
-
-static int vpfe_enum_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_fmtdesc *fmt)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- const struct vpfe_pixel_format *pix_fmt;
- u32 pix;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt_vid_cap\n");
-
- if (ccdc_dev->hw_ops.enum_pix(&pix, fmt->index) < 0)
- return -EINVAL;
-
- /* Fill in the information about format */
- pix_fmt = vpfe_lookup_pix_format(pix);
- if (pix_fmt) {
- fmt->pixelformat = pix_fmt->pixelformat;
- return 0;
- }
- return -EINVAL;
-}
-
-static int vpfe_s_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *fmt)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- const struct vpfe_pixel_format *pix_fmts;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt_vid_cap\n");
-
- /* If streaming is started, return error */
- if (vpfe_dev->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is started\n");
- return -EBUSY;
- }
-
- /* Check for valid frame format */
- pix_fmts = vpfe_check_format(vpfe_dev, &fmt->fmt.pix);
- if (!pix_fmts)
- return -EINVAL;
-
- /* store the pixel format in the device object */
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- /* First detach any IRQ if currently attached */
- vpfe_detach_irq(vpfe_dev);
- vpfe_dev->fmt = *fmt;
- /* set image capture parameters in the ccdc */
- ret = vpfe_config_ccdc_image_format(vpfe_dev);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_try_fmt_vid_cap(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- const struct vpfe_pixel_format *pix_fmts;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt_vid_cap\n");
-
- pix_fmts = vpfe_check_format(vpfe_dev, &f->fmt.pix);
- if (!pix_fmts)
- return -EINVAL;
- return 0;
-}
-
-/*
- * vpfe_get_subdev_input_index - Get subdev index and subdev input index for a
- * given app input index
- */
-static int vpfe_get_subdev_input_index(struct vpfe_device *vpfe_dev,
- int *subdev_index,
- int *subdev_input_index,
- int app_input_index)
-{
- struct vpfe_config *cfg = vpfe_dev->cfg;
- struct vpfe_subdev_info *sdinfo;
- int i, j = 0;
-
- for (i = 0; i < cfg->num_subdevs; i++) {
- sdinfo = &cfg->sub_devs[i];
- if (app_input_index < (j + sdinfo->num_inputs)) {
- *subdev_index = i;
- *subdev_input_index = app_input_index - j;
- return 0;
- }
- j += sdinfo->num_inputs;
- }
- return -EINVAL;
-}
-
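A worked example of the mapping above, assuming a hypothetical board where the first sub device exposes 2 inputs and the second exposes 3: app input indices 0-1 map to (subdev 0, input 0-1), and app input indices 2-4 map to (subdev 1, input 0-2).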
-/*
- * vpfe_get_app_input_index - Get the app input index for a given subdev input
- * index. The driver stores the input index of the current sub device and
- * translates it when the application requests the current input.
- */
-static int vpfe_get_app_input_index(struct vpfe_device *vpfe_dev,
- int *app_input_index)
-{
- struct vpfe_config *cfg = vpfe_dev->cfg;
- struct vpfe_subdev_info *sdinfo;
- int i, j = 0;
-
- for (i = 0; i < cfg->num_subdevs; i++) {
- sdinfo = &cfg->sub_devs[i];
- if (!strcmp(sdinfo->name, vpfe_dev->current_subdev->name)) {
- if (vpfe_dev->current_input >= sdinfo->num_inputs)
- return -1;
- *app_input_index = j + vpfe_dev->current_input;
- return 0;
- }
- j += sdinfo->num_inputs;
- }
- return -EINVAL;
-}
-
-static int vpfe_enum_input(struct file *file, void *priv,
- struct v4l2_input *inp)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_subdev_info *sdinfo;
-	int subdev, index;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_input\n");
-
- if (vpfe_get_subdev_input_index(vpfe_dev,
- &subdev,
- &index,
- inp->index) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "input information not found for the subdev\n");
- return -EINVAL;
- }
- sdinfo = &vpfe_dev->cfg->sub_devs[subdev];
- *inp = sdinfo->inputs[index];
- return 0;
-}
-
-static int vpfe_g_input(struct file *file, void *priv, unsigned int *index)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_input\n");
-
- return vpfe_get_app_input_index(vpfe_dev, index);
-}
-
-
-static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct v4l2_subdev *sd;
- struct vpfe_subdev_info *sdinfo;
- int subdev_index, inp_index;
- struct vpfe_route *route;
- u32 input, output;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- /*
- * If streaming is started return device busy
- * error
- */
- if (vpfe_dev->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Streaming is on\n");
- ret = -EBUSY;
- goto unlock_out;
- }
- ret = vpfe_get_subdev_input_index(vpfe_dev,
- &subdev_index,
- &inp_index,
- index);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "invalid input index\n");
- goto unlock_out;
- }
-
- sdinfo = &vpfe_dev->cfg->sub_devs[subdev_index];
- sd = vpfe_dev->sd[subdev_index];
- route = &sdinfo->routes[inp_index];
- if (route && sdinfo->can_route) {
- input = route->input;
- output = route->output;
- } else {
- input = 0;
- output = 0;
- }
-
- if (sd)
- ret = v4l2_subdev_call(sd, video, s_routing, input, output, 0);
-
- if (ret) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "vpfe_doioctl:error in setting input in decoder\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- vpfe_dev->current_subdev = sdinfo;
- if (sd)
- vpfe_dev->v4l2_dev.ctrl_handler = sd->ctrl_handler;
- vpfe_dev->current_input = index;
- vpfe_dev->std_index = 0;
-
- /* set the bus/interface parameter for the sub device in ccdc */
- ret = ccdc_dev->hw_ops.set_hw_if_params(&sdinfo->ccdc_if_params);
- if (ret)
- goto unlock_out;
-
- /* set the default image parameters in the device */
- ret = vpfe_config_image_format(vpfe_dev,
- vpfe_standards[vpfe_dev->std_index].std_id);
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_querystd(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querystd\n");
-
-	ret = mutex_lock_interruptible(&vpfe_dev->lock);
-	if (ret)
-		return ret;
-
-	sdinfo = vpfe_dev->current_subdev;
- /* Call querystd function of decoder device */
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, querystd, std_id);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
-
- /* Call decoder driver function to set the standard */
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- sdinfo = vpfe_dev->current_subdev;
- /* If streaming is started, return device busy error */
- if (vpfe_dev->started) {
- v4l2_err(&vpfe_dev->v4l2_dev, "streaming is started\n");
- ret = -EBUSY;
- goto unlock_out;
- }
-
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_std, std_id);
- if (ret < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Failed to set standard\n");
- goto unlock_out;
- }
- ret = vpfe_config_image_format(vpfe_dev, std_id);
-
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_g_std(struct file *file, void *priv, v4l2_std_id *std_id)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_std\n");
-
- *std_id = vpfe_standards[vpfe_dev->std_index].std_id;
- return 0;
-}
-
-/*
- * Videobuf operations
- */
-static int vpfe_videobuf_setup(struct videobuf_queue *vq,
- unsigned int *count,
- unsigned int *size)
-{
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_setup\n");
- *size = vpfe_dev->fmt.fmt.pix.sizeimage;
- if (vpfe_dev->memory == V4L2_MEMORY_MMAP &&
- vpfe_dev->fmt.fmt.pix.sizeimage > config_params.device_bufsize)
- *size = config_params.device_bufsize;
-
- if (*count < config_params.min_numbuffers)
- *count = config_params.min_numbuffers;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "count=%d, size=%d\n", *count, *size);
- return 0;
-}
-
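With the default config_params above (min_numbuffers = 3, device_bufsize = 720 * 576 * 2 = 829440 bytes), an MMAP request whose sizeimage exceeds 829440 bytes is clamped to that value, and a request for fewer than 3 buffers is raised to 3.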
-static int vpfe_videobuf_prepare(struct videobuf_queue *vq,
- struct videobuf_buffer *vb,
- enum v4l2_field field)
-{
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
- unsigned long addr;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_prepare\n");
-
- /* If buffer is not initialized, initialize it */
- if (VIDEOBUF_NEEDS_INIT == vb->state) {
- vb->width = vpfe_dev->fmt.fmt.pix.width;
- vb->height = vpfe_dev->fmt.fmt.pix.height;
- vb->size = vpfe_dev->fmt.fmt.pix.sizeimage;
- vb->field = field;
-
- ret = videobuf_iolock(vq, vb, NULL);
- if (ret < 0)
- return ret;
-
- addr = videobuf_to_dma_contig(vb);
- /* Make sure user addresses are aligned to 32 bytes */
-		if (!IS_ALIGNED(addr, 32))
- return -EINVAL;
-
- vb->state = VIDEOBUF_PREPARED;
- }
- return 0;
-}
-
-static void vpfe_videobuf_queue(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- /* Get the file handle object and device object */
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
- unsigned long flags;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_buffer_queue\n");
-
- /* add the buffer to the DMA queue */
- spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
- list_add_tail(&vb->queue, &vpfe_dev->dma_queue);
- spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
-
- /* Change state of the buffer */
- vb->state = VIDEOBUF_QUEUED;
-}
-
-static void vpfe_videobuf_release(struct videobuf_queue *vq,
- struct videobuf_buffer *vb)
-{
- struct vpfe_fh *fh = vq->priv_data;
- struct vpfe_device *vpfe_dev = fh->vpfe_dev;
- unsigned long flags;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_videobuf_release\n");
-
- /*
-	 * We need to flush the buffers from the dma queue since
- * they are de-allocated
- */
- spin_lock_irqsave(&vpfe_dev->dma_queue_lock, flags);
- INIT_LIST_HEAD(&vpfe_dev->dma_queue);
- spin_unlock_irqrestore(&vpfe_dev->dma_queue_lock, flags);
- videobuf_dma_contig_free(vq, vb);
- vb->state = VIDEOBUF_NEEDS_INIT;
-}
-
-static const struct videobuf_queue_ops vpfe_videobuf_qops = {
- .buf_setup = vpfe_videobuf_setup,
- .buf_prepare = vpfe_videobuf_prepare,
- .buf_queue = vpfe_videobuf_queue,
- .buf_release = vpfe_videobuf_release,
-};
-
-/*
- * vpfe_reqbufs. Currently supports REQBUFS only once after opening
- * the device.
- */
-static int vpfe_reqbufs(struct file *file, void *priv,
- struct v4l2_requestbuffers *req_buf)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_reqbufs\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != req_buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buffer type\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- if (vpfe_dev->io_usrs != 0) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
- ret = -EBUSY;
- goto unlock_out;
- }
-
- vpfe_dev->memory = req_buf->memory;
- videobuf_queue_dma_contig_init(&vpfe_dev->buffer_queue,
- &vpfe_videobuf_qops,
- vpfe_dev->pdev,
- &vpfe_dev->irqlock,
- req_buf->type,
- vpfe_dev->fmt.fmt.pix.field,
- sizeof(struct videobuf_buffer),
- fh, NULL);
-
- fh->io_allowed = 1;
- vpfe_dev->io_usrs = 1;
- INIT_LIST_HEAD(&vpfe_dev->dma_queue);
- ret = videobuf_reqbufs(&vpfe_dev->buffer_queue, req_buf);
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_querybuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_querybuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- if (vpfe_dev->memory != V4L2_MEMORY_MMAP) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid memory\n");
- return -EINVAL;
- }
- /* Call videobuf_querybuf to get information */
- return videobuf_querybuf(&vpfe_dev->buffer_queue, buf);
-}
-
-static int vpfe_qbuf(struct file *file, void *priv,
- struct v4l2_buffer *p)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_qbuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != p->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- /*
- * If this file handle is not allowed to do IO,
- * return error
- */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
- return videobuf_qbuf(&vpfe_dev->buffer_queue, p);
-}
-
-static int vpfe_dqbuf(struct file *file, void *priv,
- struct v4l2_buffer *buf)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_dqbuf\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf->type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
- return videobuf_dqbuf(&vpfe_dev->buffer_queue,
- buf, file->f_flags & O_NONBLOCK);
-}
-
-/*
- * vpfe_calculate_offsets : This function calculates the buffer offsets
- * for the top and bottom fields
- */
-static void vpfe_calculate_offsets(struct vpfe_device *vpfe_dev)
-{
- struct v4l2_rect image_win;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_calculate_offsets\n");
-
- ccdc_dev->hw_ops.get_image_window(&image_win);
- vpfe_dev->field_off = image_win.height * image_win.width;
-}
-
-/* vpfe_start_ccdc_capture: start streaming in ccdc/isif */
-static void vpfe_start_ccdc_capture(struct vpfe_device *vpfe_dev)
-{
- ccdc_dev->hw_ops.enable(1);
- if (ccdc_dev->hw_ops.enable_out_to_sdram)
- ccdc_dev->hw_ops.enable_out_to_sdram(1);
- vpfe_dev->started = 1;
-}
-
-/*
- * vpfe_streamon. Assumes the DMA queue is not empty.
- * The application is expected to call QBUF before calling
- * this ioctl; if not, the driver returns an error
- */
-static int vpfe_streamon(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- struct vpfe_subdev_info *sdinfo;
- unsigned long addr;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamon\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
- /* If file handle is not allowed IO, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- sdinfo = vpfe_dev->current_subdev;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_stream, 1);
-
- if (ret && (ret != -ENOIOCTLCMD)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "stream on failed in subdev\n");
- return -EINVAL;
- }
-
- /* If buffer queue is empty, return error */
- if (list_empty(&vpfe_dev->buffer_queue.stream)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "buffer queue is empty\n");
- return -EIO;
- }
-
-	/* Call videobuf_streamon to start streaming in videobuf */
- ret = videobuf_streamon(&vpfe_dev->buffer_queue);
- if (ret)
- return ret;
-
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- goto streamoff;
- /* Get the next frame from the buffer queue */
- vpfe_dev->next_frm = list_entry(vpfe_dev->dma_queue.next,
- struct videobuf_buffer, queue);
- vpfe_dev->cur_frm = vpfe_dev->next_frm;
- /* Remove buffer from the buffer queue */
- list_del(&vpfe_dev->cur_frm->queue);
- /* Mark state of the current frame to active */
- vpfe_dev->cur_frm->state = VIDEOBUF_ACTIVE;
- /* Initialize field_id and started member */
- vpfe_dev->field_id = 0;
- addr = videobuf_to_dma_contig(vpfe_dev->cur_frm);
-
- /* Calculate field offset */
- vpfe_calculate_offsets(vpfe_dev);
-
- if (vpfe_attach_irq(vpfe_dev) < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error in attaching interrupt handle\n");
- ret = -EFAULT;
- goto unlock_out;
- }
- if (ccdc_dev->hw_ops.configure() < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Error in configuring ccdc\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- ccdc_dev->hw_ops.setfbaddr((unsigned long)(addr));
- vpfe_start_ccdc_capture(vpfe_dev);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
-streamoff:
- videobuf_streamoff(&vpfe_dev->buffer_queue);
- return ret;
-}
-
-static int vpfe_streamoff(struct file *file, void *priv,
- enum v4l2_buf_type buf_type)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct vpfe_fh *fh = file->private_data;
- struct vpfe_subdev_info *sdinfo;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_streamoff\n");
-
- if (V4L2_BUF_TYPE_VIDEO_CAPTURE != buf_type) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Invalid buf type\n");
- return -EINVAL;
- }
-
-	/* If IO is not allowed for this file handle, return error */
- if (!fh->io_allowed) {
- v4l2_err(&vpfe_dev->v4l2_dev, "fh->io_allowed\n");
- return -EACCES;
- }
-
- /* If streaming is not started, return error */
- if (!vpfe_dev->started) {
-		v4l2_err(&vpfe_dev->v4l2_dev, "device not started\n");
- return -EINVAL;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- vpfe_stop_ccdc_capture(vpfe_dev);
- vpfe_detach_irq(vpfe_dev);
-
- sdinfo = vpfe_dev->current_subdev;
- ret = v4l2_device_call_until_err(&vpfe_dev->v4l2_dev, sdinfo->grp_id,
- video, s_stream, 0);
-
- if (ret && (ret != -ENOIOCTLCMD))
- v4l2_err(&vpfe_dev->v4l2_dev, "stream off failed in subdev\n");
- ret = videobuf_streamoff(&vpfe_dev->buffer_queue);
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-static int vpfe_g_pixelaspect(struct file *file, void *priv,
- int type, struct v4l2_fract *f)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_pixelaspect\n");
-
- if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
- /* If std_index is invalid, then just return (== 1:1 aspect) */
- if (vpfe_dev->std_index >= ARRAY_SIZE(vpfe_standards))
- return 0;
-
- *f = vpfe_standards[vpfe_dev->std_index].pixelaspect;
- return 0;
-}
-
-static int vpfe_g_selection(struct file *file, void *priv,
- struct v4l2_selection *sel)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_g_selection\n");
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
- return -EINVAL;
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- sel->r = vpfe_dev->crop;
- break;
- case V4L2_SEL_TGT_CROP_DEFAULT:
- case V4L2_SEL_TGT_CROP_BOUNDS:
- sel->r.width = vpfe_standards[vpfe_dev->std_index].width;
- sel->r.height = vpfe_standards[vpfe_dev->std_index].height;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int vpfe_s_selection(struct file *file, void *priv,
- struct v4l2_selection *sel)
-{
- struct vpfe_device *vpfe_dev = video_drvdata(file);
- struct v4l2_rect rect = sel->r;
- int ret;
-
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_selection\n");
-
- if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
- sel->target != V4L2_SEL_TGT_CROP)
- return -EINVAL;
-
- if (vpfe_dev->started) {
- /* make sure streaming is not started */
- v4l2_err(&vpfe_dev->v4l2_dev,
- "Cannot change crop when streaming is ON\n");
- return -EBUSY;
- }
-
- ret = mutex_lock_interruptible(&vpfe_dev->lock);
- if (ret)
- return ret;
-
- if (rect.top < 0 || rect.left < 0) {
- v4l2_err(&vpfe_dev->v4l2_dev,
- "doesn't support negative values for top & left\n");
- ret = -EINVAL;
- goto unlock_out;
- }
-
- /* adjust the width to 16 pixel boundary */
- rect.width = ((rect.width + 15) & ~0xf);
-
- /* make sure parameters are valid */
- if ((rect.left + rect.width >
- vpfe_dev->std_info.active_pixels) ||
- (rect.top + rect.height >
- vpfe_dev->std_info.active_lines)) {
- v4l2_err(&vpfe_dev->v4l2_dev, "Error in S_SELECTION params\n");
- ret = -EINVAL;
- goto unlock_out;
- }
- ccdc_dev->hw_ops.set_image_window(&rect);
- vpfe_dev->fmt.fmt.pix.width = rect.width;
- vpfe_dev->fmt.fmt.pix.height = rect.height;
- vpfe_dev->fmt.fmt.pix.bytesperline =
- ccdc_dev->hw_ops.get_line_length();
- vpfe_dev->fmt.fmt.pix.sizeimage =
- vpfe_dev->fmt.fmt.pix.bytesperline *
- vpfe_dev->fmt.fmt.pix.height;
- vpfe_dev->crop = rect;
- sel->r = rect;
-unlock_out:
- mutex_unlock(&vpfe_dev->lock);
- return ret;
-}
-
-/* vpfe capture ioctl operations */
-static const struct v4l2_ioctl_ops vpfe_ioctl_ops = {
- .vidioc_querycap = vpfe_querycap,
- .vidioc_g_fmt_vid_cap = vpfe_g_fmt_vid_cap,
- .vidioc_enum_fmt_vid_cap = vpfe_enum_fmt_vid_cap,
- .vidioc_s_fmt_vid_cap = vpfe_s_fmt_vid_cap,
- .vidioc_try_fmt_vid_cap = vpfe_try_fmt_vid_cap,
- .vidioc_enum_input = vpfe_enum_input,
- .vidioc_g_input = vpfe_g_input,
- .vidioc_s_input = vpfe_s_input,
- .vidioc_querystd = vpfe_querystd,
- .vidioc_s_std = vpfe_s_std,
- .vidioc_g_std = vpfe_g_std,
- .vidioc_reqbufs = vpfe_reqbufs,
- .vidioc_querybuf = vpfe_querybuf,
- .vidioc_qbuf = vpfe_qbuf,
- .vidioc_dqbuf = vpfe_dqbuf,
- .vidioc_streamon = vpfe_streamon,
- .vidioc_streamoff = vpfe_streamoff,
- .vidioc_g_pixelaspect = vpfe_g_pixelaspect,
- .vidioc_g_selection = vpfe_g_selection,
- .vidioc_s_selection = vpfe_s_selection,
-};
-
-static struct vpfe_device *vpfe_initialize(void)
-{
- struct vpfe_device *vpfe_dev;
-
- /* Default number of buffers should be 3 */
- if ((numbuffers > 0) &&
- (numbuffers < config_params.min_numbuffers))
- numbuffers = config_params.min_numbuffers;
-
- /*
-	 * Set the buffer size to the minimum buffer size if an invalid
-	 * buffer size is given
- */
- if (bufsize < config_params.min_bufsize)
- bufsize = config_params.min_bufsize;
-
- config_params.numbuffers = numbuffers;
-
- if (numbuffers)
- config_params.device_bufsize = bufsize;
-
- /* Allocate memory for device objects */
- vpfe_dev = kzalloc(sizeof(*vpfe_dev), GFP_KERNEL);
-
- return vpfe_dev;
-}
-
-/*
- * vpfe_probe : This function creates device entries by registering
- * itself to the V4L2 driver and initializes the fields of each
- * device object
- */
-static int vpfe_probe(struct platform_device *pdev)
-{
- struct vpfe_subdev_info *sdinfo;
- struct vpfe_config *vpfe_cfg;
- struct resource *res1;
- struct vpfe_device *vpfe_dev;
- struct i2c_adapter *i2c_adap;
- struct video_device *vfd;
- int ret, i, j;
- int num_subdevs = 0;
-
- /* Get the pointer to the device object */
- vpfe_dev = vpfe_initialize();
-
- if (!vpfe_dev) {
- v4l2_err(pdev->dev.driver,
- "Failed to allocate memory for vpfe_dev\n");
- return -ENOMEM;
- }
-
- vpfe_dev->pdev = &pdev->dev;
-
- if (!pdev->dev.platform_data) {
- v4l2_err(pdev->dev.driver, "Unable to get vpfe config\n");
- ret = -ENODEV;
- goto probe_free_dev_mem;
- }
-
- vpfe_cfg = pdev->dev.platform_data;
- vpfe_dev->cfg = vpfe_cfg;
- if (!vpfe_cfg->ccdc || !vpfe_cfg->card_name || !vpfe_cfg->sub_devs) {
- v4l2_err(pdev->dev.driver, "null ptr in vpfe_cfg\n");
- ret = -ENOENT;
- goto probe_free_dev_mem;
- }
-
- /* Allocate memory for ccdc configuration */
- ccdc_cfg = kmalloc(sizeof(*ccdc_cfg), GFP_KERNEL);
- if (!ccdc_cfg) {
- ret = -ENOMEM;
- goto probe_free_dev_mem;
- }
-
- mutex_lock(&ccdc_lock);
-
- strscpy(ccdc_cfg->name, vpfe_cfg->ccdc, sizeof(ccdc_cfg->name));
- /* Get VINT0 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT0\n");
- ret = -ENODEV;
- goto probe_free_ccdc_cfg_mem;
- }
- vpfe_dev->ccdc_irq0 = res1->start;
-
- /* Get VINT1 irq resource */
- res1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
- if (!res1) {
- v4l2_err(pdev->dev.driver,
- "Unable to get interrupt for VINT1\n");
- ret = -ENODEV;
- goto probe_free_ccdc_cfg_mem;
- }
- vpfe_dev->ccdc_irq1 = res1->start;
-
- ret = request_irq(vpfe_dev->ccdc_irq0, vpfe_isr, 0,
- "vpfe_capture0", vpfe_dev);
-
- if (0 != ret) {
- v4l2_err(pdev->dev.driver, "Unable to request interrupt\n");
- goto probe_free_ccdc_cfg_mem;
- }
-
- vfd = &vpfe_dev->video_dev;
- /* Initialize field of video device */
- vfd->release = video_device_release_empty;
- vfd->fops = &vpfe_fops;
- vfd->ioctl_ops = &vpfe_ioctl_ops;
- vfd->tvnorms = 0;
- vfd->v4l2_dev = &vpfe_dev->v4l2_dev;
- vfd->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
- snprintf(vfd->name, sizeof(vfd->name),
- "%s_V%d.%d.%d",
- CAPTURE_DRV_NAME,
- (VPFE_CAPTURE_VERSION_CODE >> 16) & 0xff,
- (VPFE_CAPTURE_VERSION_CODE >> 8) & 0xff,
- (VPFE_CAPTURE_VERSION_CODE) & 0xff);
-
- ret = v4l2_device_register(&pdev->dev, &vpfe_dev->v4l2_dev);
- if (ret) {
- v4l2_err(pdev->dev.driver,
- "Unable to register v4l2 device.\n");
- goto probe_out_release_irq;
- }
- v4l2_info(&vpfe_dev->v4l2_dev, "v4l2 device registered\n");
- spin_lock_init(&vpfe_dev->irqlock);
- spin_lock_init(&vpfe_dev->dma_queue_lock);
- mutex_init(&vpfe_dev->lock);
-
- /* Initialize field of the device objects */
- vpfe_dev->numbuffers = config_params.numbuffers;
-
- /* register video device */
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "trying to register vpfe device.\n");
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "video_dev=%p\n", &vpfe_dev->video_dev);
- vpfe_dev->fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- ret = video_register_device(&vpfe_dev->video_dev,
- VFL_TYPE_VIDEO, -1);
-
- if (ret) {
- v4l2_err(pdev->dev.driver,
- "Unable to register video device.\n");
- goto probe_out_v4l2_unregister;
- }
-
- v4l2_info(&vpfe_dev->v4l2_dev, "video device registered\n");
- /* set the driver data in platform device */
- platform_set_drvdata(pdev, vpfe_dev);
- /* set driver private data */
- video_set_drvdata(&vpfe_dev->video_dev, vpfe_dev);
- i2c_adap = i2c_get_adapter(vpfe_cfg->i2c_adapter_id);
- num_subdevs = vpfe_cfg->num_subdevs;
- vpfe_dev->sd = kmalloc_array(num_subdevs,
- sizeof(*vpfe_dev->sd),
- GFP_KERNEL);
- if (!vpfe_dev->sd) {
- ret = -ENOMEM;
- goto probe_out_video_unregister;
- }
-
- for (i = 0; i < num_subdevs; i++) {
- struct v4l2_input *inps;
-
- sdinfo = &vpfe_cfg->sub_devs[i];
-
- /* Load up the subdevice */
- vpfe_dev->sd[i] =
- v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
- i2c_adap,
- &sdinfo->board_info,
- NULL);
- if (vpfe_dev->sd[i]) {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s registered\n",
- sdinfo->name);
- vpfe_dev->sd[i]->grp_id = sdinfo->grp_id;
- /* update tvnorms from the sub devices */
- for (j = 0; j < sdinfo->num_inputs; j++) {
- inps = &sdinfo->inputs[j];
- vfd->tvnorms |= inps->std;
- }
- } else {
- v4l2_info(&vpfe_dev->v4l2_dev,
- "v4l2 sub device %s register fails\n",
- sdinfo->name);
- ret = -ENXIO;
- goto probe_sd_out;
- }
- }
-
- /* set first sub device as current one */
- vpfe_dev->current_subdev = &vpfe_cfg->sub_devs[0];
- vpfe_dev->v4l2_dev.ctrl_handler = vpfe_dev->sd[0]->ctrl_handler;
-
- /* We have at least one sub device to work with */
- mutex_unlock(&ccdc_lock);
- return 0;
-
-probe_sd_out:
- kfree(vpfe_dev->sd);
-probe_out_video_unregister:
- video_unregister_device(&vpfe_dev->video_dev);
-probe_out_v4l2_unregister:
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
-probe_out_release_irq:
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
-probe_free_ccdc_cfg_mem:
- kfree(ccdc_cfg);
- mutex_unlock(&ccdc_lock);
-probe_free_dev_mem:
- kfree(vpfe_dev);
- return ret;
-}
-
-/*
- * vpfe_remove : Unregisters the device from the V4L2 driver
- */
-static int vpfe_remove(struct platform_device *pdev)
-{
- struct vpfe_device *vpfe_dev = platform_get_drvdata(pdev);
-
- v4l2_info(pdev->dev.driver, "vpfe_remove\n");
-
- free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
- kfree(vpfe_dev->sd);
- v4l2_device_unregister(&vpfe_dev->v4l2_dev);
- video_unregister_device(&vpfe_dev->video_dev);
- kfree(vpfe_dev);
- kfree(ccdc_cfg);
- return 0;
-}
-
-static int vpfe_suspend(struct device *dev)
-{
- return 0;
-}
-
-static int vpfe_resume(struct device *dev)
-{
- return 0;
-}
-
-static const struct dev_pm_ops vpfe_dev_pm_ops = {
- .suspend = vpfe_suspend,
- .resume = vpfe_resume,
-};
-
-static struct platform_driver vpfe_driver = {
- .driver = {
- .name = CAPTURE_DRV_NAME,
- .pm = &vpfe_dev_pm_ops,
- },
- .probe = vpfe_probe,
- .remove = vpfe_remove,
-};
-
-module_platform_driver(vpfe_driver);
diff --git a/drivers/media/platform/ti/davinci/vpif.h b/drivers/media/platform/ti/davinci/vpif.h
index 651943e3e375..52ecc2562216 100644
--- a/drivers/media/platform/ti/davinci/vpif.h
+++ b/drivers/media/platform/ti/davinci/vpif.h
@@ -322,10 +322,10 @@ static inline void channel1_intr_enable(int enable)
}
/* inline function to set buffer addresses in case of Y/C non mux mode */
-static inline void ch0_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch0_set_video_buf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
@@ -334,10 +334,10 @@ static inline void ch0_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
}
/* inline function to set buffer addresses in VPIF registers for video data */
-static inline void ch0_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch0_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH0_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH0_BTM_STRT_ADD_LUMA);
@@ -345,10 +345,10 @@ static inline void ch0_set_videobuf_addr(unsigned long top_strt_luma,
regw(btm_strt_chroma, VPIF_CH0_BTM_STRT_ADD_CHROMA);
}
-static inline void ch1_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch1_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH1_TOP_STRT_ADD_LUMA);
@@ -538,10 +538,10 @@ static inline void channel3_clipping_enable(int enable)
}
/* inline function to set buffer addresses in case of Y/C non mux mode */
-static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch2_set_video_buf_addr_yc_nmux(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
@@ -550,10 +550,10 @@ static inline void ch2_set_videobuf_addr_yc_nmux(unsigned long top_strt_luma,
}
/* inline function to set buffer addresses in VPIF registers for video data */
-static inline void ch2_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch2_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_LUMA);
@@ -561,10 +561,10 @@ static inline void ch2_set_videobuf_addr(unsigned long top_strt_luma,
regw(btm_strt_chroma, VPIF_CH2_BTM_STRT_ADD_CHROMA);
}
-static inline void ch3_set_videobuf_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+static inline void ch3_set_video_buf_addr(unsigned long top_strt_luma,
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_LUMA);
regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_LUMA);
@@ -574,18 +574,18 @@ static inline void ch3_set_videobuf_addr(unsigned long top_strt_luma,
/* inline function to set buffer addresses in VPIF registers for vbi data */
static inline void ch2_set_vbi_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH2_TOP_STRT_ADD_VANC);
regw(btm_strt_luma, VPIF_CH2_BTM_STRT_ADD_VANC);
}
static inline void ch3_set_vbi_addr(unsigned long top_strt_luma,
- unsigned long btm_strt_luma,
- unsigned long top_strt_chroma,
- unsigned long btm_strt_chroma)
+ unsigned long btm_strt_luma,
+ unsigned long top_strt_chroma,
+ unsigned long btm_strt_chroma)
{
regw(top_strt_luma, VPIF_CH3_TOP_STRT_ADD_VANC);
regw(btm_strt_luma, VPIF_CH3_BTM_STRT_ADD_VANC);
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.c b/drivers/media/platform/ti/davinci/vpif_capture.c
index b91eec899eb5..580723333fcc 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.c
+++ b/drivers/media/platform/ti/davinci/vpif_capture.c
@@ -632,11 +632,11 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
common = &(ch->common[VPIF_VIDEO_INDEX]);
if (VPIF_CHANNEL1_VIDEO == ch->channel_id)
- common->set_addr = ch1_set_videobuf_addr;
+ common->set_addr = ch1_set_video_buf_addr;
else if (2 == muxmode)
- common->set_addr = ch0_set_videobuf_addr_yc_nmux;
+ common->set_addr = ch0_set_video_buf_addr_yc_nmux;
else
- common->set_addr = ch0_set_videobuf_addr;
+ common->set_addr = ch0_set_video_buf_addr;
}
/**
diff --git a/drivers/media/platform/ti/davinci/vpif_capture.h b/drivers/media/platform/ti/davinci/vpif_capture.h
index d5951f61df47..6191056500cf 100644
--- a/drivers/media/platform/ti/davinci/vpif_capture.h
+++ b/drivers/media/platform/ti/davinci/vpif_capture.h
@@ -50,7 +50,7 @@ struct common_obj {
struct vpif_cap_buffer *next_frm;
/* Used to store pixel format */
struct v4l2_format fmt;
- /* Buffer queue used in video-buf */
+ /* Buffer queue used in vb2 */
struct vb2_queue buffer_queue;
/* Queue of filled frames */
struct list_head dma_queue;
diff --git a/drivers/media/platform/ti/davinci/vpif_display.c b/drivers/media/platform/ti/davinci/vpif_display.c
index 5d524acc995d..b2df81603f62 100644
--- a/drivers/media/platform/ti/davinci/vpif_display.c
+++ b/drivers/media/platform/ti/davinci/vpif_display.c
@@ -563,12 +563,12 @@ static void vpif_config_addr(struct channel_obj *ch, int muxmode)
struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
if (VPIF_CHANNEL3_VIDEO == ch->channel_id) {
- common->set_addr = ch3_set_videobuf_addr;
+ common->set_addr = ch3_set_video_buf_addr;
} else {
if (2 == muxmode)
- common->set_addr = ch2_set_videobuf_addr_yc_nmux;
+ common->set_addr = ch2_set_video_buf_addr_yc_nmux;
else
- common->set_addr = ch2_set_videobuf_addr;
+ common->set_addr = ch2_set_video_buf_addr;
}
}
diff --git a/drivers/media/platform/ti/davinci/vpif_display.h b/drivers/media/platform/ti/davinci/vpif_display.h
index f27474e0fc36..dae20053dd73 100644
--- a/drivers/media/platform/ti/davinci/vpif_display.h
+++ b/drivers/media/platform/ti/davinci/vpif_display.h
@@ -64,11 +64,11 @@ struct common_obj {
struct vpif_disp_buffer *next_frm; /* Pointer pointing to next
* vb2_buffer */
struct v4l2_format fmt; /* Used to store the format */
- struct vb2_queue buffer_queue; /* Buffer queue used in
- * video-buf */
+ struct vb2_queue buffer_queue; /* Buffer queue used in vb2 */
struct list_head dma_queue; /* Queue of filled frames */
- spinlock_t irqlock; /* Used in video-buf */
+ spinlock_t irqlock; /* Used for video buffer
+ * handling */
/* channel specific parameters */
struct mutex lock; /* lock used to access this
diff --git a/drivers/media/platform/ti/omap/omap_voutlib.c b/drivers/media/platform/ti/omap/omap_voutlib.c
index fdea2309ee37..0ac46458e41c 100644
--- a/drivers/media/platform/ti/omap/omap_voutlib.c
+++ b/drivers/media/platform/ti/omap/omap_voutlib.c
@@ -107,7 +107,7 @@ EXPORT_SYMBOL_GPL(omap_vout_try_window);
/* Given a new render window in new_win, adjust the window to the
* nearest supported configuration. The image cropping window in crop
* will also be adjusted if necessary. Preference is given to keeping the
- * the window as close to the requested configuration as possible. If
+ * window as close to the requested configuration as possible. If
* successful, new_win, vout->win, and crop are updated.
* Returns zero if successful, or -EINVAL if the requested preview window is
* impossible and cannot reasonably be adjusted.
diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c
index d251736eb420..a6052df9bb19 100644
--- a/drivers/media/platform/ti/omap3isp/isp.c
+++ b/drivers/media/platform/ti/omap3isp/isp.c
@@ -1528,7 +1528,7 @@ void omap3isp_print_status(struct isp_device *isp)
* To solve this problem power management support is split into prepare/complete
* and suspend/resume operations. The pipelines are stopped in prepare() and the
* ISP clocks get disabled in suspend(). Similarly, the clocks are re-enabled in
- * resume(), and the the pipelines are restarted in complete().
+ * resume(), and the pipelines are restarted in complete().
*
* TODO: PM dependencies between the ISP and sensors are not modelled explicitly
* yet.
diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c
index d7059180e80e..cc9a97d5d505 100644
--- a/drivers/media/platform/ti/omap3isp/ispvideo.c
+++ b/drivers/media/platform/ti/omap3isp/ispvideo.c
@@ -1071,7 +1071,7 @@ static int isp_video_check_external_subdevs(struct isp_video *video,
* processing might be possible but requires more testing.
*
* Stream start must be delayed until buffers are available at both the input
- * and output. The pipeline must be started in the videobuf queue callback with
+ * and output. The pipeline must be started in the vb2 queue callback with
* the buffers queue spinlock held. The modules subdev set stream operation must
* not sleep.
*/
diff --git a/drivers/media/platform/verisilicon/Kconfig b/drivers/media/platform/verisilicon/Kconfig
new file mode 100644
index 000000000000..e65b836b9d78
--- /dev/null
+++ b/drivers/media/platform/verisilicon/Kconfig
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+comment "Verisilicon media platform drivers"
+
+config VIDEO_HANTRO
+ tristate "Hantro VPU driver"
+ depends on ARCH_MXC || ARCH_ROCKCHIP || ARCH_AT91 || ARCH_SUNXI || COMPILE_TEST
+ depends on V4L_MEM2MEM_DRIVERS
+ depends on VIDEO_DEV
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_VMALLOC
+ select V4L2_MEM2MEM_DEV
+ select V4L2_H264
+ select V4L2_VP9
+ help
+ Support for the Hantro IP based Video Processing Units present on
+ Rockchip and NXP i.MX8M SoCs, which accelerate video and image
+ encoding and decoding.
+ To compile this driver as a module, choose M here: the module
+ will be called hantro-vpu.
+
+config VIDEO_HANTRO_IMX8M
+ bool "Hantro VPU i.MX8M support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_MXC || COMPILE_TEST
+ default y
+ help
+ Enable support for i.MX8M SoCs.
+
+config VIDEO_HANTRO_SAMA5D4
+ bool "Hantro VDEC SAMA5D4 support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_AT91 || COMPILE_TEST
+ default y
+ help
+ Enable support for Microchip SAMA5D4 SoCs.
+
+config VIDEO_HANTRO_ROCKCHIP
+ bool "Hantro VPU Rockchip support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
+ default y
+ help
+ Enable support for RK3288, RK3328, and RK3399 SoCs.
+
+config VIDEO_HANTRO_SUNXI
+ bool "Hantro VPU Allwinner support"
+ depends on VIDEO_HANTRO
+ depends on ARCH_SUNXI || COMPILE_TEST
+ default y
+ help
+ Enable support for H6 SoC.
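# Illustrative usage, not part of this patch: building the driver as a module
# on a Rockchip kernel only needs the top-level symbol, since the per-SoC
# options default to y once their architecture is selected, e.g.:
#
#   CONFIG_VIDEO_DEV=y
#   CONFIG_V4L_MEM2MEM_DRIVERS=y
#   CONFIG_VIDEO_HANTRO=m
#   CONFIG_VIDEO_HANTRO_ROCKCHIP=y
#
# The resulting module is named hantro-vpu.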
diff --git a/drivers/media/platform/verisilicon/Makefile b/drivers/media/platform/verisilicon/Makefile
new file mode 100644
index 000000000000..ebd5ede7bef7
--- /dev/null
+++ b/drivers/media/platform/verisilicon/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_VIDEO_HANTRO) += hantro-vpu.o
+
+hantro-vpu-y += \
+ hantro_drv.o \
+ hantro_v4l2.o \
+ hantro_postproc.o \
+ hantro_h1_jpeg_enc.o \
+ hantro_g1.o \
+ hantro_g1_h264_dec.o \
+ hantro_g1_mpeg2_dec.o \
+ hantro_g1_vp8_dec.o \
+ hantro_g2.o \
+ hantro_g2_hevc_dec.o \
+ hantro_g2_vp9_dec.o \
+ rockchip_vpu2_hw_jpeg_enc.o \
+ rockchip_vpu2_hw_h264_dec.o \
+ rockchip_vpu2_hw_mpeg2_dec.o \
+ rockchip_vpu2_hw_vp8_dec.o \
+ hantro_jpeg.o \
+ hantro_h264.o \
+ hantro_hevc.o \
+ hantro_mpeg2.o \
+ hantro_vp8.o \
+ hantro_vp9.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_IMX8M) += \
+ imx8m_vpu_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_SAMA5D4) += \
+ sama5d4_vdec_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_ROCKCHIP) += \
+ rockchip_vpu_hw.o
+
+hantro-vpu-$(CONFIG_VIDEO_HANTRO_SUNXI) += \
+ sunxi_vpu_hw.o
diff --git a/drivers/media/platform/verisilicon/hantro.h b/drivers/media/platform/verisilicon/hantro.h
new file mode 100644
index 000000000000..2989ebc631cc
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_H_
+#define HANTRO_H_
+
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+#include <linux/wait.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "hantro_hw.h"
+
+struct hantro_ctx;
+struct hantro_codec_ops;
+struct hantro_postproc_ops;
+
+#define HANTRO_JPEG_ENCODER BIT(0)
+#define HANTRO_ENCODERS 0x0000ffff
+#define HANTRO_MPEG2_DECODER BIT(16)
+#define HANTRO_VP8_DECODER BIT(17)
+#define HANTRO_H264_DECODER BIT(18)
+#define HANTRO_HEVC_DECODER BIT(19)
+#define HANTRO_VP9_DECODER BIT(20)
+#define HANTRO_DECODERS 0xffff0000
+
+/**
+ * struct hantro_irq - irq handler and name
+ *
+ * @name: irq name for device tree lookup
+ * @handler: interrupt handler
+ */
+struct hantro_irq {
+ const char *name;
+ irqreturn_t (*handler)(int irq, void *priv);
+};
+
+/**
+ * struct hantro_variant - information about VPU hardware variant
+ *
+ * @enc_offset: Offset from VPU base to encoder registers.
+ * @dec_offset: Offset from VPU base to decoder registers.
+ * @enc_fmts: Encoder formats.
+ * @num_enc_fmts: Number of encoder formats.
+ * @dec_fmts: Decoder formats.
+ * @num_dec_fmts: Number of decoder formats.
+ * @postproc_fmts: Post-processor formats.
+ * @num_postproc_fmts: Number of post-processor formats.
+ * @postproc_ops: Post-processor ops.
+ * @codec: Supported codecs
+ * @codec_ops: Codec ops.
+ * @init: Initialize hardware, optional.
+ * @runtime_resume: reenable hardware after power gating, optional.
+ * @irqs: array of irq names and interrupt handlers
+ * @num_irqs: number of irqs in the array
+ * @clk_names: array of clock names
+ * @num_clocks: number of clocks in the array
+ * @reg_names: array of register range names
+ * @num_regs: number of register range names in the array
+ * @double_buffer: core needs double buffering
+ * @legacy_regs: core uses legacy register set
+ * @late_postproc: postproc must be set up at the end of the job
+ */
+struct hantro_variant {
+ unsigned int enc_offset;
+ unsigned int dec_offset;
+ const struct hantro_fmt *enc_fmts;
+ unsigned int num_enc_fmts;
+ const struct hantro_fmt *dec_fmts;
+ unsigned int num_dec_fmts;
+ const struct hantro_fmt *postproc_fmts;
+ unsigned int num_postproc_fmts;
+ const struct hantro_postproc_ops *postproc_ops;
+ unsigned int codec;
+ const struct hantro_codec_ops *codec_ops;
+ int (*init)(struct hantro_dev *vpu);
+ int (*runtime_resume)(struct hantro_dev *vpu);
+ const struct hantro_irq *irqs;
+ int num_irqs;
+ const char * const *clk_names;
+ int num_clocks;
+ const char * const *reg_names;
+ int num_regs;
+ unsigned int double_buffer : 1;
+ unsigned int legacy_regs : 1;
+ unsigned int late_postproc : 1;
+};
+
+/**
+ * enum hantro_codec_mode - codec operating mode.
+ * @HANTRO_MODE_NONE: No operating mode. Used for RAW video formats.
+ * @HANTRO_MODE_JPEG_ENC: JPEG encoder.
+ * @HANTRO_MODE_H264_DEC: H264 decoder.
+ * @HANTRO_MODE_MPEG2_DEC: MPEG-2 decoder.
+ * @HANTRO_MODE_VP8_DEC: VP8 decoder.
+ * @HANTRO_MODE_HEVC_DEC: HEVC decoder.
+ * @HANTRO_MODE_VP9_DEC: VP9 decoder.
+ */
+enum hantro_codec_mode {
+ HANTRO_MODE_NONE = -1,
+ HANTRO_MODE_JPEG_ENC,
+ HANTRO_MODE_H264_DEC,
+ HANTRO_MODE_MPEG2_DEC,
+ HANTRO_MODE_VP8_DEC,
+ HANTRO_MODE_HEVC_DEC,
+ HANTRO_MODE_VP9_DEC,
+};
+
+/*
+ * struct hantro_ctrl - helper type to declare supported controls
+ * @codec: codec id this control belongs to (HANTRO_JPEG_ENCODER, etc.)
+ * @cfg: control configuration
+ */
+struct hantro_ctrl {
+ unsigned int codec;
+ struct v4l2_ctrl_config cfg;
+};
+
+/*
+ * struct hantro_func - Hantro VPU functionality
+ *
+ * @id: processing functionality ID (can be
+ * %MEDIA_ENT_F_PROC_VIDEO_ENCODER or
+ * %MEDIA_ENT_F_PROC_VIDEO_DECODER)
+ * @vdev: &struct video_device that exposes the encoder or
+ * decoder functionality
+ * @source_pad: &struct media_pad with the source pad.
+ * @sink: &struct media_entity pointer with the sink entity
+ * @sink_pad: &struct media_pad with the sink pad.
+ * @proc: &struct media_entity pointer with the M2M device itself.
+ * @proc_pads: &struct media_pad with the @proc pads.
+ * @intf_devnode: &struct media_intf devnode pointer with the interface
+ *		  that controls the M2M device.
+ *
+ * Contains everything needed to attach the video device to the media device.
+ */
+struct hantro_func {
+ unsigned int id;
+ struct video_device vdev;
+ struct media_pad source_pad;
+ struct media_entity sink;
+ struct media_pad sink_pad;
+ struct media_entity proc;
+ struct media_pad proc_pads[2];
+ struct media_intf_devnode *intf_devnode;
+};
+
+static inline struct hantro_func *
+hantro_vdev_to_func(struct video_device *vdev)
+{
+ return container_of(vdev, struct hantro_func, vdev);
+}
+
+/**
+ * struct hantro_dev - driver data
+ * @v4l2_dev: V4L2 device to register video devices for.
+ * @m2m_dev: mem2mem device associated to this device.
+ * @mdev: media device associated to this device.
+ * @encoder: encoder functionality.
+ * @decoder: decoder functionality.
+ * @pdev: Pointer to VPU platform device.
+ * @dev: Pointer to device for convenient logging using
+ * dev_ macros.
+ * @clocks: Array of clock handles.
+ * @resets: Array of reset handles.
+ * @reg_bases: Mapped addresses of VPU registers.
+ * @enc_base: Mapped address of VPU encoder register for convenience.
+ * @dec_base: Mapped address of VPU decoder register for convenience.
+ * @ctrl_base: Mapped address of VPU control block.
+ * @vpu_mutex: Mutex to synchronize V4L2 calls.
+ * @irqlock: Spinlock to synchronize access to data structures
+ * shared with interrupt handlers.
+ * @variant: Hardware variant-specific parameters.
+ * @watchdog_work: Delayed work for hardware timeout handling.
+ */
+struct hantro_dev {
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct media_device mdev;
+ struct hantro_func *encoder;
+ struct hantro_func *decoder;
+ struct platform_device *pdev;
+ struct device *dev;
+ struct clk_bulk_data *clocks;
+ struct reset_control *resets;
+ void __iomem **reg_bases;
+ void __iomem *enc_base;
+ void __iomem *dec_base;
+ void __iomem *ctrl_base;
+
+ struct mutex vpu_mutex; /* video_device lock */
+ spinlock_t irqlock;
+ const struct hantro_variant *variant;
+ struct delayed_work watchdog_work;
+};
+
+/**
+ * struct hantro_ctx - Context (instance) private data.
+ *
+ * @dev: VPU driver data to which the context belongs.
+ * @fh: V4L2 file handler.
+ * @is_encoder: Decoder or encoder context?
+ *
+ * @sequence_cap: Sequence counter for capture queue
+ * @sequence_out: Sequence counter for output queue
+ *
+ * @vpu_src_fmt: Descriptor of active source format.
+ * @src_fmt: V4L2 pixel format of active source format.
+ * @vpu_dst_fmt: Descriptor of active destination format.
+ * @dst_fmt: V4L2 pixel format of active destination format.
+ *
+ * @ctrl_handler: Control handler used to register controls.
+ * @jpeg_quality: User-specified JPEG compression quality.
+ * @bit_depth: Bit depth of current frame
+ *
+ * @codec_ops: Set of operations related to codec mode.
+ * @postproc: Post-processing context.
+ * @h264_dec: H.264-decoding context.
+ * @jpeg_enc: JPEG-encoding context.
+ * @mpeg2_dec: MPEG-2-decoding context.
+ * @vp8_dec: VP8-decoding context.
+ * @hevc_dec: HEVC-decoding context.
+ * @vp9_dec: VP9-decoding context.
+ */
+struct hantro_ctx {
+ struct hantro_dev *dev;
+ struct v4l2_fh fh;
+ bool is_encoder;
+
+ u32 sequence_cap;
+ u32 sequence_out;
+
+ const struct hantro_fmt *vpu_src_fmt;
+ struct v4l2_pix_format_mplane src_fmt;
+ const struct hantro_fmt *vpu_dst_fmt;
+ struct v4l2_pix_format_mplane dst_fmt;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ int jpeg_quality;
+ int bit_depth;
+
+ const struct hantro_codec_ops *codec_ops;
+ struct hantro_postproc_ctx postproc;
+
+ /* Specific for particular codec modes. */
+ union {
+ struct hantro_h264_dec_hw_ctx h264_dec;
+ struct hantro_mpeg2_dec_hw_ctx mpeg2_dec;
+ struct hantro_vp8_dec_hw_ctx vp8_dec;
+ struct hantro_hevc_dec_hw_ctx hevc_dec;
+ struct hantro_vp9_dec_hw_ctx vp9_dec;
+ };
+};
+
+/**
+ * struct hantro_fmt - information about supported video formats.
+ * @name: Human readable name of the format.
+ * @fourcc: FourCC code of the format. See V4L2_PIX_FMT_*.
+ * @codec_mode: Codec mode related to this format. See
+ * enum hantro_codec_mode.
+ * @header_size: Optional header size. Currently used by JPEG encoder.
+ * @max_depth: Maximum depth, for bitstream formats
+ * @enc_fmt: Format identifier for encoder registers.
+ * @frmsize: Supported range of frame sizes (only for bitstream formats).
+ * @postprocessed: Indicates if this format needs the post-processor.
+ * @match_depth: Indicates if format bit depth must match video bit depth
+ */
+struct hantro_fmt {
+ char *name;
+ u32 fourcc;
+ enum hantro_codec_mode codec_mode;
+ int header_size;
+ int max_depth;
+ enum hantro_enc_fmt enc_fmt;
+ struct v4l2_frmsize_stepwise frmsize;
+ bool postprocessed;
+ bool match_depth;
+};
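/*
 * Hypothetical example (not part of this patch) of a decoder bitstream
 * format entry using the fields above; the exact size limits are
 * illustrative only:
 *
 *	{
 *		.fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
 *		.codec_mode = HANTRO_MODE_MPEG2_DEC,
 *		.max_depth = 2,
 *		.frmsize = {
 *			.min_width = 48,
 *			.max_width = 1920,
 *			.step_width = 16,
 *			.min_height = 48,
 *			.max_height = 1088,
 *			.step_height = 16,
 *		},
 *	},
 */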
+
+struct hantro_reg {
+ u32 base;
+ u32 shift;
+ u32 mask;
+};
+
+struct hantro_postproc_regs {
+ struct hantro_reg pipeline_en;
+ struct hantro_reg max_burst;
+ struct hantro_reg clk_gate;
+ struct hantro_reg out_swap32;
+ struct hantro_reg out_endian;
+ struct hantro_reg out_luma_base;
+ struct hantro_reg input_width;
+ struct hantro_reg input_height;
+ struct hantro_reg output_width;
+ struct hantro_reg output_height;
+ struct hantro_reg input_fmt;
+ struct hantro_reg output_fmt;
+ struct hantro_reg orig_width;
+ struct hantro_reg display_width;
+};
+
+struct hantro_vp9_decoded_buffer_info {
+ /* Info needed when the decoded frame serves as a reference frame. */
+ unsigned short width;
+ unsigned short height;
+ u32 bit_depth : 4;
+};
+
+struct hantro_decoded_buffer {
+ /* Must be the first field in this struct. */
+ struct v4l2_m2m_buffer base;
+
+ union {
+ struct hantro_vp9_decoded_buffer_info vp9;
+ };
+};
+
+/* Logging helpers */
+
+/**
+ * DOC: hantro_debug: Module parameter to control level of debugging messages.
+ *
+ * Level of debugging messages can be controlled by bits of
+ * module parameter called "debug". Meaning of particular
+ * bits is as follows:
+ *
+ * bit 0 - global information: mode, size, init, release
+ * bit 1 - each run start/result information
+ * bit 2 - contents of small controls from userspace
+ * bit 3 - contents of big controls from userspace
+ * bit 4 - detail fmt, ctrl, buffer q/dq information
+ * bit 5 - detail function enter/leave trace information
+ * bit 6 - register write/read information
+ */
+extern int hantro_debug;
+
+#define vpu_debug(level, fmt, args...) \
+ do { \
+ if (hantro_debug & BIT(level)) \
+ pr_info("%s:%d: " fmt, \
+ __func__, __LINE__, ##args); \
+ } while (0)
+
+#define vpu_err(fmt, args...) \
+ pr_err("%s:%d: " fmt, __func__, __LINE__, ##args)
+
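/*
 * Usage note (illustrative): "debug" is a bit mask, so loading the module
 * with e.g. debug=0x41 (bits 0 and 6 set) prints the global init/release
 * messages plus every register read/write, while the default of 0 keeps the
 * driver quiet. vpu_debug(level, ...) only prints when BIT(level) is set in
 * hantro_debug.
 */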
+/* Structure access helpers. */
+static inline struct hantro_ctx *fh_to_ctx(struct v4l2_fh *fh)
+{
+ return container_of(fh, struct hantro_ctx, fh);
+}
+
+/* Register accessors. */
+static inline void vepu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->enc_base + reg);
+}
+
+static inline void vepu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->enc_base + reg);
+}
+
+static inline u32 vepu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->enc_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+static inline void vdpu_write_relaxed(struct hantro_dev *vpu,
+ u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel_relaxed(val, vpu->dec_base + reg);
+}
+
+static inline void vdpu_write(struct hantro_dev *vpu, u32 val, u32 reg)
+{
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ writel(val, vpu->dec_base + reg);
+}
+
+static inline void hantro_write_addr(struct hantro_dev *vpu,
+ unsigned long offset,
+ dma_addr_t addr)
+{
+ vdpu_write(vpu, addr & 0xffffffff, offset);
+}
+
+static inline u32 vdpu_read(struct hantro_dev *vpu, u32 reg)
+{
+ u32 val = readl(vpu->dec_base + reg);
+
+ vpu_debug(6, "0x%04x = 0x%08x\n", reg / 4, val);
+ return val;
+}
+
+static inline u32 vdpu_read_mask(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ u32 v;
+
+ v = vdpu_read(vpu, reg->base);
+ v &= ~(reg->mask << reg->shift);
+ v |= ((val & reg->mask) << reg->shift);
+ return v;
+}
+
+static inline void hantro_reg_write(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ vdpu_write_relaxed(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+}
+
+static inline void hantro_reg_write_s(struct hantro_dev *vpu,
+ const struct hantro_reg *reg,
+ u32 val)
+{
+ vdpu_write(vpu, vdpu_read_mask(vpu, reg, val), reg->base);
+}
+
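/*
 * Illustrative sketch, not part of this patch: how a variant back-end might
 * describe and program a sub-register field with the helpers above. The
 * field below is hypothetical; real definitions live in the per-core
 * register headers.
 */
static const struct hantro_reg example_filter_en = {
	.base  = 0x0a0,	/* hypothetical offset from vpu->dec_base */
	.shift = 14,
	.mask  = 0x1,	/* single-bit field at bit 14 */
};

/*
 * hantro_reg_write(vpu, &example_filter_en, 1) performs a read-modify-write:
 * vdpu_read_mask() reads the 32-bit word at dec_base + 0x0a0, clears
 * (mask << shift) and ORs in (1 & mask) << shift, so only bit 14 changes,
 * then vdpu_write_relaxed() stores the result back.
 */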
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id);
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts);
+
+static inline struct vb2_v4l2_buffer *
+hantro_get_src_buf(struct hantro_ctx *ctx)
+{
+ return v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+}
+
+static inline struct vb2_v4l2_buffer *
+hantro_get_dst_buf(struct hantro_ctx *ctx)
+{
+ return v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+}
+
+bool hantro_needs_postproc(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt);
+
+static inline dma_addr_t
+hantro_get_dec_buf_addr(struct hantro_ctx *ctx, struct vb2_buffer *vb)
+{
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ return ctx->postproc.dec_q[vb->index].dma;
+ return vb2_dma_contig_plane_dma_addr(vb, 0);
+}
+
+static inline struct hantro_decoded_buffer *
+vb2_to_hantro_decoded_buf(struct vb2_buffer *buf)
+{
+ return container_of(buf, struct hantro_decoded_buffer, base.vb.vb2_buf);
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx);
+void hantro_postproc_enable(struct hantro_ctx *ctx);
+void hantro_postproc_free(struct hantro_ctx *ctx);
+int hantro_postproc_alloc(struct hantro_ctx *ctx);
+int hanto_postproc_enum_framesizes(struct hantro_ctx *ctx,
+ struct v4l2_frmsizeenum *fsize);
+
+#endif /* HANTRO_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c
new file mode 100644
index 000000000000..2036f72eeb4a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_drv.c
@@ -0,0 +1,1146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-vmalloc.h>
+
+#include "hantro_v4l2.h"
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define DRIVER_NAME "hantro-vpu"
+
+int hantro_debug;
+module_param_named(debug, hantro_debug, int, 0644);
+MODULE_PARM_DESC(debug,
+ "Debug level - higher value produces more verbose messages");
+
+void *hantro_get_ctrl(struct hantro_ctx *ctx, u32 id)
+{
+ struct v4l2_ctrl *ctrl;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, id);
+ return ctrl ? ctrl->p_cur.p : NULL;
+}
+
+dma_addr_t hantro_get_ref(struct hantro_ctx *ctx, u64 ts)
+{
+ struct vb2_queue *q = v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx);
+ struct vb2_buffer *buf;
+
+ buf = vb2_find_buffer(q, ts);
+ if (!buf)
+ return 0;
+ return hantro_get_dec_buf_addr(ctx, buf);
+}
+
+static const struct v4l2_event hantro_eos_event = {
+ .type = V4L2_EVENT_EOS
+};
+
+static void hantro_job_finish_no_pm(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ struct vb2_v4l2_buffer *src, *dst;
+
+ src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+
+ if (WARN_ON(!src))
+ return;
+ if (WARN_ON(!dst))
+ return;
+
+ src->sequence = ctx->sequence_out++;
+ dst->sequence = ctx->sequence_cap++;
+
+ if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src)) {
+ dst->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+ v4l2_m2m_mark_stopped(ctx->fh.m2m_ctx);
+ }
+
+ v4l2_m2m_buf_done_and_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx,
+ result);
+}
+
+static void hantro_job_finish(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ enum vb2_buffer_state result)
+{
+ pm_runtime_mark_last_busy(vpu->dev);
+ pm_runtime_put_autosuspend(vpu->dev);
+
+ clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);
+
+ hantro_job_finish_no_pm(vpu, ctx, result);
+}
+
+void hantro_irq_done(struct hantro_dev *vpu,
+ enum vb2_buffer_state result)
+{
+ struct hantro_ctx *ctx =
+ v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+
+ /*
+ * If cancel_delayed_work returns false
+ * the timeout expired. The watchdog is running,
+ * and will take care of finishing the job.
+ */
+ if (cancel_delayed_work(&vpu->watchdog_work)) {
+ if (result == VB2_BUF_STATE_DONE && ctx->codec_ops->done)
+ ctx->codec_ops->done(ctx);
+ hantro_job_finish(vpu, ctx, result);
+ }
+}
+
+void hantro_watchdog(struct work_struct *work)
+{
+ struct hantro_dev *vpu;
+ struct hantro_ctx *ctx;
+
+ vpu = container_of(to_delayed_work(work),
+ struct hantro_dev, watchdog_work);
+ ctx = v4l2_m2m_get_curr_priv(vpu->m2m_dev);
+ if (ctx) {
+ vpu_err("frame processing timed out!\n");
+ ctx->codec_ops->reset(ctx);
+ hantro_job_finish(vpu, ctx, VB2_BUF_STATE_ERROR);
+ }
+}
+
+void hantro_start_prepare_run(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+
+ src_buf = hantro_get_src_buf(ctx);
+ v4l2_ctrl_request_setup(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ if (!ctx->is_encoder && !ctx->dev->variant->late_postproc) {
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ hantro_postproc_enable(ctx);
+ else
+ hantro_postproc_disable(ctx);
+ }
+}
+
+void hantro_end_prepare_run(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+
+ if (!ctx->is_encoder && ctx->dev->variant->late_postproc) {
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt))
+ hantro_postproc_enable(ctx);
+ else
+ hantro_postproc_disable(ctx);
+ }
+
+ src_buf = hantro_get_src_buf(ctx);
+ v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+
+ /* Kick the watchdog. */
+ schedule_delayed_work(&ctx->dev->watchdog_work,
+ msecs_to_jiffies(2000));
+}
+
+static void device_run(void *priv)
+{
+ struct hantro_ctx *ctx = priv;
+ struct vb2_v4l2_buffer *src, *dst;
+ int ret;
+
+ src = hantro_get_src_buf(ctx);
+ dst = hantro_get_dst_buf(ctx);
+
+ ret = pm_runtime_resume_and_get(ctx->dev->dev);
+ if (ret < 0)
+ goto err_cancel_job;
+
+ ret = clk_bulk_enable(ctx->dev->variant->num_clocks, ctx->dev->clocks);
+ if (ret)
+ goto err_cancel_job;
+
+ v4l2_m2m_buf_copy_metadata(src, dst, true);
+
+ if (ctx->codec_ops->run(ctx))
+ goto err_cancel_job;
+
+ return;
+
+err_cancel_job:
+ hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
+}
+
+static const struct v4l2_m2m_ops vpu_m2m_ops = {
+ .device_run = device_run,
+};
+
+static int
+queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
+{
+ struct hantro_ctx *ctx = priv;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->drv_priv = ctx;
+ src_vq->ops = &hantro_queue_ops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+
+ /*
+ * Driver does mostly sequential access, so sacrifice TLB efficiency
+ * for faster allocation. Also, no CPU access on the source queue,
+ * so no kernel mapping needed.
+ */
+ src_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES |
+ DMA_ATTR_NO_KERNEL_MAPPING;
+ src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &ctx->dev->vpu_mutex;
+ src_vq->dev = ctx->dev->v4l2_dev.dev;
+ src_vq->supports_requests = true;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->bidirectional = true;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES;
+ /*
+ * The Kernel needs access to the JPEG destination buffer for the
+ * JPEG encoder to fill in the JPEG headers.
+ */
+ if (!ctx->is_encoder)
+ dst_vq->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ dst_vq->drv_priv = ctx;
+ dst_vq->ops = &hantro_queue_ops;
+ dst_vq->buf_struct_size = sizeof(struct hantro_decoded_buffer);
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &ctx->dev->vpu_mutex;
+ dst_vq->dev = ctx->dev->v4l2_dev.dev;
+
+ return vb2_queue_init(dst_vq);
+}
+
+static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
+ const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
+
+ if (sps->chroma_format_idc > 1)
+ /* Only 4:0:0 and 4:2:0 are supported */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) {
+ const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
+
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) {
+ const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame;
+
+ /* We only support profile 0 */
+ if (dec_params->profile != 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int hantro_jpeg_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ vpu_debug(1, "s_ctrl: id = %d, val = %d\n", ctrl->id, ctrl->val);
+
+ switch (ctrl->id) {
+ case V4L2_CID_JPEG_COMPRESSION_QUALITY:
+ ctx->jpeg_quality = ctrl->val;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hantro_vp9_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hantro_ctx *ctx;
+
+ ctx = container_of(ctrl->handler,
+ struct hantro_ctx, ctrl_handler);
+
+ switch (ctrl->id) {
+ case V4L2_CID_STATELESS_VP9_FRAME:
+ ctx->bit_depth = ctrl->p_new.p_vp9_frame->bit_depth;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops hantro_ctrl_ops = {
+ .try_ctrl = hantro_try_ctrl,
+};
+
+static const struct v4l2_ctrl_ops hantro_jpeg_ctrl_ops = {
+ .s_ctrl = hantro_jpeg_s_ctrl,
+};
+
+static const struct v4l2_ctrl_ops hantro_vp9_ctrl_ops = {
+ .s_ctrl = hantro_vp9_s_ctrl,
+};
+
+#define HANTRO_JPEG_ACTIVE_MARKERS (V4L2_JPEG_ACTIVE_MARKER_APP0 | \
+ V4L2_JPEG_ACTIVE_MARKER_COM | \
+ V4L2_JPEG_ACTIVE_MARKER_DQT | \
+ V4L2_JPEG_ACTIVE_MARKER_DHT)
+
+static const struct hantro_ctrl controls[] = {
+ {
+ .codec = HANTRO_JPEG_ENCODER,
+ .cfg = {
+ .id = V4L2_CID_JPEG_COMPRESSION_QUALITY,
+ .min = 5,
+ .max = 100,
+ .step = 1,
+ .def = 50,
+ .ops = &hantro_jpeg_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_JPEG_ENCODER,
+ .cfg = {
+ .id = V4L2_CID_JPEG_ACTIVE_MARKER,
+ .max = HANTRO_JPEG_ACTIVE_MARKERS,
+ .def = HANTRO_JPEG_ACTIVE_MARKERS,
+ /*
+ * Changing the set of active markers/segments also
+ * messes up the alignment of the JPEG header, which
+ * is needed to allow the hardware to write directly
+ * to the output buffer. Implementing this introduces
+ * a lot of complexity for little gain, as the markers
+ * enabled is already the minimum required set.
+ */
+ .flags = V4L2_CTRL_FLAG_READ_ONLY,
+ },
+ }, {
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_SEQUENCE,
+ },
+ }, {
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_PICTURE,
+ },
+ }, {
+ .codec = HANTRO_MPEG2_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_MPEG2_QUANTISATION,
+ },
+ }, {
+ .codec = HANTRO_VP8_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP8_FRAME,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SPS,
+ .ops = &hantro_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_START_CODE,
+ .min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ },
+ }, {
+ .codec = HANTRO_H264_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .min = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ }
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_MODE,
+ .min = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_HEVC_DECODE_MODE_FRAME_BASED,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_START_CODE,
+ .min = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_HEVC_START_CODE_ANNEX_B,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_PROFILE,
+ .min = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10,
+ .def = V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_LEVEL,
+ .min = V4L2_MPEG_VIDEO_HEVC_LEVEL_1,
+ .max = V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SPS,
+ .ops = &hantro_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_PPS,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_DECODE_PARAMS,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_HEVC_SCALING_MATRIX,
+ },
+ }, {
+ .codec = HANTRO_VP9_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP9_FRAME,
+ .ops = &hantro_vp9_ctrl_ops,
+ },
+ }, {
+ .codec = HANTRO_VP9_DECODER,
+ .cfg = {
+ .id = V4L2_CID_STATELESS_VP9_COMPRESSED_HDR,
+ },
+ },
+};
+
+static int hantro_ctrls_setup(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ int allowed_codecs)
+{
+ int i, num_ctrls = ARRAY_SIZE(controls);
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_handler, num_ctrls);
+
+ for (i = 0; i < num_ctrls; i++) {
+ if (!(allowed_codecs & controls[i].codec))
+ continue;
+
+ v4l2_ctrl_new_custom(&ctx->ctrl_handler,
+ &controls[i].cfg, NULL);
+ if (ctx->ctrl_handler.error) {
+ vpu_err("Adding control (%d) failed %d\n",
+ controls[i].cfg.id,
+ ctx->ctrl_handler.error);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ return ctx->ctrl_handler.error;
+ }
+ }
+ return v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+}
+
+/*
+ * V4L2 file operations.
+ */
+
+static int hantro_open(struct file *filp)
+{
+ struct hantro_dev *vpu = video_drvdata(filp);
+ struct video_device *vdev = video_devdata(filp);
+ struct hantro_func *func = hantro_vdev_to_func(vdev);
+ struct hantro_ctx *ctx;
+ int allowed_codecs, ret;
+
+ /*
+	 * We do not need any extra locking here, because we operate only
+	 * on local data, except for reading a few fields from dev, which
+	 * do not change over the device's lifetime (guaranteed by the
+	 * reference on the module taken in open()) and V4L2 internal
+	 * objects (such as vdev and ctx->fh), which have proper locking
+	 * done in the respective helper functions used here.
+ */
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = vpu;
+ if (func->id == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_ENCODERS;
+ ctx->is_encoder = true;
+ } else if (func->id == MEDIA_ENT_F_PROC_VIDEO_DECODER) {
+ allowed_codecs = vpu->variant->codec & HANTRO_DECODERS;
+ ctx->is_encoder = false;
+ } else {
+ ret = -ENODEV;
+ goto err_ctx_free;
+ }
+
+ ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(vpu->m2m_dev, ctx, queue_init);
+ if (IS_ERR(ctx->fh.m2m_ctx)) {
+ ret = PTR_ERR(ctx->fh.m2m_ctx);
+ goto err_ctx_free;
+ }
+
+ v4l2_fh_init(&ctx->fh, vdev);
+ filp->private_data = &ctx->fh;
+ v4l2_fh_add(&ctx->fh);
+
+ hantro_reset_fmts(ctx);
+
+ ret = hantro_ctrls_setup(vpu, ctx, allowed_codecs);
+ if (ret) {
+ vpu_err("Failed to set up controls\n");
+ goto err_fh_free;
+ }
+ ctx->fh.ctrl_handler = &ctx->ctrl_handler;
+
+ return 0;
+
+err_fh_free:
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+err_ctx_free:
+ kfree(ctx);
+ return ret;
+}
+
+static int hantro_release(struct file *filp)
+{
+ struct hantro_ctx *ctx =
+ container_of(filp->private_data, struct hantro_ctx, fh);
+
+ /*
+ * No need for extra locking because this was the last reference
+ * to this file.
+ */
+ v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
+ v4l2_fh_del(&ctx->fh);
+ v4l2_fh_exit(&ctx->fh);
+ v4l2_ctrl_handler_free(&ctx->ctrl_handler);
+ kfree(ctx);
+
+ return 0;
+}
+
+static const struct v4l2_file_operations hantro_fops = {
+ .owner = THIS_MODULE,
+ .open = hantro_open,
+ .release = hantro_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static const struct of_device_id of_hantro_match[] = {
+#ifdef CONFIG_VIDEO_HANTRO_ROCKCHIP
+ { .compatible = "rockchip,px30-vpu", .data = &px30_vpu_variant, },
+ { .compatible = "rockchip,rk3036-vpu", .data = &rk3036_vpu_variant, },
+ { .compatible = "rockchip,rk3066-vpu", .data = &rk3066_vpu_variant, },
+ { .compatible = "rockchip,rk3288-vpu", .data = &rk3288_vpu_variant, },
+ { .compatible = "rockchip,rk3328-vpu", .data = &rk3328_vpu_variant, },
+ { .compatible = "rockchip,rk3399-vpu", .data = &rk3399_vpu_variant, },
+ { .compatible = "rockchip,rk3568-vepu", .data = &rk3568_vepu_variant, },
+ { .compatible = "rockchip,rk3568-vpu", .data = &rk3568_vpu_variant, },
+#endif
+#ifdef CONFIG_VIDEO_HANTRO_IMX8M
+ { .compatible = "nxp,imx8mm-vpu-g1", .data = &imx8mm_vpu_g1_variant, },
+ { .compatible = "nxp,imx8mq-vpu", .data = &imx8mq_vpu_variant, },
+ { .compatible = "nxp,imx8mq-vpu-g1", .data = &imx8mq_vpu_g1_variant },
+ { .compatible = "nxp,imx8mq-vpu-g2", .data = &imx8mq_vpu_g2_variant },
+#endif
+#ifdef CONFIG_VIDEO_HANTRO_SAMA5D4
+ { .compatible = "microchip,sama5d4-vdec", .data = &sama5d4_vdec_variant, },
+#endif
+#ifdef CONFIG_VIDEO_HANTRO_SUNXI
+ { .compatible = "allwinner,sun50i-h6-vpu-g2", .data = &sunxi_vpu_variant, },
+#endif
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, of_hantro_match);
+
+static int hantro_register_entity(struct media_device *mdev,
+ struct media_entity *entity,
+ const char *entity_name,
+ struct media_pad *pads, int num_pads,
+ int function, struct video_device *vdev)
+{
+ char *name;
+ int ret;
+
+ entity->obj_type = MEDIA_ENTITY_TYPE_BASE;
+ if (function == MEDIA_ENT_F_IO_V4L) {
+ entity->info.dev.major = VIDEO_MAJOR;
+ entity->info.dev.minor = vdev->minor;
+ }
+
+ name = devm_kasprintf(mdev->dev, GFP_KERNEL, "%s-%s", vdev->name,
+ entity_name);
+ if (!name)
+ return -ENOMEM;
+
+ entity->name = name;
+ entity->function = function;
+
+ ret = media_entity_pads_init(entity, num_pads, pads);
+ if (ret)
+ return ret;
+
+ ret = media_device_register_entity(mdev, entity);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hantro_attach_func(struct hantro_dev *vpu,
+ struct hantro_func *func)
+{
+ struct media_device *mdev = &vpu->mdev;
+ struct media_link *link;
+ int ret;
+
+	/* Create the three entities (source, processing, sink) with their pads */
+ func->source_pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->vdev.entity, "source",
+ &func->source_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ return ret;
+
+ func->proc_pads[0].flags = MEDIA_PAD_FL_SINK;
+ func->proc_pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ ret = hantro_register_entity(mdev, &func->proc, "proc",
+ func->proc_pads, 2, func->id,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity0;
+
+ func->sink_pad.flags = MEDIA_PAD_FL_SINK;
+ ret = hantro_register_entity(mdev, &func->sink, "sink",
+ &func->sink_pad, 1, MEDIA_ENT_F_IO_V4L,
+ &func->vdev);
+ if (ret)
+ goto err_rel_entity1;
+
+ /* Connect the three entities */
+ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rel_entity2;
+
+ ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ goto err_rm_links0;
+
+ /* Create video interface */
+ func->intf_devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO,
+ 0, VIDEO_MAJOR,
+ func->vdev.minor);
+ if (!func->intf_devnode) {
+ ret = -ENOMEM;
+ goto err_rm_links1;
+ }
+
+	/* Connect the two I/O entities (the DMA engines) to the interface */
+ link = media_create_intf_link(&func->vdev.entity,
+ &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+
+ link = media_create_intf_link(&func->sink, &func->intf_devnode->intf,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (!link) {
+ ret = -ENOMEM;
+ goto err_rm_devnode;
+ }
+ return 0;
+
+err_rm_devnode:
+ media_devnode_remove(func->intf_devnode);
+
+err_rm_links1:
+ media_entity_remove_links(&func->sink);
+
+err_rm_links0:
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+
+err_rel_entity2:
+ media_device_unregister_entity(&func->sink);
+
+err_rel_entity1:
+ media_device_unregister_entity(&func->proc);
+
+err_rel_entity0:
+ media_device_unregister_entity(&func->vdev.entity);
+ return ret;
+}
+
+static void hantro_detach_func(struct hantro_func *func)
+{
+ media_devnode_remove(func->intf_devnode);
+ media_entity_remove_links(&func->sink);
+ media_entity_remove_links(&func->proc);
+ media_entity_remove_links(&func->vdev.entity);
+ media_device_unregister_entity(&func->sink);
+ media_device_unregister_entity(&func->proc);
+ media_device_unregister_entity(&func->vdev.entity);
+}
+
+static int hantro_add_func(struct hantro_dev *vpu, unsigned int funcid)
+{
+ const struct of_device_id *match;
+ struct hantro_func *func;
+ struct video_device *vfd;
+ int ret;
+
+ match = of_match_node(of_hantro_match, vpu->dev->of_node);
+ func = devm_kzalloc(vpu->dev, sizeof(*func), GFP_KERNEL);
+ if (!func) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to allocate video device\n");
+ return -ENOMEM;
+ }
+
+ func->id = funcid;
+
+ vfd = &func->vdev;
+ vfd->fops = &hantro_fops;
+ vfd->release = video_device_release_empty;
+ vfd->lock = &vpu->vpu_mutex;
+ vfd->v4l2_dev = &vpu->v4l2_dev;
+ vfd->vfl_dir = VFL_DIR_M2M;
+ vfd->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ vfd->ioctl_ops = &hantro_ioctl_ops;
+ snprintf(vfd->name, sizeof(vfd->name), "%s-%s", match->compatible,
+ funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER ? "enc" : "dec");
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER) {
+ vpu->encoder = func;
+ } else {
+ vpu->decoder = func;
+ v4l2_disable_ioctl(vfd, VIDIOC_TRY_ENCODER_CMD);
+ v4l2_disable_ioctl(vfd, VIDIOC_ENCODER_CMD);
+ }
+
+ video_set_drvdata(vfd, vpu);
+
+ ret = video_register_device(vfd, VFL_TYPE_VIDEO, -1);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register video device\n");
+ return ret;
+ }
+
+ ret = hantro_attach_func(vpu, func);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev,
+ "Failed to attach functionality to the media device\n");
+ goto err_unreg_dev;
+ }
+
+ v4l2_info(&vpu->v4l2_dev, "registered %s as /dev/video%d\n", vfd->name,
+ vfd->num);
+
+ return 0;
+
+err_unreg_dev:
+ video_unregister_device(vfd);
+ return ret;
+}
+
+static int hantro_add_enc_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->enc_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static int hantro_add_dec_func(struct hantro_dev *vpu)
+{
+ if (!vpu->variant->dec_fmts)
+ return 0;
+
+ return hantro_add_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static void hantro_remove_func(struct hantro_dev *vpu,
+ unsigned int funcid)
+{
+ struct hantro_func *func;
+
+ if (funcid == MEDIA_ENT_F_PROC_VIDEO_ENCODER)
+ func = vpu->encoder;
+ else
+ func = vpu->decoder;
+
+ if (!func)
+ return;
+
+ hantro_detach_func(func);
+ video_unregister_device(&func->vdev);
+}
+
+static void hantro_remove_enc_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_ENCODER);
+}
+
+static void hantro_remove_dec_func(struct hantro_dev *vpu)
+{
+ hantro_remove_func(vpu, MEDIA_ENT_F_PROC_VIDEO_DECODER);
+}
+
+static const struct media_device_ops hantro_m2m_media_ops = {
+ .req_validate = vb2_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static int hantro_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct hantro_dev *vpu;
+ struct resource *res;
+ int num_bases;
+ int i, ret;
+
+ vpu = devm_kzalloc(&pdev->dev, sizeof(*vpu), GFP_KERNEL);
+ if (!vpu)
+ return -ENOMEM;
+
+ vpu->dev = &pdev->dev;
+ vpu->pdev = pdev;
+ mutex_init(&vpu->vpu_mutex);
+ spin_lock_init(&vpu->irqlock);
+
+ match = of_match_node(of_hantro_match, pdev->dev.of_node);
+ vpu->variant = match->data;
+
+ /*
+ * Support for nxp,imx8mq-vpu is kept for backwards compatibility
+ * but it's deprecated. Please update your DTS file to use
+ * nxp,imx8mq-vpu-g1 or nxp,imx8mq-vpu-g2 instead.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node, "nxp,imx8mq-vpu"))
+ dev_warn(&pdev->dev, "%s compatible is deprecated\n",
+ match->compatible);
+
+ INIT_DELAYED_WORK(&vpu->watchdog_work, hantro_watchdog);
+
+ vpu->clocks = devm_kcalloc(&pdev->dev, vpu->variant->num_clocks,
+ sizeof(*vpu->clocks), GFP_KERNEL);
+ if (!vpu->clocks)
+ return -ENOMEM;
+
+ if (vpu->variant->num_clocks > 1) {
+ for (i = 0; i < vpu->variant->num_clocks; i++)
+ vpu->clocks[i].id = vpu->variant->clk_names[i];
+
+ ret = devm_clk_bulk_get(&pdev->dev, vpu->variant->num_clocks,
+ vpu->clocks);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * If the driver has a single clk, chances are there will be no
+ * actual name in the DT bindings.
+ */
+ vpu->clocks[0].clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vpu->clocks[0].clk))
+ return PTR_ERR(vpu->clocks[0].clk);
+ }
+
+ vpu->resets = devm_reset_control_array_get(&pdev->dev, false, true);
+ if (IS_ERR(vpu->resets))
+ return PTR_ERR(vpu->resets);
+
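+	/* Fall back to a single register base when the variant declares no named ranges. */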
+ num_bases = vpu->variant->num_regs ?: 1;
+ vpu->reg_bases = devm_kcalloc(&pdev->dev, num_bases,
+ sizeof(*vpu->reg_bases), GFP_KERNEL);
+ if (!vpu->reg_bases)
+ return -ENOMEM;
+
+ for (i = 0; i < num_bases; i++) {
+ res = vpu->variant->reg_names ?
+ platform_get_resource_byname(vpu->pdev, IORESOURCE_MEM,
+ vpu->variant->reg_names[i]) :
+ platform_get_resource(vpu->pdev, IORESOURCE_MEM, 0);
+ vpu->reg_bases[i] = devm_ioremap_resource(vpu->dev, res);
+ if (IS_ERR(vpu->reg_bases[i]))
+ return PTR_ERR(vpu->reg_bases[i]);
+ }
+ vpu->enc_base = vpu->reg_bases[0] + vpu->variant->enc_offset;
+ vpu->dec_base = vpu->reg_bases[0] + vpu->variant->dec_offset;
+
+	/*
+ * TODO: Eventually allow taking advantage of full 64-bit address space.
+ * Until then we assume the MSB portion of buffers' base addresses is
+ * always 0 due to this masking operation.
+ */
+ ret = dma_set_coherent_mask(vpu->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(vpu->dev, "Could not set DMA coherent mask.\n");
+ return ret;
+ }
+ vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+
+ for (i = 0; i < vpu->variant->num_irqs; i++) {
+ const char *irq_name;
+ int irq;
+
+ if (!vpu->variant->irqs[i].handler)
+ continue;
+
+ if (vpu->variant->num_irqs > 1) {
+ irq_name = vpu->variant->irqs[i].name;
+ irq = platform_get_irq_byname(vpu->pdev, irq_name);
+ } else {
+ /*
+ * If the driver has a single IRQ, chances are there
+ * will be no actual name in the DT bindings.
+ */
+ irq_name = "default";
+ irq = platform_get_irq(vpu->pdev, 0);
+ }
+ if (irq <= 0)
+ return -ENXIO;
+
+ ret = devm_request_irq(vpu->dev, irq,
+ vpu->variant->irqs[i].handler, 0,
+ dev_name(vpu->dev), vpu);
+ if (ret) {
+ dev_err(vpu->dev, "Could not request %s IRQ.\n",
+ irq_name);
+ return ret;
+ }
+ }
+
+ if (vpu->variant->init) {
+ ret = vpu->variant->init(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to init VPU hardware\n");
+ return ret;
+ }
+ }
+
+ pm_runtime_set_autosuspend_delay(vpu->dev, 100);
+ pm_runtime_use_autosuspend(vpu->dev);
+ pm_runtime_enable(vpu->dev);
+
+ ret = reset_control_deassert(vpu->resets);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to deassert resets\n");
+ goto err_pm_disable;
+ }
+
+ ret = clk_bulk_prepare(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to prepare clocks\n");
+ goto err_rst_assert;
+ }
+
+ ret = v4l2_device_register(&pdev->dev, &vpu->v4l2_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register v4l2 device\n");
+ goto err_clk_unprepare;
+ }
+ platform_set_drvdata(pdev, vpu);
+
+ vpu->m2m_dev = v4l2_m2m_init(&vpu_m2m_ops);
+ if (IS_ERR(vpu->m2m_dev)) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to init mem2mem device\n");
+ ret = PTR_ERR(vpu->m2m_dev);
+ goto err_v4l2_unreg;
+ }
+
+ vpu->mdev.dev = vpu->dev;
+ strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
+ strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
+ sizeof(vpu->mdev.bus_info));
+ media_device_init(&vpu->mdev);
+ vpu->mdev.ops = &hantro_m2m_media_ops;
+ vpu->v4l2_dev.mdev = &vpu->mdev;
+
+ ret = hantro_add_enc_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register encoder\n");
+ goto err_m2m_rel;
+ }
+
+ ret = hantro_add_dec_func(vpu);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register decoder\n");
+ goto err_rm_enc_func;
+ }
+
+ ret = media_device_register(&vpu->mdev);
+ if (ret) {
+ v4l2_err(&vpu->v4l2_dev, "Failed to register mem2mem media device\n");
+ goto err_rm_dec_func;
+ }
+
+ return 0;
+
+err_rm_dec_func:
+ hantro_remove_dec_func(vpu);
+err_rm_enc_func:
+ hantro_remove_enc_func(vpu);
+err_m2m_rel:
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+err_v4l2_unreg:
+ v4l2_device_unregister(&vpu->v4l2_dev);
+err_clk_unprepare:
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+err_rst_assert:
+ reset_control_assert(vpu->resets);
+err_pm_disable:
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+ return ret;
+}
+
+static int hantro_remove(struct platform_device *pdev)
+{
+ struct hantro_dev *vpu = platform_get_drvdata(pdev);
+
+ v4l2_info(&vpu->v4l2_dev, "Removing %s\n", pdev->name);
+
+ media_device_unregister(&vpu->mdev);
+ hantro_remove_dec_func(vpu);
+ hantro_remove_enc_func(vpu);
+ media_device_cleanup(&vpu->mdev);
+ v4l2_m2m_release(vpu->m2m_dev);
+ v4l2_device_unregister(&vpu->v4l2_dev);
+ clk_bulk_unprepare(vpu->variant->num_clocks, vpu->clocks);
+ reset_control_assert(vpu->resets);
+ pm_runtime_dont_use_autosuspend(vpu->dev);
+ pm_runtime_disable(vpu->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hantro_runtime_resume(struct device *dev)
+{
+ struct hantro_dev *vpu = dev_get_drvdata(dev);
+
+ if (vpu->variant->runtime_resume)
+ return vpu->variant->runtime_resume(vpu);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops hantro_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(NULL, hantro_runtime_resume, NULL)
+};
+
+static struct platform_driver hantro_driver = {
+ .probe = hantro_probe,
+ .remove = hantro_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(of_hantro_match),
+ .pm = &hantro_pm_ops,
+ },
+};
+module_platform_driver(hantro_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Alpha Lin <Alpha.Lin@Rock-Chips.com>");
+MODULE_AUTHOR("Tomasz Figa <tfiga@chromium.org>");
+MODULE_AUTHOR("Ezequiel Garcia <ezequiel@collabora.com>");
+MODULE_DESCRIPTION("Hantro VPU codec driver");
diff --git a/drivers/media/platform/verisilicon/hantro_g1.c b/drivers/media/platform/verisilicon/hantro_g1.c
new file mode 100644
index 000000000000..0ab1cee62218
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ * Copyright (C) 2021 Collabora Ltd, Emil Velikov <emil.velikov@collabora.com>
+ */
+
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+irqreturn_t hantro_g1_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
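+	/* Anything other than a "decoder ready" interrupt is treated as a decode error. */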
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+void hantro_g1_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+ vdpu_write(vpu, 1, G1_REG_SOFT_RESET);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
new file mode 100644
index 000000000000..9de7f05eff2a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_h264_dec.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip RK3288 VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_g1_regs.h"
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
+static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 reg;
+
+ /* Decoder control register 0. */
+ reg = G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
+ if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
+ reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
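+	/*
+	 * Profiles above Baseline (profile_idc 66) use picture order counts
+	 * and need motion vectors written out when the picture is a reference.
+	 */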
+ if (sps->profile_idc > 66) {
+ reg |= G1_REG_DEC_CTRL0_PICORD_COUNT_E;
+ if (dec_param->nal_ref_idc)
+ reg |= G1_REG_DEC_CTRL0_WRITE_MVS_E;
+ }
+
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
+ dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC))
+ reg |= G1_REG_DEC_CTRL0_PIC_INTERLACE_E;
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)
+ reg |= G1_REG_DEC_CTRL0_PIC_FIELDMODE_E;
+ if (!(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD))
+ reg |= G1_REG_DEC_CTRL0_PIC_TOPFIELD_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
+
+ /* Decoder control register 1. */
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width)) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
+ G1_REG_DEC_CTRL1_REF_FRAMES(sps->max_num_ref_frames);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
+
+ /* Decoder control register 2. */
+ reg = G1_REG_DEC_CTRL2_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
+ G1_REG_DEC_CTRL2_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset);
+
+ if (pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT)
+ reg |= G1_REG_DEC_CTRL2_TYPE1_QUANT_E;
+ if (!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY))
+ reg |= G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
+
+ /* Decoder control register 3. */
+ reg = G1_REG_DEC_CTRL3_START_CODE_E |
+ G1_REG_DEC_CTRL3_INIT_QP(pps->pic_init_qp_minus26 + 26) |
+ G1_REG_DEC_CTRL3_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL3);
+
+ /* Decoder control register 4. */
+ reg = G1_REG_DEC_CTRL4_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
+ G1_REG_DEC_CTRL4_FRAMENUM(dec_param->frame_num) |
+ G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc);
+ if (pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE)
+ reg |= G1_REG_DEC_CTRL4_CABAC_E;
+ if (sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE)
+ reg |= G1_REG_DEC_CTRL4_DIR_8X8_INFER_E;
+ if (sps->profile_idc >= 100 && sps->chroma_format_idc == 0)
+ reg |= G1_REG_DEC_CTRL4_BLACKWHITE_E;
+ if (pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED)
+ reg |= G1_REG_DEC_CTRL4_WEIGHT_PRED_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
+
+ /* Decoder control register 5. */
+ reg = G1_REG_DEC_CTRL5_REFPIC_MK_LEN(dec_param->dec_ref_pic_marking_bit_size) |
+ G1_REG_DEC_CTRL5_IDR_PIC_ID(dec_param->idr_pic_id);
+ if (pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED)
+ reg |= G1_REG_DEC_CTRL5_CONST_INTRA_E;
+ if (pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT)
+ reg |= G1_REG_DEC_CTRL5_FILT_CTRL_PRES;
+ if (pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT)
+ reg |= G1_REG_DEC_CTRL5_RDPIC_CNT_PRES;
+ if (pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE)
+ reg |= G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E;
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC)
+ reg |= G1_REG_DEC_CTRL5_IDR_PIC_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL5);
+
+ /* Decoder control register 6. */
+ reg = G1_REG_DEC_CTRL6_PPS_ID(pps->pic_parameter_set_id) |
+ G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
+ G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
+ G1_REG_DEC_CTRL6_POC_LENGTH(dec_param->pic_order_cnt_bit_size);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL6);
+
+ /* Error concealment register. */
+ vdpu_write_relaxed(vpu, 0, G1_REG_ERR_CONC);
+
+ /* Prediction filter tap register. */
+ vdpu_write_relaxed(vpu,
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_0(1) |
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_1(-5 & 0x3ff) |
+ G1_REG_PRED_FLT_PRED_BC_TAP_0_2(20),
+ G1_REG_PRED_FLT);
+
+ /* Reference picture buffer control register. */
+ vdpu_write_relaxed(vpu, 0, G1_REG_REF_BUF_CTRL);
+
+ /* Reference picture buffer control register 2. */
+ vdpu_write_relaxed(vpu, G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(8),
+ G1_REG_REF_BUF_CTRL2);
+}
+
+static void set_ref(struct hantro_ctx *ctx)
+{
+ const struct v4l2_h264_reference *b0_reflist, *b1_reflist, *p_reflist;
+ struct hantro_dev *vpu = ctx->dev;
+ int reg_num;
+ u32 reg;
+ int i;
+
+ vdpu_write_relaxed(vpu, ctx->h264_dec.dpb_valid, G1_REG_VALID_REF);
+ vdpu_write_relaxed(vpu, ctx->h264_dec.dpb_longterm, G1_REG_LT_REF);
+
+ /*
+ * Set up reference frame picture numbers.
+ *
+	 * Each G1_REG_REF_PIC(x) register contains the numbers of two
+	 * consecutive reference pictures.
+ */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i += 2) {
+ reg = G1_REG_REF_PIC_REFER0_NBR(hantro_h264_get_ref_nbr(ctx, i)) |
+ G1_REG_REF_PIC_REFER1_NBR(hantro_h264_get_ref_nbr(ctx, i + 1));
+ vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(i / 2));
+ }
+
+ b0_reflist = ctx->h264_dec.reflists.b0;
+ b1_reflist = ctx->h264_dec.reflists.b1;
+ p_reflist = ctx->h264_dec.reflists.p;
+
+ /*
+	 * Each G1_REG_BD_REF_PIC(x) register contains three entries from
+	 * each of the forward and backward picture lists.
+ */
+ reg_num = 0;
+ for (i = 0; i < 15; i += 3) {
+ reg = G1_REG_BD_REF_PIC_BINIT_RLIST_F0(b0_reflist[i].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_F1(b0_reflist[i + 1].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_F2(b0_reflist[i + 2].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B0(b1_reflist[i].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B1(b1_reflist[i + 1].index) |
+ G1_REG_BD_REF_PIC_BINIT_RLIST_B2(b1_reflist[i + 2].index);
+ vdpu_write_relaxed(vpu, reg, G1_REG_BD_REF_PIC(reg_num++));
+ }
+
+ /*
+	 * The G1_REG_BD_P_REF_PIC register contains the last entries (index 15)
+	 * of the forward and backward reference picture lists and the first
+	 * four entries of the P forward picture list.
+ */
+ reg = G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(b0_reflist[15].index) |
+ G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(b1_reflist[15].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(p_reflist[0].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(p_reflist[1].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(p_reflist[2].index) |
+ G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(p_reflist[3].index);
+ vdpu_write_relaxed(vpu, reg, G1_REG_BD_P_REF_PIC);
+
+ /*
+ * Each G1_REG_FWD_PIC(x) register contains six consecutive
+	 * entries of the P forward picture list, starting from index 4.
+ */
+ reg_num = 0;
+ for (i = 4; i < HANTRO_H264_DPB_SIZE; i += 6) {
+ reg = G1_REG_FWD_PIC_PINIT_RLIST_F0(p_reflist[i].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F1(p_reflist[i + 1].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F2(p_reflist[i + 2].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F3(p_reflist[i + 3].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F4(p_reflist[i + 4].index) |
+ G1_REG_FWD_PIC_PINIT_RLIST_F5(p_reflist[i + 5].index);
+ vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(reg_num++));
+ }
+
+ /* Set up addresses of DPB buffers. */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
+
+ vdpu_write_relaxed(vpu, dma_addr, G1_REG_ADDR_REF(i));
+ }
+}
+
+static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, src_dma, G1_REG_ADDR_STR);
+
+ /* Destination (decoded frame) buffer. */
+ dst_buf = hantro_get_dst_buf(ctx);
+ dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
+ /* Adjust dma addr to start at second line for bottom field */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DST);
+
+	/* Higher profiles require a DMV buffer appended to the reference frames. */
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
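+		/* 4:2:0 frames use 256 luma + 128 chroma bytes per macroblock. */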
+ unsigned int bytes_per_mb = 384;
+
+		/* For monochrome content the DMV buffer starts directly after the Y plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+
+ /*
+		 * The DMV buffer is split in two for field-encoded frames;
+		 * adjust the offset for the bottom field.
+ */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, G1_REG_ADDR_DIR_MV);
+ }
+
+ /* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
+ vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, G1_REG_ADDR_QTABLE);
+}
+
+int hantro_g1_h264_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ int ret;
+
+ /* Prepare the H264 decoder context. */
+ ret = hantro_h264_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
+
+ /* Configure hardware registers. */
+ src_buf = hantro_get_src_buf(ctx);
+ set_params(ctx, src_buf);
+ set_ref(ctx);
+ set_buffers(ctx, src_buf);
+
+ hantro_end_prepare_run(ctx);
+
+ /* Start decoding! */
+ vdpu_write_relaxed(vpu,
+ G1_REG_CONFIG_DEC_AXI_RD_ID(0xffu) |
+ G1_REG_CONFIG_DEC_TIMEOUT_E |
+ G1_REG_CONFIG_DEC_OUT_ENDIAN |
+ G1_REG_CONFIG_DEC_STRENDIAN_E |
+ G1_REG_CONFIG_DEC_MAX_BURST(16) |
+ G1_REG_CONFIG_DEC_OUTSWAP32_E |
+ G1_REG_CONFIG_DEC_INSWAP32_E |
+ G1_REG_CONFIG_DEC_STRSWAP32_E |
+ G1_REG_CONFIG_DEC_CLK_GATE_E,
+ G1_REG_CONFIG);
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
new file mode 100644
index 000000000000..9aea331e1a3c
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_mpeg2_dec.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_g1_regs.h"
+
+#define G1_SWREG(nr) ((nr) * 4)
+
+#define G1_REG_RLC_VLC_BASE G1_SWREG(12)
+#define G1_REG_DEC_OUT_BASE G1_SWREG(13)
+#define G1_REG_REFER0_BASE G1_SWREG(14)
+#define G1_REG_REFER1_BASE G1_SWREG(15)
+#define G1_REG_REFER2_BASE G1_SWREG(16)
+#define G1_REG_REFER3_BASE G1_SWREG(17)
+#define G1_REG_QTABLE_BASE G1_SWREG(40)
+
+#define G1_REG_DEC_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define G1_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_DEC_INSWAP32_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_DEC_LATENCY(v) (((v) << 11) & GENMASK(16, 11))
+#define G1_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(10) : 0)
+#define G1_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(9) : 0)
+#define G1_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(8) : 0)
+#define G1_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(6) : 0)
+#define G1_REG_DEC_SCMD_DIS(v) ((v) ? BIT(5) : 0)
+#define G1_REG_DEC_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
+
+#define G1_REG_DEC_MODE(v) (((v) << 28) & GENMASK(31, 28))
+#define G1_REG_RLC_MODE_E(v) ((v) ? BIT(27) : 0)
+#define G1_REG_PIC_INTERLACE_E(v) ((v) ? BIT(23) : 0)
+#define G1_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(22) : 0)
+#define G1_REG_PIC_B_E(v) ((v) ? BIT(21) : 0)
+#define G1_REG_PIC_INTER_E(v) ((v) ? BIT(20) : 0)
+#define G1_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FWD_INTERLACE_E(v) ((v) ? BIT(18) : 0)
+#define G1_REG_FILTERING_DIS(v) ((v) ? BIT(14) : 0)
+#define G1_REG_WRITE_MVS_E(v) ((v) ? BIT(12) : 0)
+#define G1_REG_DEC_AXI_WR_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define G1_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define G1_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define G1_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define G1_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define G1_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define G1_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define G1_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define G1_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define G1_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define G1_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define G1_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define G1_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define G1_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define G1_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define G1_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define G1_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define G1_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define G1_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
+#define G1_REG_STARTMB_X(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_STARTMB_Y(v) (((v) << 15) & GENMASK(22, 15))
+
+#define G1_REG_APF_THRESHOLD(v) (((v) << 0) & GENMASK(13, 0))
+
+static void
+hantro_g1_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantisation *q;
+
+ q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, G1_REG_QTABLE_BASE);
+}
+
+static void
+hantro_g1_mpeg2_dec_set_buffers(struct hantro_dev *vpu, struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_ctrl_mpeg2_sequence *seq,
+ const struct v4l2_ctrl_mpeg2_picture *pic)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+
+ switch (pic->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
+ fallthrough;
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, G1_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = hantro_get_dec_buf_addr(ctx, dst_buf);
+ current_addr = addr;
+
+ if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, G1_REG_DEC_OUT_BASE);
+
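+	/* Fall back to the current frame when a reference is missing (e.g. I pictures). */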
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
+ if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
+ pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
+ pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST) ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
+ !(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST))) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, G1_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, G1_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, G1_REG_REFER3_BASE);
+}
+
+int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ /* Apply request controls if any */
+ hantro_start_prepare_run(ctx);
+
+ seq = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ pic = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
+
+ reg = G1_REG_DEC_AXI_RD_ID(0) |
+ G1_REG_DEC_TIMEOUT_E(1) |
+ G1_REG_DEC_STRSWAP32_E(1) |
+ G1_REG_DEC_STRENDIAN_E(1) |
+ G1_REG_DEC_INSWAP32_E(1) |
+ G1_REG_DEC_OUTSWAP32_E(1) |
+ G1_REG_DEC_DATA_DISC_E(0) |
+ G1_REG_DEC_LATENCY(0) |
+ G1_REG_DEC_CLK_GATE_E(1) |
+ G1_REG_DEC_IN_ENDIAN(1) |
+ G1_REG_DEC_OUT_ENDIAN(1) |
+ G1_REG_DEC_ADV_PRE_DIS(0) |
+ G1_REG_DEC_SCMD_DIS(0) |
+ G1_REG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(2));
+
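+	/* Decoding mode 5 selects MPEG-2 on the G1 core. */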
+ reg = G1_REG_DEC_MODE(5) |
+ G1_REG_RLC_MODE_E(0) |
+ G1_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
+ G1_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
+ G1_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
+ G1_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
+ G1_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
+ G1_REG_FWD_INTERLACE_E(0) |
+ G1_REG_FILTERING_DIS(1) |
+ G1_REG_WRITE_MVS_E(0) |
+ G1_REG_DEC_AXI_WR_ID(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(3));
+
+ reg = G1_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ G1_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
+ G1_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ G1_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(4));
+
+ reg = G1_REG_STRM_START_BIT(0) |
+ G1_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
+ G1_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
+ G1_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
+ G1_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
+ G1_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(5));
+
+ reg = G1_REG_INIT_QP(1) |
+ G1_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(6));
+
+ reg = G1_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ G1_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
+ G1_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
+ G1_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
+ G1_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
+ G1_REG_MV_ACCURACY_FWD(1) |
+ G1_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(18));
+
+ reg = G1_REG_STARTMB_X(0) |
+ G1_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(48));
+
+ reg = G1_REG_APF_THRESHOLD(8);
+ vdpu_write_relaxed(vpu, reg, G1_SWREG(55));
+
+ hantro_g1_mpeg2_dec_set_quantisation(vpu, ctx);
+ hantro_g1_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf,
+ seq, pic);
+
+ hantro_end_prepare_run(ctx);
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g1_regs.h b/drivers/media/platform/verisilicon/hantro_g1_regs.h
new file mode 100644
index 000000000000..c623b3b0be18
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_regs.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_G1_REGS_H_
+#define HANTRO_G1_REGS_H_
+
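+/* Software registers are 32 bits wide; swreg N lives at byte offset N * 4. */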
+#define G1_SWREG(nr) ((nr) * 4)
+
+/* Decoder registers. */
+#define G1_REG_INTERRUPT 0x004
+#define G1_REG_INTERRUPT_DEC_PIC_INF BIT(24)
+#define G1_REG_INTERRUPT_DEC_TIMEOUT BIT(18)
+#define G1_REG_INTERRUPT_DEC_SLICE_INT BIT(17)
+#define G1_REG_INTERRUPT_DEC_ERROR_INT BIT(16)
+#define G1_REG_INTERRUPT_DEC_ASO_INT BIT(15)
+#define G1_REG_INTERRUPT_DEC_BUFFER_INT BIT(14)
+#define G1_REG_INTERRUPT_DEC_BUS_INT BIT(13)
+#define G1_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define G1_REG_INTERRUPT_DEC_IRQ BIT(8)
+#define G1_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define G1_REG_INTERRUPT_DEC_E BIT(0)
+#define G1_REG_CONFIG 0x008
+#define G1_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_CONFIG_DEC_TIMEOUT_E BIT(23)
+#define G1_REG_CONFIG_DEC_STRSWAP32_E BIT(22)
+#define G1_REG_CONFIG_DEC_STRENDIAN_E BIT(21)
+#define G1_REG_CONFIG_DEC_INSWAP32_E BIT(20)
+#define G1_REG_CONFIG_DEC_OUTSWAP32_E BIT(19)
+#define G1_REG_CONFIG_DEC_DATA_DISC_E BIT(18)
+#define G1_REG_CONFIG_TILED_MODE_MSB BIT(17)
+#define G1_REG_CONFIG_DEC_OUT_TILED_E BIT(17)
+#define G1_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 11)
+#define G1_REG_CONFIG_DEC_CLK_GATE_E BIT(10)
+#define G1_REG_CONFIG_DEC_IN_ENDIAN BIT(9)
+#define G1_REG_CONFIG_DEC_OUT_ENDIAN BIT(8)
+#define G1_REG_CONFIG_PRIORITY_MODE(x) (((x) & 0x7) << 5)
+#define G1_REG_CONFIG_TILED_MODE_LSB BIT(7)
+#define G1_REG_CONFIG_DEC_ADV_PRE_DIS BIT(6)
+#define G1_REG_CONFIG_DEC_SCMD_DIS BIT(5)
+#define G1_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL0 0x00c
+#define G1_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 28)
+#define G1_REG_DEC_CTRL0_RLC_MODE_E BIT(27)
+#define G1_REG_DEC_CTRL0_SKIP_MODE BIT(26)
+#define G1_REG_DEC_CTRL0_DIVX3_E BIT(25)
+#define G1_REG_DEC_CTRL0_PJPEG_E BIT(24)
+#define G1_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(23)
+#define G1_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(22)
+#define G1_REG_DEC_CTRL0_PIC_B_E BIT(21)
+#define G1_REG_DEC_CTRL0_PIC_INTER_E BIT(20)
+#define G1_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(19)
+#define G1_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(18)
+#define G1_REG_DEC_CTRL0_SORENSON_E BIT(17)
+#define G1_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(16)
+#define G1_REG_DEC_CTRL0_DEC_OUT_DIS BIT(15)
+#define G1_REG_DEC_CTRL0_FILTERING_DIS BIT(14)
+#define G1_REG_DEC_CTRL0_WEBP_E BIT(13)
+#define G1_REG_DEC_CTRL0_MVC_E BIT(13)
+#define G1_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(13)
+#define G1_REG_DEC_CTRL0_WRITE_MVS_E BIT(12)
+#define G1_REG_DEC_CTRL0_REFTOPFIRST_E BIT(11)
+#define G1_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(10)
+#define G1_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
+#define G1_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
+#define G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
+/* Set the AXI ID to 0xff to get an auto-generated ID and avoid possible conflicts */
+#define G1_REG_DEC_CTRL0_DEC_AXI_AUTO G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0xff)
+#define G1_REG_DEC_CTRL1 0x010
+#define G1_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define G1_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define G1_REG_DEC_CTRL1_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define G1_REG_DEC_CTRL1_ALT_SCAN_E BIT(6)
+#define G1_REG_DEC_CTRL1_TOPFIELDFIRST_E BIT(5)
+#define G1_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define G1_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define G1_REG_DEC_CTRL1_PIC_REFER_FLAG BIT(0)
+#define G1_REG_DEC_CTRL2 0x014
+#define G1_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL2_SYNC_MARKER_E BIT(25)
+#define G1_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(24)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define G1_REG_DEC_CTRL2_INTRADC_VLC_THR(x) (((x) & 0x7) << 16)
+#define G1_REG_DEC_CTRL2_VOP_TIME_INCR(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL2_DQ_PROFILE BIT(24)
+#define G1_REG_DEC_CTRL2_DQBI_LEVEL BIT(23)
+#define G1_REG_DEC_CTRL2_RANGE_RED_FRM_E BIT(22)
+#define G1_REG_DEC_CTRL2_FAST_UVMC_E BIT(20)
+#define G1_REG_DEC_CTRL2_TRANSDCTAB BIT(17)
+#define G1_REG_DEC_CTRL2_TRANSACFRM(x) (((x) & 0x3) << 15)
+#define G1_REG_DEC_CTRL2_TRANSACFRM2(x) (((x) & 0x3) << 13)
+#define G1_REG_DEC_CTRL2_MB_MODE_TAB(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL2_MVTAB(x) (((x) & 0x7) << 7)
+#define G1_REG_DEC_CTRL2_CBPTAB(x) (((x) & 0x7) << 4)
+#define G1_REG_DEC_CTRL2_2MV_BLK_PAT_TAB(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_4MV_BLK_PAT_TAB(x) (((x) & 0x3) << 0)
+#define G1_REG_DEC_CTRL2_QSCALE_TYPE BIT(24)
+#define G1_REG_DEC_CTRL2_CON_MV_E BIT(4)
+#define G1_REG_DEC_CTRL2_INTRA_DC_PREC(x) (((x) & 0x3) << 2)
+#define G1_REG_DEC_CTRL2_INTRA_VLC_TAB BIT(1)
+#define G1_REG_DEC_CTRL2_FRAME_PRED_DCT BIT(0)
+#define G1_REG_DEC_CTRL2_JPEG_QTABLES(x) (((x) & 0x3) << 11)
+#define G1_REG_DEC_CTRL2_JPEG_MODE(x) (((x) & 0x7) << 8)
+#define G1_REG_DEC_CTRL2_JPEG_FILRIGHT_E BIT(7)
+#define G1_REG_DEC_CTRL2_JPEG_STREAM_ALL BIT(6)
+#define G1_REG_DEC_CTRL2_CR_AC_VLCTABLE BIT(5)
+#define G1_REG_DEC_CTRL2_CB_AC_VLCTABLE BIT(4)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE BIT(3)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE BIT(2)
+#define G1_REG_DEC_CTRL2_CR_DC_VLCTABLE3 BIT(1)
+#define G1_REG_DEC_CTRL2_CB_DC_VLCTABLE3 BIT(0)
+#define G1_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL2_HUFFMAN_E BIT(17)
+#define G1_REG_DEC_CTRL2_MULTISTREAM_E BIT(16)
+#define G1_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL2_ALPHA_OFFSET(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL2_BETA_OFFSET(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL3 0x018
+#define G1_REG_DEC_CTRL3_START_CODE_E BIT(31)
+#define G1_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define G1_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN_EXT(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL4 0x01c
+#define G1_REG_DEC_CTRL4_CABAC_E BIT(31)
+#define G1_REG_DEC_CTRL4_BLACKWHITE_E BIT(30)
+#define G1_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(29)
+#define G1_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(28)
+#define G1_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 26)
+#define G1_REG_DEC_CTRL4_AVS_H264_H_EXT BIT(25)
+#define G1_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define G1_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL4_BITPLANE0_E BIT(31)
+#define G1_REG_DEC_CTRL4_BITPLANE1_E BIT(30)
+#define G1_REG_DEC_CTRL4_BITPLANE2_E BIT(29)
+#define G1_REG_DEC_CTRL4_ALT_PQUANT(x) (((x) & 0x1f) << 24)
+#define G1_REG_DEC_CTRL4_DQ_EDGES(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_TTMBF BIT(19)
+#define G1_REG_DEC_CTRL4_PQINDEX(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define G1_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define G1_REG_DEC_CTRL4_UNIQP_E BIT(11)
+#define G1_REG_DEC_CTRL4_HALFQP_E BIT(10)
+#define G1_REG_DEC_CTRL4_TTFRM(x) (((x) & 0x3) << 8)
+#define G1_REG_DEC_CTRL4_2ND_BYTE_EMUL_E BIT(7)
+#define G1_REG_DEC_CTRL4_DQUANT_E BIT(6)
+#define G1_REG_DEC_CTRL4_VC1_ADV_E BIT(5)
+#define G1_REG_DEC_CTRL4_PJPEG_FILDOWN_E BIT(26)
+#define G1_REG_DEC_CTRL4_PJPEG_WDIV8 BIT(25)
+#define G1_REG_DEC_CTRL4_PJPEG_HDIV8 BIT(24)
+#define G1_REG_DEC_CTRL4_PJPEG_AH(x) (((x) & 0xf) << 20)
+#define G1_REG_DEC_CTRL4_PJPEG_AL(x) (((x) & 0xf) << 16)
+#define G1_REG_DEC_CTRL4_PJPEG_SS(x) (((x) & 0xff) << 8)
+#define G1_REG_DEC_CTRL4_PJPEG_SE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define G1_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define G1_REG_DEC_CTRL4_CH_MV_RES BIT(13)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH0(x) (((x) & 0x7) << 9)
+#define G1_REG_DEC_CTRL4_INIT_DC_MATCH1(x) (((x) & 0x7) << 6)
+#define G1_REG_DEC_CTRL4_VP7_VERSION BIT(5)
+#define G1_REG_DEC_CTRL5 0x020
+#define G1_REG_DEC_CTRL5_CONST_INTRA_E BIT(31)
+#define G1_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(30)
+#define G1_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(29)
+#define G1_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(28)
+#define G1_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 17)
+#define G1_REG_DEC_CTRL5_IDR_PIC_E BIT(16)
+#define G1_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_MV_SCALEFACTOR(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL5_REF_DIST_FWD(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL5_REF_DIST_BWD(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL5_LOOP_FILT_LIMIT(x) (((x) & 0xf) << 14)
+#define G1_REG_DEC_CTRL5_VARIANCE_TEST_E BIT(13)
+#define G1_REG_DEC_CTRL5_MV_THRESHOLD(x) (((x) & 0x7) << 10)
+#define G1_REG_DEC_CTRL5_VAR_THRESHOLD(x) (((x) & 0x3ff) << 0)
+#define G1_REG_DEC_CTRL5_DIVX_IDCT_E BIT(8)
+#define G1_REG_DEC_CTRL5_DIVX3_SLICE_SIZE(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL5_PJPEG_REST_FREQ(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL5_RV_PROFILE(x) (((x) & 0x3) << 30)
+#define G1_REG_DEC_CTRL5_RV_OSV_QUANT(x) (((x) & 0x3) << 28)
+#define G1_REG_DEC_CTRL5_RV_FWD_SCALE(x) (((x) & 0x3fff) << 14)
+#define G1_REG_DEC_CTRL5_RV_BWD_SCALE(x) (((x) & 0x3fff) << 0)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP0(x) (((x) & 0xffff) << 16)
+#define G1_REG_DEC_CTRL5_INIT_DC_COMP1(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6 0x024
+#define G1_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define G1_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define G1_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define G1_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define G1_REG_DEC_CTRL6_ICOMP0_E BIT(24)
+#define G1_REG_DEC_CTRL6_ISCALE0(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL6_ISHIFT0(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define G1_REG_DEC_CTRL6_PIC_SLICE_AM(x) (((x) & 0x1fff) << 0)
+#define G1_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define G1_REG_FWD_PIC(i) (0x028 + ((i) * 0x4))
+#define G1_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define G1_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_FWD_PIC1_ICOMP1_E BIT(24)
+#define G1_REG_FWD_PIC1_ISCALE1(x) (((x) & 0xff) << 16)
+#define G1_REG_FWD_PIC1_ISHIFT1(x) (((x) & 0xffff) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define G1_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define G1_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define G1_REG_DEC_CTRL7 0x02c
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F15(x) (((x) & 0x1f) << 25)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F14(x) (((x) & 0x1f) << 20)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F13(x) (((x) & 0x1f) << 15)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F12(x) (((x) & 0x1f) << 10)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F11(x) (((x) & 0x1f) << 5)
+#define G1_REG_DEC_CTRL7_PINIT_RLIST_F10(x) (((x) & 0x1f) << 0)
+#define G1_REG_DEC_CTRL7_ICOMP2_E BIT(24)
+#define G1_REG_DEC_CTRL7_ISCALE2(x) (((x) & 0xff) << 16)
+#define G1_REG_DEC_CTRL7_ISHIFT2(x) (((x) & 0xffff) << 0)
+#define G1_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define G1_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define G1_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define G1_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define G1_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define G1_REG_ADDR_STR 0x030
+#define G1_REG_ADDR_DST 0x034
+#define G1_REG_ADDR_REF(i) (0x038 + ((i) * 0x4))
+#define G1_REG_ADDR_REF_FIELD_E BIT(1)
+#define G1_REG_ADDR_REF_TOPC_E BIT(0)
+#define G1_REG_REF_PIC(i) (0x078 + ((i) * 0x4))
+#define G1_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define G1_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define G1_REG_REF_PIC_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define G1_REG_REF_PIC_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define G1_REG_REF_PIC_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define G1_REG_REF_PIC_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define G1_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define G1_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define G1_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define G1_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define G1_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define G1_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define G1_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define G1_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define G1_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define G1_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define G1_REG_LT_REF 0x098
+#define G1_REG_VALID_REF 0x09c
+#define G1_REG_ADDR_QTABLE 0x0a0
+#define G1_REG_ADDR_DIR_MV 0x0a4
+#define G1_REG_BD_REF_PIC(i) (0x0a8 + ((i) * 0x4))
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define G1_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define G1_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define G1_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define G1_REG_BD_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define G1_REG_BD_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define G1_REG_BD_P_REF_PIC 0x0bc
+#define G1_REG_BD_P_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 25)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 20)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 15)
+#define G1_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 10)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 5)
+#define G1_REG_BD_P_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 0)
+#define G1_REG_ERR_CONC 0x0c0
+#define G1_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 23)
+#define G1_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 15)
+#define G1_REG_PRED_FLT 0x0c4
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define G1_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define G1_REG_REF_BUF_CTRL 0x0cc
+#define G1_REG_REF_BUF_CTRL_REFBU_E BIT(31)
+#define G1_REG_REF_BUF_CTRL_REFBU_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL_REFBU_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL_REFBU_EVAL_E BIT(13)
+#define G1_REG_REF_BUF_CTRL_REFBU_FPARMOD_E BIT(12)
+#define G1_REG_REF_BUF_CTRL_REFBU_Y_OFFSET(x) (((x) & 0x1ff) << 0)
+#define G1_REG_REF_BUF_CTRL2 0x0dc
+#define G1_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(31)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 19)
+#define G1_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 14)
+#define G1_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 0)
+#define G1_REG_SOFT_RESET 0x194
+
+/* Post-processor registers. */
+#define G1_REG_PP_INTERRUPT G1_SWREG(60)
+#define G1_REG_PP_READY_IRQ BIT(12)
+#define G1_REG_PP_IRQ BIT(8)
+#define G1_REG_PP_IRQ_DIS BIT(4)
+#define G1_REG_PP_PIPELINE_EN BIT(1)
+#define G1_REG_PP_EXTERNAL_TRIGGER BIT(0)
+#define G1_REG_PP_DEV_CONFIG G1_SWREG(61)
+#define G1_REG_PP_AXI_RD_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define G1_REG_PP_AXI_WR_ID(v) (((v) << 16) & GENMASK(23, 16))
+#define G1_REG_PP_INSWAP32_E(v) ((v) ? BIT(10) : 0)
+#define G1_REG_PP_DATA_DISC_E(v) ((v) ? BIT(9) : 0)
+#define G1_REG_PP_CLK_GATE_E(v) ((v) ? BIT(8) : 0)
+#define G1_REG_PP_IN_ENDIAN(v) ((v) ? BIT(7) : 0)
+#define G1_REG_PP_OUT_ENDIAN(v) ((v) ? BIT(6) : 0)
+#define G1_REG_PP_OUTSWAP32_E(v) ((v) ? BIT(5) : 0)
+#define G1_REG_PP_MAX_BURST(v) (((v) << 0) & GENMASK(4, 0))
+#define G1_REG_PP_IN_LUMA_BASE G1_SWREG(63)
+#define G1_REG_PP_IN_CB_BASE G1_SWREG(64)
+#define G1_REG_PP_IN_CR_BASE G1_SWREG(65)
+#define G1_REG_PP_OUT_LUMA_BASE G1_SWREG(66)
+#define G1_REG_PP_OUT_CHROMA_BASE G1_SWREG(67)
+#define G1_REG_PP_CONTRAST_ADJUST G1_SWREG(68)
+#define G1_REG_PP_COLOR_CONVERSION G1_SWREG(69)
+#define G1_REG_PP_COLOR_CONVERSION0 G1_SWREG(70)
+#define G1_REG_PP_COLOR_CONVERSION1 G1_SWREG(71)
+#define G1_REG_PP_INPUT_SIZE G1_SWREG(72)
+#define G1_REG_PP_INPUT_SIZE_HEIGHT(v) (((v) << 9) & GENMASK(16, 9))
+#define G1_REG_PP_INPUT_SIZE_WIDTH(v) (((v) << 0) & GENMASK(8, 0))
+#define G1_REG_PP_SCALING0 G1_SWREG(79)
+#define G1_REG_PP_PADD_R(v) (((v) << 23) & GENMASK(27, 23))
+#define G1_REG_PP_PADD_G(v) (((v) << 18) & GENMASK(22, 18))
+#define G1_REG_PP_RANGEMAP_Y(v) ((v) ? BIT(31) : 0)
+#define G1_REG_PP_RANGEMAP_C(v) ((v) ? BIT(30) : 0)
+#define G1_REG_PP_YCBCR_RANGE(v) ((v) ? BIT(29) : 0)
+#define G1_REG_PP_RGB_16(v) ((v) ? BIT(28) : 0)
+#define G1_REG_PP_SCALING1 G1_SWREG(80)
+#define G1_REG_PP_PADD_B(v) (((v) << 18) & GENMASK(22, 18))
+#define G1_REG_PP_MASK_R G1_SWREG(82)
+#define G1_REG_PP_MASK_G G1_SWREG(83)
+#define G1_REG_PP_MASK_B G1_SWREG(84)
+#define G1_REG_PP_CONTROL G1_SWREG(85)
+#define G1_REG_PP_CONTROL_IN_FMT(v) (((v) << 29) & GENMASK(31, 29))
+#define G1_REG_PP_CONTROL_OUT_FMT(v) (((v) << 26) & GENMASK(28, 26))
+#define G1_REG_PP_CONTROL_OUT_HEIGHT(v) (((v) << 15) & GENMASK(25, 15))
+#define G1_REG_PP_CONTROL_OUT_WIDTH(v) (((v) << 4) & GENMASK(14, 4))
+#define G1_REG_PP_MASK1_ORIG_WIDTH G1_SWREG(88)
+#define G1_REG_PP_ORIG_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define G1_REG_PP_DISPLAY_WIDTH G1_SWREG(92)
+#define G1_REG_PP_FUSE G1_SWREG(99)
+
+#endif /* HANTRO_G1_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c b/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
new file mode 100644
index 000000000000..851eb67f19f5
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g1_vp8_dec.c
@@ -0,0 +1,511 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP8 codec driver
+ *
+ * Copyright (C) 2019 Rockchip Electronics Co., Ltd.
+ * ZhiChao Yu <zhichao.yu@rock-chips.com>
+ *
+ * Copyright (C) 2019 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_hw.h"
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+/* DCT partition base address regs */
+static const struct hantro_reg vp8_dec_dct_base[8] = {
+ { G1_REG_ADDR_STR, 0, 0xffffffff },
+ { G1_REG_ADDR_REF(8), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(9), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(10), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(11), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(12), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(14), 0, 0xffffffff },
+ { G1_REG_ADDR_REF(15), 0, 0xffffffff },
+};
+
+/* Loop filter level regs */
+static const struct hantro_reg vp8_dec_lf_level[4] = {
+ { G1_REG_REF_PIC(2), 18, 0x3f },
+ { G1_REG_REF_PIC(2), 12, 0x3f },
+ { G1_REG_REF_PIC(2), 6, 0x3f },
+ { G1_REG_REF_PIC(2), 0, 0x3f },
+};
+
+/* Macroblock loop filter level adjustment regs */
+static const struct hantro_reg vp8_dec_mb_adj[4] = {
+ { G1_REG_REF_PIC(0), 21, 0x7f },
+ { G1_REG_REF_PIC(0), 14, 0x7f },
+ { G1_REG_REF_PIC(0), 7, 0x7f },
+ { G1_REG_REF_PIC(0), 0, 0x7f },
+};
+
+/* Reference frame adjustment regs */
+static const struct hantro_reg vp8_dec_ref_adj[4] = {
+ { G1_REG_REF_PIC(1), 21, 0x7f },
+ { G1_REG_REF_PIC(1), 14, 0x7f },
+ { G1_REG_REF_PIC(1), 7, 0x7f },
+ { G1_REG_REF_PIC(1), 0, 0x7f },
+};
+
+/* Quantizer */
+static const struct hantro_reg vp8_dec_quant[4] = {
+ { G1_REG_REF_PIC(3), 11, 0x7ff },
+ { G1_REG_REF_PIC(3), 0, 0x7ff },
+ { G1_REG_BD_REF_PIC(4), 11, 0x7ff },
+ { G1_REG_BD_REF_PIC(4), 0, 0x7ff },
+};
+
+/* Quantizer delta regs */
+static const struct hantro_reg vp8_dec_quant_delta[5] = {
+ { G1_REG_REF_PIC(3), 27, 0x1f },
+ { G1_REG_REF_PIC(3), 22, 0x1f },
+ { G1_REG_BD_REF_PIC(4), 27, 0x1f },
+ { G1_REG_BD_REF_PIC(4), 22, 0x1f },
+ { G1_REG_BD_P_REF_PIC, 27, 0x1f },
+};
+
+/* DCT partition start bits regs */
+static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
+ { G1_REG_DEC_CTRL2, 26, 0x3f }, { G1_REG_DEC_CTRL4, 26, 0x3f },
+ { G1_REG_DEC_CTRL4, 20, 0x3f }, { G1_REG_DEC_CTRL7, 24, 0x3f },
+ { G1_REG_DEC_CTRL7, 18, 0x3f }, { G1_REG_DEC_CTRL7, 12, 0x3f },
+ { G1_REG_DEC_CTRL7, 6, 0x3f }, { G1_REG_DEC_CTRL7, 0, 0x3f },
+};
+
+/* Precision filter tap regs */
+static const struct hantro_reg vp8_dec_pred_bc_tap[8][4] = {
+ {
+ { G1_REG_PRED_FLT, 22, 0x3ff },
+ { G1_REG_PRED_FLT, 12, 0x3ff },
+ { G1_REG_PRED_FLT, 2, 0x3ff },
+ { G1_REG_REF_PIC(4), 22, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(4), 12, 0x3ff },
+ { G1_REG_REF_PIC(4), 2, 0x3ff },
+ { G1_REG_REF_PIC(5), 22, 0x3ff },
+ { G1_REG_REF_PIC(5), 12, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(5), 2, 0x3ff },
+ { G1_REG_REF_PIC(6), 22, 0x3ff },
+ { G1_REG_REF_PIC(6), 12, 0x3ff },
+ { G1_REG_REF_PIC(6), 2, 0x3ff },
+ },
+ {
+ { G1_REG_REF_PIC(7), 22, 0x3ff },
+ { G1_REG_REF_PIC(7), 12, 0x3ff },
+ { G1_REG_REF_PIC(7), 2, 0x3ff },
+ { G1_REG_LT_REF, 22, 0x3ff },
+ },
+ {
+ { G1_REG_LT_REF, 12, 0x3ff },
+ { G1_REG_LT_REF, 2, 0x3ff },
+ { G1_REG_VALID_REF, 22, 0x3ff },
+ { G1_REG_VALID_REF, 12, 0x3ff },
+ },
+ {
+ { G1_REG_VALID_REF, 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(0), 2, 0x3ff },
+ },
+ {
+ { G1_REG_BD_REF_PIC(1), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(1), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(1), 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(2), 22, 0x3ff },
+ },
+ {
+ { G1_REG_BD_REF_PIC(2), 12, 0x3ff },
+ { G1_REG_BD_REF_PIC(2), 2, 0x3ff },
+ { G1_REG_BD_REF_PIC(3), 22, 0x3ff },
+ { G1_REG_BD_REF_PIC(3), 12, 0x3ff },
+ },
+};
+
+/*
+ * Set loop filters
+ */
+static void cfg_lf(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ const struct v4l2_vp8_loop_filter *lf = &hdr->lf;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+ u32 reg;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 lf_level = clamp(lf->level + seg->lf_update[i],
+ 0, 63);
+
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i],
+ seg->lf_update[i]);
+ }
+
+ reg = G1_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
+ if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
+ reg |= G1_REG_REF_PIC_FILT_TYPE_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_REF_PIC(0));
+
+ if (lf->flags & V4L2_VP8_LF_ADJ_ENABLE) {
+ for (i = 0; i < 4; i++) {
+ hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
+ lf->mb_mode_delta[i]);
+ hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
+ lf->ref_frm_delta[i]);
+ }
+ }
+}
+
+/*
+ * Set quantization parameters
+ */
+static void cfg_qp(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_quantization *q = &hdr->quant;
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
+ 0, 127);
+
+ hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_quant[i],
+ seg->quant_update[i]);
+ }
+
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
+}
+
+/*
+ * set control partition and DCT partition regs
+ *
+ * VP8 frame stream data layout:
+ *
+ * first_part_size dct_part_sizes[0]
+ * ^ ^
+ * src_dma | |
+ * ^ +--------+------+ +-----+-----+
+ * | | control part | | |
+ * +--------+----------------+------------------+-----------+-----+-----------+
+ * | tag 3B | extra 7B | hdr | mb_data | DCT sz | DCT part0 | ... | DCT partn |
+ * +--------+-----------------------------------+-----------+-----+-----------+
+ * | | | |
+ * v +----+---+ v
+ * mb_start | src_dma_end
+ * v
+ * DCT size part
+ * (num_dct-1)*3B
+ * Note:
+ * 1. only key-frames have the extra 7 bytes
+ * 2. all offsets are based on src_dma
+ * 3. the number of DCT parts is 1, 2, 4 or 8
+ * 4. addresses written to the VPU must be 64-bit aligned
+ */
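+/*
+ * Worked example (illustrative numbers only, assuming DEC_8190_ALIGN_MASK
+ * is 0x7, i.e. 64-bit alignment): for a key frame with
+ * first_part_header_bits = 30, first_part_size = 20, num_dct_parts = 2 and
+ * dct_part_sizes = {100, 200}:
+ *   first_part_offset  = 10 (tag 3B + extra 7B)
+ *   mb_offset_bits     = 10 * 8 + 30 + 8 = 118
+ *   mb_offset_bytes    = 118 / 8 = 14
+ *   mb_start_bits      = 118 - (14 & ~0x7) * 8 = 54
+ *   mb_size            = 20 - (14 - 10) + (14 & 0x7) = 22
+ *   dct_size_part_size = (2 - 1) * 3 = 3
+ *   dct_part_offset    = 10 + 20 = 30
+ *   dct_part_total_len = 100 + 200 + 3 + (30 & 0x7) = 309
+ */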
+static void cfg_parts(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
+ u32 first_part_offset = V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
+ u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
+ u32 dct_size_part_size, dct_part_offset;
+ struct hantro_reg reg;
+ dma_addr_t src_dma;
+ u32 dct_part_total_len = 0;
+ u32 count = 0;
+ unsigned int i;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ /*
+ * Calculate control partition mb data info
+ * @first_part_header_bits: bit offset of mb data from the start of
+ * the first partition
+ * @mb_offset_bits: bit offset of mb data from the src_dma
+ * base address
+ * @mb_offset_bytes: byte offset of mb data from the src_dma
+ * base address
+ * @mb_start_bits: bit offset of mb data from its 64-bit aligned
+ * start address
+ */
+ mb_offset_bits = first_part_offset * 8 +
+ hdr->first_part_header_bits + 8;
+ mb_offset_bytes = mb_offset_bits / 8;
+ mb_start_bits = mb_offset_bits -
+ (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
+ mb_size = hdr->first_part_size -
+ (mb_offset_bytes - first_part_offset) +
+ (mb_offset_bytes & DEC_8190_ALIGN_MASK);
+
+ /* Macroblock data aligned base addr */
+ vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK))
+ + src_dma, G1_REG_ADDR_REF(13));
+
+ /* Macroblock data start bits */
+ reg.base = G1_REG_DEC_CTRL2;
+ reg.mask = 0x3f;
+ reg.shift = 18;
+ hantro_reg_write(vpu, &reg, mb_start_bits);
+
+ /* Macroblock aligned data length */
+ reg.base = G1_REG_DEC_CTRL6;
+ reg.mask = 0x3fffff;
+ reg.shift = 0;
+ hantro_reg_write(vpu, &reg, mb_size + 1);
+
+ /*
+ * Calculate DCT partition info
+ * @dct_size_part_size: size of the area holding the DCT partition sizes;
+ * every DCT partition except the last one uses 3 bytes
+ * to store its size
+ * @dct_part_offset: byte offset of the DCT partitions from the src_dma base addr
+ * @dct_part_total_len: total size of all DCT parts
+ */
+ dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
+ dct_part_offset = first_part_offset + hdr->first_part_size;
+ for (i = 0; i < hdr->num_dct_parts; i++)
+ dct_part_total_len += hdr->dct_part_sizes[i];
+ dct_part_total_len += dct_size_part_size;
+ dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
+
+ /* Number of DCT partitions */
+ reg.base = G1_REG_DEC_CTRL6;
+ reg.mask = 0xf;
+ reg.shift = 24;
+ hantro_reg_write(vpu, &reg, hdr->num_dct_parts - 1);
+
+ /* DCT partition length */
+ vdpu_write_relaxed(vpu,
+ G1_REG_DEC_CTRL3_STREAM_LEN(dct_part_total_len),
+ G1_REG_DEC_CTRL3);
+
+ /* DCT partitions base address */
+ for (i = 0; i < hdr->num_dct_parts; i++) {
+ u32 byte_offset = dct_part_offset + dct_size_part_size + count;
+ u32 base_addr = byte_offset + src_dma;
+
+ hantro_reg_write(vpu, &vp8_dec_dct_base[i],
+ base_addr & (~DEC_8190_ALIGN_MASK));
+
+ hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
+ (byte_offset & DEC_8190_ALIGN_MASK) * 8);
+
+ count += hdr->dct_part_sizes[i];
+ }
+}
+
+/*
+ * Set the prediction filter taps.
+ * The normal 6-tap filter coefficients are used.
+ */
+static void cfg_tap(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_reg reg;
+ u32 val = 0;
+ int i, j;
+
+ reg.base = G1_REG_BD_REF_PIC(3);
+ reg.mask = 0xf;
+
+ if ((hdr->version & 0x03) != 0)
+ return; /* Tap filter not used. */
+
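+ /*
+ * Each of the 8 filter sets has 6 taps: taps 1..4 go to the per-set
+ * registers below, while taps 0 and 5 are packed into a 4-bit field
+ * ((tap0 << 2) | tap5) that is only written for sets 2, 4 and 6.
+ */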
+ for (i = 0; i < 8; i++) {
+ val = (hantro_vp8_dec_mc_filter[i][0] << 2) |
+ hantro_vp8_dec_mc_filter[i][5];
+
+ for (j = 0; j < 4; j++)
+ hantro_reg_write(vpu, &vp8_dec_pred_bc_tap[i][j],
+ hantro_vp8_dec_mc_filter[i][j + 1]);
+
+ switch (i) {
+ case 2:
+ reg.shift = 8;
+ break;
+ case 4:
+ reg.shift = 4;
+ break;
+ case 6:
+ reg.shift = 0;
+ break;
+ default:
+ continue;
+ }
+
+ hantro_reg_write(vpu, &reg, val);
+ }
+}
+
+static void cfg_ref(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t ref;
+
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
+ if (!ref) {
+ vpu_debug(0, "failed to find last frame ts=%llu\n",
+ hdr->last_frame_ts);
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ }
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(0));
+
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
+ if (!ref && hdr->golden_frame_ts)
+ vpu_debug(0, "failed to find golden frame ts=%llu\n",
+ hdr->golden_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
+ ref |= G1_REG_ADDR_REF_TOPC_E;
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(4));
+
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
+ if (!ref && hdr->alt_frame_ts)
+ vpu_debug(0, "failed to find alt frame ts=%llu\n",
+ hdr->alt_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
+ ref |= G1_REG_ADDR_REF_TOPC_E;
+ vdpu_write_relaxed(vpu, ref, G1_REG_ADDR_REF(5));
+}
+
+static void cfg_buffers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t dst_dma;
+ u32 reg;
+
+ /* Set probability table buffer address */
+ vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
+ G1_REG_ADDR_QTABLE);
+
+ /* Set segment map address */
+ reg = G1_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED) {
+ reg |= G1_REG_FWD_PIC1_SEGMENT_E;
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
+ reg |= G1_REG_FWD_PIC1_SEGMENT_UPD_E;
+ }
+ vdpu_write_relaxed(vpu, reg, G1_REG_FWD_PIC(0));
+
+ dst_dma = hantro_get_dec_buf_addr(ctx, &vb2_dst->vb2_buf);
+ vdpu_write_relaxed(vpu, dst_dma, G1_REG_ADDR_DST);
+}
+
+int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp8_frame *hdr;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ size_t height = ctx->dst_fmt.height;
+ size_t width = ctx->dst_fmt.width;
+ u32 mb_width, mb_height;
+ u32 reg;
+
+ hantro_start_prepare_run(ctx);
+
+ hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
+ if (WARN_ON(!hdr))
+ return -EINVAL;
+
+ /* Reset segment_map buffer in keyframe */
+ if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
+ memset(ctx->vp8_dec.segment_map.cpu, 0,
+ ctx->vp8_dec.segment_map.size);
+
+ hantro_vp8_prob_update(ctx, hdr);
+
+ reg = G1_REG_CONFIG_DEC_TIMEOUT_E |
+ G1_REG_CONFIG_DEC_STRENDIAN_E |
+ G1_REG_CONFIG_DEC_INSWAP32_E |
+ G1_REG_CONFIG_DEC_STRSWAP32_E |
+ G1_REG_CONFIG_DEC_OUTSWAP32_E |
+ G1_REG_CONFIG_DEC_CLK_GATE_E |
+ G1_REG_CONFIG_DEC_IN_ENDIAN |
+ G1_REG_CONFIG_DEC_OUT_ENDIAN |
+ G1_REG_CONFIG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, G1_REG_CONFIG);
+
+ reg = G1_REG_DEC_CTRL0_DEC_MODE(10) |
+ G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
+ if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
+ reg |= G1_REG_DEC_CTRL0_PIC_INTER_E;
+ if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
+ reg |= G1_REG_DEC_CTRL0_SKIP_MODE;
+ if (hdr->lf.level == 0)
+ reg |= G1_REG_DEC_CTRL0_FILTERING_DIS;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL0);
+
+ /* Frame dimensions */
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
+ reg = G1_REG_DEC_CTRL1_PIC_MB_WIDTH(mb_width) |
+ G1_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(mb_height) |
+ G1_REG_DEC_CTRL1_PIC_MB_W_EXT(mb_width >> 9) |
+ G1_REG_DEC_CTRL1_PIC_MB_H_EXT(mb_height >> 8);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL1);
+
+ /* Boolean decoder */
+ reg = G1_REG_DEC_CTRL2_BOOLEAN_RANGE(hdr->coder_state.range)
+ | G1_REG_DEC_CTRL2_BOOLEAN_VALUE(hdr->coder_state.value);
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL2);
+
+ reg = 0;
+ if (hdr->version != 3)
+ reg |= G1_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
+ if (hdr->version & 0x3)
+ reg |= G1_REG_DEC_CTRL4_BILIN_MC_E;
+ vdpu_write_relaxed(vpu, reg, G1_REG_DEC_CTRL4);
+
+ cfg_lf(ctx, hdr);
+ cfg_qp(ctx, hdr);
+ cfg_parts(ctx, hdr);
+ cfg_tap(ctx, hdr);
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+ cfg_ref(ctx, hdr, vb2_dst);
+ cfg_buffers(ctx, hdr, vb2_dst);
+
+ hantro_end_prepare_run(ctx);
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_E, G1_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2.c b/drivers/media/platform/verisilicon/hantro_g2.c
new file mode 100644
index 000000000000..ee5f14c5f8f2
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2.c
@@ -0,0 +1,44 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd, Andrzej Pietrasiewicz <andrzej.p@collabora.com>
+ */
+
+#include "hantro_hw.h"
+#include "hantro_g2_regs.h"
+
+void hantro_g2_check_idle(struct hantro_dev *vpu)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ u32 status;
+
+ /* Make sure the VPU is idle */
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ if (status & G2_REG_INTERRUPT_DEC_E) {
+ dev_warn(vpu->dev, "device still running, aborting\n");
+ status |= G2_REG_INTERRUPT_DEC_ABORT_E | G2_REG_INTERRUPT_DEC_IRQ_DIS;
+ vdpu_write(vpu, status, G2_REG_INTERRUPT);
+ }
+ }
+}
+
+irqreturn_t hantro_g2_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G2_REG_INTERRUPT);
+ state = (status & G2_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G2_REG_INTERRUPT);
+ vdpu_write(vpu, G2_REG_CONFIG_DEC_CLK_GATE_E, G2_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
new file mode 100644
index 000000000000..233ecd863d5f
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU HEVC codec driver
+ *
+ * Copyright (C) 2020 Safran Passenger Innovations LLC
+ */
+
+#include "hantro_hw.h"
+#include "hantro_g2_regs.h"
+
+#define G2_ALIGN 16
+
+static size_t hantro_hevc_chroma_offset(struct hantro_ctx *ctx)
+{
+ return ctx->dst_fmt.width * ctx->dst_fmt.height;
+}
+
+static size_t hantro_hevc_motion_vectors_offset(struct hantro_ctx *ctx)
+{
+ size_t cr_offset = hantro_hevc_chroma_offset(ctx);
+
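+ /*
+ * With 4:2:0 semi-planar output the chroma plane is half the size of
+ * the luma plane, so luma + chroma together take 3/2 of the luma plane
+ * size; the motion vectors are stored right after, G2_ALIGN aligned.
+ */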
+ return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
+}
+
+static void prepare_tile_info_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ u16 *p = (u16 *)((u8 *)ctx->hevc_dec.tile_sizes.cpu);
+ unsigned int num_tile_rows = pps->num_tile_rows_minus1 + 1;
+ unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
+ unsigned int pic_width_in_ctbs, pic_height_in_ctbs;
+ unsigned int max_log2_ctb_size, ctb_size;
+ bool tiles_enabled, uniform_spacing;
+ u32 no_chroma = 0;
+
+ tiles_enabled = !!(pps->flags & V4L2_HEVC_PPS_FLAG_TILES_ENABLED);
+ uniform_spacing = !!(pps->flags & V4L2_HEVC_PPS_FLAG_UNIFORM_SPACING);
+
+ hantro_reg_write(vpu, &g2_tile_e, tiles_enabled);
+
+ max_log2_ctb_size = sps->log2_min_luma_coding_block_size_minus3 + 3 +
+ sps->log2_diff_max_min_luma_coding_block_size;
+ pic_width_in_ctbs = (sps->pic_width_in_luma_samples +
+ (1 << max_log2_ctb_size) - 1) >> max_log2_ctb_size;
+ pic_height_in_ctbs = (sps->pic_height_in_luma_samples + (1 << max_log2_ctb_size) - 1)
+ >> max_log2_ctb_size;
+ ctb_size = 1 << max_log2_ctb_size;
+
+ vpu_debug(1, "Preparing tile sizes buffer for %dx%d CTBs (CTB size %d)\n",
+ pic_width_in_ctbs, pic_height_in_ctbs, ctb_size);
+
+ if (tiles_enabled) {
+ unsigned int i, j, h;
+
+ vpu_debug(1, "Tiles enabled! %dx%d\n", num_tile_cols, num_tile_rows);
+
+ hantro_reg_write(vpu, &g2_num_tile_rows, num_tile_rows);
+ hantro_reg_write(vpu, &g2_num_tile_cols, num_tile_cols);
+
+ /* write width + height for each tile in pic */
+ if (!uniform_spacing) {
+ u32 tmp_w = 0, tmp_h = 0;
+
+ for (i = 0; i < num_tile_rows; i++) {
+ if (i == num_tile_rows - 1)
+ h = pic_height_in_ctbs - tmp_h;
+ else
+ h = pps->row_height_minus1[i] + 1;
+ tmp_h += h;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ for (j = 0, tmp_w = 0; j < num_tile_cols - 1; j++) {
+ tmp_w += pps->column_width_minus1[j] + 1;
+ *p++ = pps->column_width_minus1[j] + 1;
+ *p++ = h;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ }
+ /* last column */
+ *p++ = pic_width_in_ctbs - tmp_w;
+ *p++ = h;
+ }
+ } else { /* uniform spacing */
+ u32 tmp, prev_h, prev_w;
+
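+ /*
+ * Tile i spans from i * CTBs / tiles up to (i + 1) * CTBs / tiles,
+ * e.g. 10 CTB rows split into 3 tile rows gives heights 3, 3 and 4.
+ */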
+ for (i = 0, prev_h = 0; i < num_tile_rows; i++) {
+ tmp = (i + 1) * pic_height_in_ctbs / num_tile_rows;
+ h = tmp - prev_h;
+ prev_h = tmp;
+ if (i == 0 && h == 1 && ctb_size == 16)
+ no_chroma = 1;
+ for (j = 0, prev_w = 0; j < num_tile_cols; j++) {
+ tmp = (j + 1) * pic_width_in_ctbs / num_tile_cols;
+ *p++ = tmp - prev_w;
+ *p++ = h;
+ if (j == 0 &&
+ (pps->column_width_minus1[0] + 1) == 1 &&
+ ctb_size == 16)
+ no_chroma = 1;
+ prev_w = tmp;
+ }
+ }
+ }
+ } else {
+ hantro_reg_write(vpu, &g2_num_tile_rows, 1);
+ hantro_reg_write(vpu, &g2_num_tile_cols, 1);
+
+ /* There's one tile, with dimensions equal to pic size. */
+ p[0] = pic_width_in_ctbs;
+ p[1] = pic_height_in_ctbs;
+ }
+
+ if (no_chroma)
+ vpu_debug(1, "%s: no chroma!\n", __func__);
+}
+
+static int compute_header_skip_length(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ int skip = 0;
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT)
+ /* size of pic_output_flag */
+ skip++;
+
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE)
+ /* size of colour_plane_id */
+ skip += 2;
+
+ if (!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC)) {
+ /* size of pic_order_cnt_lsb */
+ skip += sps->log2_max_pic_order_cnt_lsb_minus4 + 4;
+
+ /* size of short_term_ref_pic_set_sps_flag */
+ skip++;
+
+ if (decode_params->short_term_ref_pic_set_size)
+ /* size of st_ref_pic_set( num_short_term_ref_pic_sets ) */
+ skip += decode_params->short_term_ref_pic_set_size;
+ else if (sps->num_short_term_ref_pic_sets > 1)
+ skip += fls(sps->num_short_term_ref_pic_sets - 1);
+
+ skip += decode_params->long_term_ref_pic_set_size;
+ }
+
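+ /*
+ * Example (illustrative values): a non-IDR slice with pic_output_flag
+ * present, no separate colour planes,
+ * log2_max_pic_order_cnt_lsb_minus4 = 4, a 12-bit explicit short-term
+ * RPS and no long-term RPS gives skip = 1 + 8 + 1 + 12 = 22 bits.
+ */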
+ return skip;
+}
+
+static void set_params(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 min_log2_cb_size, max_log2_ctb_size, min_cb_size, max_ctb_size;
+ u32 pic_width_in_min_cbs, pic_height_in_min_cbs;
+ u32 pic_width_aligned, pic_height_aligned;
+ u32 partial_ctb_x, partial_ctb_y;
+
+ hantro_reg_write(vpu, &g2_bit_depth_y_minus8, sps->bit_depth_luma_minus8);
+ hantro_reg_write(vpu, &g2_bit_depth_c_minus8, sps->bit_depth_chroma_minus8);
+
+ hantro_reg_write(vpu, &g2_output_8_bits, 0);
+
+ hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx));
+
+ min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3;
+ max_log2_ctb_size = min_log2_cb_size + sps->log2_diff_max_min_luma_coding_block_size;
+
+ hantro_reg_write(vpu, &g2_min_cb_size, min_log2_cb_size);
+ hantro_reg_write(vpu, &g2_max_cb_size, max_log2_ctb_size);
+
+ min_cb_size = 1 << min_log2_cb_size;
+ max_ctb_size = 1 << max_log2_ctb_size;
+
+ pic_width_in_min_cbs = sps->pic_width_in_luma_samples / min_cb_size;
+ pic_height_in_min_cbs = sps->pic_height_in_luma_samples / min_cb_size;
+ pic_width_aligned = ALIGN(sps->pic_width_in_luma_samples, max_ctb_size);
+ pic_height_aligned = ALIGN(sps->pic_height_in_luma_samples, max_ctb_size);
+
+ partial_ctb_x = !!(sps->pic_width_in_luma_samples != pic_width_aligned);
+ partial_ctb_y = !!(sps->pic_height_in_luma_samples != pic_height_aligned);
+
+ hantro_reg_write(vpu, &g2_partial_ctb_x, partial_ctb_x);
+ hantro_reg_write(vpu, &g2_partial_ctb_y, partial_ctb_y);
+
+ hantro_reg_write(vpu, &g2_pic_width_in_cbs, pic_width_in_min_cbs);
+ hantro_reg_write(vpu, &g2_pic_height_in_cbs, pic_height_in_min_cbs);
+
+ hantro_reg_write(vpu, &g2_pic_width_4x4,
+ (pic_width_in_min_cbs * min_cb_size) / 4);
+ hantro_reg_write(vpu, &g2_pic_height_4x4,
+ (pic_height_in_min_cbs * min_cb_size) / 4);
+
+ hantro_reg_write(vpu, &hevc_max_inter_hierdepth,
+ sps->max_transform_hierarchy_depth_inter);
+ hantro_reg_write(vpu, &hevc_max_intra_hierdepth,
+ sps->max_transform_hierarchy_depth_intra);
+ hantro_reg_write(vpu, &hevc_min_trb_size,
+ sps->log2_min_luma_transform_block_size_minus2 + 2);
+ hantro_reg_write(vpu, &hevc_max_trb_size,
+ sps->log2_min_luma_transform_block_size_minus2 + 2 +
+ sps->log2_diff_max_min_luma_transform_block_size);
+
+ hantro_reg_write(vpu, &g2_tempor_mvp_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED) &&
+ !(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IDR_PIC));
+ hantro_reg_write(vpu, &g2_strong_smooth_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED));
+ hantro_reg_write(vpu, &g2_asym_pred_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_AMP_ENABLED));
+ hantro_reg_write(vpu, &g2_sao_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET));
+ hantro_reg_write(vpu, &g2_sign_data_hide,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED));
+
+ if (pps->flags & V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED) {
+ hantro_reg_write(vpu, &g2_cu_qpd_e, 1);
+ hantro_reg_write(vpu, &g2_max_cu_qpd_depth, pps->diff_cu_qp_delta_depth);
+ } else {
+ hantro_reg_write(vpu, &g2_cu_qpd_e, 0);
+ hantro_reg_write(vpu, &g2_max_cu_qpd_depth, 0);
+ }
+
+ hantro_reg_write(vpu, &g2_cb_qp_offset, pps->pps_cb_qp_offset);
+ hantro_reg_write(vpu, &g2_cr_qp_offset, pps->pps_cr_qp_offset);
+
+ hantro_reg_write(vpu, &g2_filt_offset_beta, pps->pps_beta_offset_div2);
+ hantro_reg_write(vpu, &g2_filt_offset_tc, pps->pps_tc_offset_div2);
+ hantro_reg_write(vpu, &g2_slice_hdr_ext_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT));
+ hantro_reg_write(vpu, &g2_slice_hdr_ext_bits, pps->num_extra_slice_header_bits);
+ hantro_reg_write(vpu, &g2_slice_chqp_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT));
+ hantro_reg_write(vpu, &g2_weight_bipr_idc,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED));
+ hantro_reg_write(vpu, &g2_transq_bypass,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED));
+ hantro_reg_write(vpu, &g2_list_mod_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT));
+ hantro_reg_write(vpu, &g2_entropy_sync_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED));
+ hantro_reg_write(vpu, &g2_cabac_init_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT));
+ hantro_reg_write(vpu, &g2_idr_pic_e,
+ !!(decode_params->flags & V4L2_HEVC_DECODE_PARAM_FLAG_IRAP_PIC));
+ hantro_reg_write(vpu, &hevc_parallel_merge,
+ pps->log2_parallel_merge_level_minus2 + 2);
+ hantro_reg_write(vpu, &g2_pcm_filt_d,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED));
+ hantro_reg_write(vpu, &g2_pcm_e,
+ !!(sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED));
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_PCM_ENABLED) {
+ hantro_reg_write(vpu, &g2_max_pcm_size,
+ sps->log2_diff_max_min_pcm_luma_coding_block_size +
+ sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
+ hantro_reg_write(vpu, &g2_min_pcm_size,
+ sps->log2_min_pcm_luma_coding_block_size_minus3 + 3);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_y,
+ sps->pcm_sample_bit_depth_luma_minus1 + 1);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_c,
+ sps->pcm_sample_bit_depth_chroma_minus1 + 1);
+ } else {
+ hantro_reg_write(vpu, &g2_max_pcm_size, 0);
+ hantro_reg_write(vpu, &g2_min_pcm_size, 0);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_y, 0);
+ hantro_reg_write(vpu, &g2_bit_depth_pcm_c, 0);
+ }
+
+ hantro_reg_write(vpu, &g2_start_code_e, 1);
+ hantro_reg_write(vpu, &g2_init_qp, pps->init_qp_minus26 + 26);
+ hantro_reg_write(vpu, &g2_weight_pred_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED));
+ hantro_reg_write(vpu, &g2_cabac_init_present,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT));
+ hantro_reg_write(vpu, &g2_const_intra_e,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED));
+ hantro_reg_write(vpu, &g2_transform_skip,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED));
+ hantro_reg_write(vpu, &g2_out_filtering_dis,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER));
+ hantro_reg_write(vpu, &g2_filt_ctrl_pres,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT));
+ hantro_reg_write(vpu, &g2_dependent_slice,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT_ENABLED));
+ hantro_reg_write(vpu, &g2_filter_override,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED));
+ hantro_reg_write(vpu, &g2_refidx0_active,
+ pps->num_ref_idx_l0_default_active_minus1 + 1);
+ hantro_reg_write(vpu, &g2_refidx1_active,
+ pps->num_ref_idx_l1_default_active_minus1 + 1);
+ hantro_reg_write(vpu, &g2_apf_threshold, 8);
+}
+
+static void set_ref_pic_list(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ struct hantro_dev *vpu = ctx->dev;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ u32 list0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
+ u32 list1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX] = {};
+ static const struct hantro_reg ref_pic_regs0[] = {
+ hevc_rlist_f0,
+ hevc_rlist_f1,
+ hevc_rlist_f2,
+ hevc_rlist_f3,
+ hevc_rlist_f4,
+ hevc_rlist_f5,
+ hevc_rlist_f6,
+ hevc_rlist_f7,
+ hevc_rlist_f8,
+ hevc_rlist_f9,
+ hevc_rlist_f10,
+ hevc_rlist_f11,
+ hevc_rlist_f12,
+ hevc_rlist_f13,
+ hevc_rlist_f14,
+ hevc_rlist_f15,
+ };
+ static const struct hantro_reg ref_pic_regs1[] = {
+ hevc_rlist_b0,
+ hevc_rlist_b1,
+ hevc_rlist_b2,
+ hevc_rlist_b3,
+ hevc_rlist_b4,
+ hevc_rlist_b5,
+ hevc_rlist_b6,
+ hevc_rlist_b7,
+ hevc_rlist_b8,
+ hevc_rlist_b9,
+ hevc_rlist_b10,
+ hevc_rlist_b11,
+ hevc_rlist_b12,
+ hevc_rlist_b13,
+ hevc_rlist_b14,
+ hevc_rlist_b15,
+ };
+ unsigned int i, j;
+
+ /* List 0 contains: short term before, short term after and long term */
+ j = 0;
+ for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = decode_params->poc_st_curr_before[i];
+ for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = decode_params->poc_st_curr_after[i];
+ for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list0); i++)
+ list0[j++] = decode_params->poc_lt_curr[i];
+
+ /* Pad the rest of the list by repeating the entries cyclically */
+ i = 0;
+ while (j < ARRAY_SIZE(list0))
+ list0[j++] = list0[i++];
+
+ j = 0;
+ for (i = 0; i < decode_params->num_poc_st_curr_after && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = decode_params->poc_st_curr_after[i];
+ for (i = 0; i < decode_params->num_poc_st_curr_before && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = decode_params->poc_st_curr_before[i];
+ for (i = 0; i < decode_params->num_poc_lt_curr && j < ARRAY_SIZE(list1); i++)
+ list1[j++] = decode_params->poc_lt_curr[i];
+
+ i = 0;
+ while (j < ARRAY_SIZE(list1))
+ list1[j++] = list1[i++];
+
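+ /*
+ * For example, with 2 short-term-before, 1 short-term-after and 1
+ * long-term reference, the 16 entries of list0 repeat the pattern
+ * {before0, before1, after0, lt0} four times.
+ */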
+ for (i = 0; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
+ hantro_reg_write(vpu, &ref_pic_regs0[i], list0[i]);
+ hantro_reg_write(vpu, &ref_pic_regs1[i], list1[i]);
+ }
+}
+
+static int set_ref(struct hantro_ctx *ctx)
+{
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_decode_params *decode_params = ctrls->decode_params;
+ const struct v4l2_hevc_dpb_entry *dpb = decode_params->dpb;
+ dma_addr_t luma_addr, chroma_addr, mv_addr = 0;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ struct hantro_decoded_buffer *dst;
+ size_t cr_offset = hantro_hevc_chroma_offset(ctx);
+ size_t mv_offset = hantro_hevc_motion_vectors_offset(ctx);
+ u32 max_ref_frames;
+ u16 dpb_longterm_e;
+ static const struct hantro_reg cur_poc[] = {
+ hevc_cur_poc_00,
+ hevc_cur_poc_01,
+ hevc_cur_poc_02,
+ hevc_cur_poc_03,
+ hevc_cur_poc_04,
+ hevc_cur_poc_05,
+ hevc_cur_poc_06,
+ hevc_cur_poc_07,
+ hevc_cur_poc_08,
+ hevc_cur_poc_09,
+ hevc_cur_poc_10,
+ hevc_cur_poc_11,
+ hevc_cur_poc_12,
+ hevc_cur_poc_13,
+ hevc_cur_poc_14,
+ hevc_cur_poc_15,
+ };
+ unsigned int i;
+
+ max_ref_frames = decode_params->num_poc_lt_curr +
+ decode_params->num_poc_st_curr_before +
+ decode_params->num_poc_st_curr_after;
+ /*
+ * Set max_ref_frames to non-zero to avoid HW hang when decoding
+ * badly marked I-frames.
+ */
+ max_ref_frames = max_ref_frames ? max_ref_frames : 1;
+ hantro_reg_write(vpu, &g2_num_ref_frames, max_ref_frames);
+ hantro_reg_write(vpu, &g2_filter_over_slices,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED));
+ hantro_reg_write(vpu, &g2_filter_over_tiles,
+ !!(pps->flags & V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED));
+
+ /* Write the POC difference between the current picture and each DPB entry. */
+ for (i = 0; i < decode_params->num_active_dpb_entries && i < ARRAY_SIZE(cur_poc); i++) {
+ char poc_diff = decode_params->pic_order_cnt_val - dpb[i].pic_order_cnt_val;
+
+ hantro_reg_write(vpu, &cur_poc[i], poc_diff);
+ }
+
+ if (i < ARRAY_SIZE(cur_poc)) {
+ /*
+ * After the references, fill one entry pointing to itself,
+ * i.e. difference is zero.
+ */
+ hantro_reg_write(vpu, &cur_poc[i], 0);
+ i++;
+ }
+
+ /* Fill the rest with the current picture */
+ for (; i < ARRAY_SIZE(cur_poc); i++)
+ hantro_reg_write(vpu, &cur_poc[i], decode_params->pic_order_cnt_val);
+
+ set_ref_pic_list(ctx);
+
+ /* We will only keep the reference pictures that are still used */
+ hantro_hevc_ref_init(ctx);
+
+ /* Set up addresses of DPB buffers */
+ dpb_longterm_e = 0;
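+ /*
+ * g2_refer_lterm_e is a 16-bit mask packed MSB first: DPB entry 0
+ * maps to bit 15, entry 1 to bit 14, and so on.
+ */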
+ for (i = 0; i < decode_params->num_active_dpb_entries &&
+ i < (V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1); i++) {
+ luma_addr = hantro_hevc_get_ref_buf(ctx, dpb[i].pic_order_cnt_val);
+ if (!luma_addr)
+ return -ENOMEM;
+
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ if (dpb[i].flags & V4L2_HEVC_DPB_ENTRY_LONG_TERM_REFERENCE)
+ dpb_longterm_e |= BIT(V4L2_HEVC_DPB_ENTRIES_NUM_MAX - 1 - i);
+
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), mv_addr);
+ }
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ if (!luma_addr)
+ return -ENOMEM;
+
+ if (hantro_hevc_add_ref_buf(ctx, decode_params->pic_order_cnt_val, luma_addr))
+ return -EINVAL;
+
+ chroma_addr = luma_addr + cr_offset;
+ mv_addr = luma_addr + mv_offset;
+
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), luma_addr);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), chroma_addr);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i++), mv_addr);
+
+ hantro_write_addr(vpu, G2_OUT_LUMA_ADDR, luma_addr);
+ hantro_write_addr(vpu, G2_OUT_CHROMA_ADDR, chroma_addr);
+ hantro_write_addr(vpu, G2_OUT_MV_ADDR, mv_addr);
+
+ for (; i < V4L2_HEVC_DPB_ENTRIES_NUM_MAX; i++) {
+ hantro_write_addr(vpu, G2_REF_LUMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_CHROMA_ADDR(i), 0);
+ hantro_write_addr(vpu, G2_REF_MV_ADDR(i), 0);
+ }
+
+ hantro_reg_write(vpu, &g2_refer_lterm_e, dpb_longterm_e);
+
+ return 0;
+}
+
+static void set_buffers(struct hantro_ctx *ctx)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma;
+ u32 src_len, src_buf_len;
+
+ src_buf = hantro_get_src_buf(ctx);
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_len = vb2_get_plane_payload(&src_buf->vb2_buf, 0);
+ src_buf_len = vb2_plane_size(&src_buf->vb2_buf, 0);
+
+ hantro_write_addr(vpu, G2_STREAM_ADDR, src_dma);
+ hantro_reg_write(vpu, &g2_stream_len, src_len);
+ hantro_reg_write(vpu, &g2_strm_buffer_len, src_buf_len);
+ hantro_reg_write(vpu, &g2_strm_start_offset, 0);
+ hantro_reg_write(vpu, &g2_write_mvs_e, 1);
+
+ hantro_write_addr(vpu, G2_TILE_SIZES_ADDR, ctx->hevc_dec.tile_sizes.dma);
+ hantro_write_addr(vpu, G2_TILE_FILTER_ADDR, ctx->hevc_dec.tile_filter.dma);
+ hantro_write_addr(vpu, G2_TILE_SAO_ADDR, ctx->hevc_dec.tile_sao.dma);
+ hantro_write_addr(vpu, G2_TILE_BSD_ADDR, ctx->hevc_dec.tile_bsd.dma);
+}
+
+static void prepare_scaling_list_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_scaling_matrix *sc = ctrls->scaling;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ u8 *p = ((u8 *)ctx->hevc_dec.scaling_lists.cpu);
+ unsigned int scaling_list_enabled;
+ unsigned int i, j, k;
+
+ scaling_list_enabled = !!(sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED);
+ hantro_reg_write(vpu, &g2_scaling_list_e, scaling_list_enabled);
+
+ if (!scaling_list_enabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_16x16); i++)
+ *p++ = sc->scaling_list_dc_coef_16x16[i];
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_32x32); i++)
+ *p++ = sc->scaling_list_dc_coef_32x32[i];
+
+ /* The 6 + 2 DC coefficients above take 8 bytes; skip 8 more to the next 128-bit boundary */
+ p += 8;
+
+ /* write scaling lists column by column */
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++)
+ for (k = 0; k < 4; k++)
+ *p++ = sc->scaling_list_4x4[i][4 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_8x8[i][8 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_16x16[i][8 * k + j];
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_32x32[i][8 * k + j];
+
+ hantro_write_addr(vpu, G2_HEVC_SCALING_LIST_ADDR, ctx->hevc_dec.scaling_lists.dma);
+}
+
+int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int ret;
+
+ hantro_g2_check_idle(vpu);
+
+ /* Prepare HEVC decoder context. */
+ ret = hantro_hevc_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
+
+ /* Configure hardware registers. */
+ set_params(ctx);
+
+ /* set reference pictures */
+ ret = set_ref(ctx);
+ if (ret)
+ return ret;
+
+ set_buffers(ctx);
+ prepare_tile_info_buffer(ctx);
+
+ prepare_scaling_list_buffer(ctx);
+
+ hantro_end_prepare_run(ctx);
+
+ hantro_reg_write(vpu, &g2_mode, HEVC_DEC_MODE);
+ hantro_reg_write(vpu, &g2_clk_gate_e, 1);
+
+ /* Don't disable output */
+ hantro_reg_write(vpu, &g2_out_dis, 0);
+
+ /* Don't compress buffers */
+ hantro_reg_write(vpu, &g2_ref_compress_bypass, 1);
+
+ /* Bus width and max burst */
+ hantro_reg_write(vpu, &g2_buswidth, BUS_WIDTH_128);
+ hantro_reg_write(vpu, &g2_max_burst, 16);
+
+ /* Swap */
+ hantro_reg_write(vpu, &g2_strm_swap, 0xf);
+ hantro_reg_write(vpu, &g2_dirmv_swap, 0xf);
+ hantro_reg_write(vpu, &g2_compress_swap, 0xf);
+
+ /* Start decoding! */
+ vdpu_write(vpu, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_g2_regs.h b/drivers/media/platform/verisilicon/hantro_g2_regs.h
new file mode 100644
index 000000000000..82606783591a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2_regs.h
@@ -0,0 +1,325 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021, Collabora
+ *
+ * Author: Benjamin Gaignard <benjamin.gaignard@collabora.com>
+ */
+
+#ifndef HANTRO_G2_REGS_H_
+#define HANTRO_G2_REGS_H_
+
+#include "hantro.h"
+
+#define G2_SWREG(nr) ((nr) * 4)
+
+#define G2_DEC_REG(b, s, m) \
+ ((const struct hantro_reg) { \
+ .base = G2_SWREG(b), \
+ .shift = s, \
+ .mask = m, \
+ })
+
+#define G2_REG_VERSION G2_SWREG(0)
+
+#define G2_REG_INTERRUPT G2_SWREG(1)
+#define G2_REG_INTERRUPT_DEC_RDY_INT BIT(12)
+#define G2_REG_INTERRUPT_DEC_ABORT_E BIT(5)
+#define G2_REG_INTERRUPT_DEC_IRQ_DIS BIT(4)
+#define G2_REG_INTERRUPT_DEC_E BIT(0)
+
+#define HEVC_DEC_MODE 0xc
+#define VP9_DEC_MODE 0xd
+
+#define BUS_WIDTH_32 0
+#define BUS_WIDTH_64 1
+#define BUS_WIDTH_128 2
+#define BUS_WIDTH_256 3
+
+#define g2_strm_swap G2_DEC_REG(2, 28, 0xf)
+#define g2_strm_swap_old G2_DEC_REG(2, 27, 0x1f)
+#define g2_pic_swap G2_DEC_REG(2, 22, 0x1f)
+#define g2_dirmv_swap G2_DEC_REG(2, 20, 0xf)
+#define g2_dirmv_swap_old G2_DEC_REG(2, 17, 0x1f)
+#define g2_tab0_swap_old G2_DEC_REG(2, 12, 0x1f)
+#define g2_tab1_swap_old G2_DEC_REG(2, 7, 0x1f)
+#define g2_tab2_swap_old G2_DEC_REG(2, 2, 0x1f)
+
+#define g2_mode G2_DEC_REG(3, 27, 0x1f)
+#define g2_compress_swap G2_DEC_REG(3, 20, 0xf)
+#define g2_ref_compress_bypass G2_DEC_REG(3, 17, 0x1)
+#define g2_out_rs_e G2_DEC_REG(3, 16, 0x1)
+#define g2_out_dis G2_DEC_REG(3, 15, 0x1)
+#define g2_out_filtering_dis G2_DEC_REG(3, 14, 0x1)
+#define g2_write_mvs_e G2_DEC_REG(3, 12, 0x1)
+#define g2_tab3_swap_old G2_DEC_REG(3, 7, 0x1f)
+#define g2_rscan_swap G2_DEC_REG(3, 2, 0x1f)
+
+#define g2_pic_width_in_cbs G2_DEC_REG(4, 19, 0x1fff)
+#define g2_pic_height_in_cbs G2_DEC_REG(4, 6, 0x1fff)
+#define g2_num_ref_frames G2_DEC_REG(4, 0, 0x1f)
+
+#define g2_start_bit G2_DEC_REG(5, 25, 0x7f)
+#define g2_scaling_list_e G2_DEC_REG(5, 24, 0x1)
+#define g2_cb_qp_offset G2_DEC_REG(5, 19, 0x1f)
+#define g2_cr_qp_offset G2_DEC_REG(5, 14, 0x1f)
+#define g2_sign_data_hide G2_DEC_REG(5, 12, 0x1)
+#define g2_tempor_mvp_e G2_DEC_REG(5, 11, 0x1)
+#define g2_max_cu_qpd_depth G2_DEC_REG(5, 5, 0x3f)
+#define g2_cu_qpd_e G2_DEC_REG(5, 4, 0x1)
+#define g2_pix_shift G2_DEC_REG(5, 0, 0xf)
+
+#define g2_stream_len G2_DEC_REG(6, 0, 0xffffffff)
+
+#define g2_cabac_init_present G2_DEC_REG(7, 31, 0x1)
+#define g2_weight_pred_e G2_DEC_REG(7, 28, 0x1)
+#define g2_weight_bipr_idc G2_DEC_REG(7, 26, 0x3)
+#define g2_filter_over_slices G2_DEC_REG(7, 25, 0x1)
+#define g2_filter_over_tiles G2_DEC_REG(7, 24, 0x1)
+#define g2_asym_pred_e G2_DEC_REG(7, 23, 0x1)
+#define g2_sao_e G2_DEC_REG(7, 22, 0x1)
+#define g2_pcm_filt_d G2_DEC_REG(7, 21, 0x1)
+#define g2_slice_chqp_present G2_DEC_REG(7, 20, 0x1)
+#define g2_dependent_slice G2_DEC_REG(7, 19, 0x1)
+#define g2_filter_override G2_DEC_REG(7, 18, 0x1)
+#define g2_strong_smooth_e G2_DEC_REG(7, 17, 0x1)
+#define g2_filt_offset_beta G2_DEC_REG(7, 12, 0x1f)
+#define g2_filt_offset_tc G2_DEC_REG(7, 7, 0x1f)
+#define g2_slice_hdr_ext_e G2_DEC_REG(7, 6, 0x1)
+#define g2_slice_hdr_ext_bits G2_DEC_REG(7, 3, 0x7)
+
+#define g2_const_intra_e G2_DEC_REG(8, 31, 0x1)
+#define g2_filt_ctrl_pres G2_DEC_REG(8, 30, 0x1)
+#define g2_bit_depth_y G2_DEC_REG(8, 21, 0xf)
+#define g2_bit_depth_c G2_DEC_REG(8, 17, 0xf)
+#define g2_idr_pic_e G2_DEC_REG(8, 16, 0x1)
+#define g2_bit_depth_pcm_y G2_DEC_REG(8, 12, 0xf)
+#define g2_bit_depth_pcm_c G2_DEC_REG(8, 8, 0xf)
+#define g2_bit_depth_y_minus8 G2_DEC_REG(8, 6, 0x3)
+#define g2_bit_depth_c_minus8 G2_DEC_REG(8, 4, 0x3)
+#define g2_rs_out_bit_depth G2_DEC_REG(8, 4, 0xf)
+#define g2_output_8_bits G2_DEC_REG(8, 3, 0x1)
+#define g2_output_format G2_DEC_REG(8, 0, 0x7)
+#define g2_pp_pix_shift G2_DEC_REG(8, 0, 0xf)
+
+#define g2_refidx1_active G2_DEC_REG(9, 19, 0x1f)
+#define g2_refidx0_active G2_DEC_REG(9, 14, 0x1f)
+#define g2_hdr_skip_length G2_DEC_REG(9, 0, 0x3fff)
+
+#define g2_start_code_e G2_DEC_REG(10, 31, 0x1)
+#define g2_init_qp_old G2_DEC_REG(10, 25, 0x3f)
+#define g2_init_qp G2_DEC_REG(10, 24, 0x7f)
+#define g2_num_tile_cols_old G2_DEC_REG(10, 20, 0x1f)
+#define g2_num_tile_cols G2_DEC_REG(10, 19, 0x1f)
+#define g2_num_tile_rows_old G2_DEC_REG(10, 15, 0x1f)
+#define g2_num_tile_rows G2_DEC_REG(10, 14, 0x1f)
+#define g2_tile_e G2_DEC_REG(10, 1, 0x1)
+#define g2_entropy_sync_e G2_DEC_REG(10, 0, 0x1)
+
+#define vp9_transform_mode G2_DEC_REG(11, 27, 0x7)
+#define vp9_filt_sharpness G2_DEC_REG(11, 21, 0x7)
+#define vp9_mcomp_filt_type G2_DEC_REG(11, 8, 0x7)
+#define vp9_high_prec_mv_e G2_DEC_REG(11, 7, 0x1)
+#define vp9_comp_pred_mode G2_DEC_REG(11, 4, 0x3)
+#define vp9_gref_sign_bias G2_DEC_REG(11, 2, 0x1)
+#define vp9_aref_sign_bias G2_DEC_REG(11, 0, 0x1)
+
+#define g2_refer_lterm_e G2_DEC_REG(12, 16, 0xffff)
+#define g2_min_cb_size G2_DEC_REG(12, 13, 0x7)
+#define g2_max_cb_size G2_DEC_REG(12, 10, 0x7)
+#define g2_min_pcm_size G2_DEC_REG(12, 7, 0x7)
+#define g2_max_pcm_size G2_DEC_REG(12, 4, 0x7)
+#define g2_pcm_e G2_DEC_REG(12, 3, 0x1)
+#define g2_transform_skip G2_DEC_REG(12, 2, 0x1)
+#define g2_transq_bypass G2_DEC_REG(12, 1, 0x1)
+#define g2_list_mod_e G2_DEC_REG(12, 0, 0x1)
+
+#define hevc_min_trb_size G2_DEC_REG(13, 13, 0x7)
+#define hevc_max_trb_size G2_DEC_REG(13, 10, 0x7)
+#define hevc_max_intra_hierdepth G2_DEC_REG(13, 7, 0x7)
+#define hevc_max_inter_hierdepth G2_DEC_REG(13, 4, 0x7)
+#define hevc_parallel_merge G2_DEC_REG(13, 0, 0xf)
+
+#define hevc_rlist_f0 G2_DEC_REG(14, 0, 0x1f)
+#define hevc_rlist_f1 G2_DEC_REG(14, 10, 0x1f)
+#define hevc_rlist_f2 G2_DEC_REG(14, 20, 0x1f)
+#define hevc_rlist_b0 G2_DEC_REG(14, 5, 0x1f)
+#define hevc_rlist_b1 G2_DEC_REG(14, 15, 0x1f)
+#define hevc_rlist_b2 G2_DEC_REG(14, 25, 0x1f)
+
+#define hevc_rlist_f3 G2_DEC_REG(15, 0, 0x1f)
+#define hevc_rlist_f4 G2_DEC_REG(15, 10, 0x1f)
+#define hevc_rlist_f5 G2_DEC_REG(15, 20, 0x1f)
+#define hevc_rlist_b3 G2_DEC_REG(15, 5, 0x1f)
+#define hevc_rlist_b4 G2_DEC_REG(15, 15, 0x1f)
+#define hevc_rlist_b5 G2_DEC_REG(15, 25, 0x1f)
+
+#define hevc_rlist_f6 G2_DEC_REG(16, 0, 0x1f)
+#define hevc_rlist_f7 G2_DEC_REG(16, 10, 0x1f)
+#define hevc_rlist_f8 G2_DEC_REG(16, 20, 0x1f)
+#define hevc_rlist_b6 G2_DEC_REG(16, 5, 0x1f)
+#define hevc_rlist_b7 G2_DEC_REG(16, 15, 0x1f)
+#define hevc_rlist_b8 G2_DEC_REG(16, 25, 0x1f)
+
+#define hevc_rlist_f9 G2_DEC_REG(17, 0, 0x1f)
+#define hevc_rlist_f10 G2_DEC_REG(17, 10, 0x1f)
+#define hevc_rlist_f11 G2_DEC_REG(17, 20, 0x1f)
+#define hevc_rlist_b9 G2_DEC_REG(17, 5, 0x1f)
+#define hevc_rlist_b10 G2_DEC_REG(17, 15, 0x1f)
+#define hevc_rlist_b11 G2_DEC_REG(17, 25, 0x1f)
+
+#define hevc_rlist_f12 G2_DEC_REG(18, 0, 0x1f)
+#define hevc_rlist_f13 G2_DEC_REG(18, 10, 0x1f)
+#define hevc_rlist_f14 G2_DEC_REG(18, 20, 0x1f)
+#define hevc_rlist_b12 G2_DEC_REG(18, 5, 0x1f)
+#define hevc_rlist_b13 G2_DEC_REG(18, 15, 0x1f)
+#define hevc_rlist_b14 G2_DEC_REG(18, 25, 0x1f)
+
+#define hevc_rlist_f15 G2_DEC_REG(19, 0, 0x1f)
+#define hevc_rlist_b15 G2_DEC_REG(19, 5, 0x1f)
+
+#define g2_partial_ctb_x G2_DEC_REG(20, 31, 0x1)
+#define g2_partial_ctb_y G2_DEC_REG(20, 30, 0x1)
+#define g2_pic_width_4x4 G2_DEC_REG(20, 16, 0xfff)
+#define g2_pic_height_4x4 G2_DEC_REG(20, 0, 0xfff)
+
+#define vp9_qp_delta_y_dc G2_DEC_REG(13, 23, 0x3f)
+#define vp9_qp_delta_ch_dc G2_DEC_REG(13, 17, 0x3f)
+#define vp9_qp_delta_ch_ac G2_DEC_REG(13, 11, 0x3f)
+#define vp9_last_sign_bias G2_DEC_REG(13, 10, 0x1)
+#define vp9_lossless_e G2_DEC_REG(13, 9, 0x1)
+#define vp9_comp_pred_var_ref1 G2_DEC_REG(13, 7, 0x3)
+#define vp9_comp_pred_var_ref0 G2_DEC_REG(13, 5, 0x3)
+#define vp9_comp_pred_fixed_ref G2_DEC_REG(13, 3, 0x3)
+#define vp9_segment_temp_upd_e G2_DEC_REG(13, 2, 0x1)
+#define vp9_segment_upd_e G2_DEC_REG(13, 1, 0x1)
+#define vp9_segment_e G2_DEC_REG(13, 0, 0x1)
+
+#define vp9_filt_level G2_DEC_REG(14, 18, 0x3f)
+#define vp9_refpic_seg0 G2_DEC_REG(14, 15, 0x7)
+#define vp9_skip_seg0 G2_DEC_REG(14, 14, 0x1)
+#define vp9_filt_level_seg0 G2_DEC_REG(14, 8, 0x3f)
+#define vp9_quant_seg0 G2_DEC_REG(14, 0, 0xff)
+
+#define vp9_refpic_seg1 G2_DEC_REG(15, 15, 0x7)
+#define vp9_skip_seg1 G2_DEC_REG(15, 14, 0x1)
+#define vp9_filt_level_seg1 G2_DEC_REG(15, 8, 0x3f)
+#define vp9_quant_seg1 G2_DEC_REG(15, 0, 0xff)
+
+#define vp9_refpic_seg2 G2_DEC_REG(16, 15, 0x7)
+#define vp9_skip_seg2 G2_DEC_REG(16, 14, 0x1)
+#define vp9_filt_level_seg2 G2_DEC_REG(16, 8, 0x3f)
+#define vp9_quant_seg2 G2_DEC_REG(16, 0, 0xff)
+
+#define vp9_refpic_seg3 G2_DEC_REG(17, 15, 0x7)
+#define vp9_skip_seg3 G2_DEC_REG(17, 14, 0x1)
+#define vp9_filt_level_seg3 G2_DEC_REG(17, 8, 0x3f)
+#define vp9_quant_seg3 G2_DEC_REG(17, 0, 0xff)
+
+#define vp9_refpic_seg4 G2_DEC_REG(18, 15, 0x7)
+#define vp9_skip_seg4 G2_DEC_REG(18, 14, 0x1)
+#define vp9_filt_level_seg4 G2_DEC_REG(18, 8, 0x3f)
+#define vp9_quant_seg4 G2_DEC_REG(18, 0, 0xff)
+
+#define vp9_refpic_seg5 G2_DEC_REG(19, 15, 0x7)
+#define vp9_skip_seg5 G2_DEC_REG(19, 14, 0x1)
+#define vp9_filt_level_seg5 G2_DEC_REG(19, 8, 0x3f)
+#define vp9_quant_seg5 G2_DEC_REG(19, 0, 0xff)
+
+#define hevc_cur_poc_00 G2_DEC_REG(46, 24, 0xff)
+#define hevc_cur_poc_01 G2_DEC_REG(46, 16, 0xff)
+#define hevc_cur_poc_02 G2_DEC_REG(46, 8, 0xff)
+#define hevc_cur_poc_03 G2_DEC_REG(46, 0, 0xff)
+
+#define hevc_cur_poc_04 G2_DEC_REG(47, 24, 0xff)
+#define hevc_cur_poc_05 G2_DEC_REG(47, 16, 0xff)
+#define hevc_cur_poc_06 G2_DEC_REG(47, 8, 0xff)
+#define hevc_cur_poc_07 G2_DEC_REG(47, 0, 0xff)
+
+#define hevc_cur_poc_08 G2_DEC_REG(48, 24, 0xff)
+#define hevc_cur_poc_09 G2_DEC_REG(48, 16, 0xff)
+#define hevc_cur_poc_10 G2_DEC_REG(48, 8, 0xff)
+#define hevc_cur_poc_11 G2_DEC_REG(48, 0, 0xff)
+
+#define hevc_cur_poc_12 G2_DEC_REG(49, 24, 0xff)
+#define hevc_cur_poc_13 G2_DEC_REG(49, 16, 0xff)
+#define hevc_cur_poc_14 G2_DEC_REG(49, 8, 0xff)
+#define hevc_cur_poc_15 G2_DEC_REG(49, 0, 0xff)
+
+#define vp9_refpic_seg6 G2_DEC_REG(31, 15, 0x7)
+#define vp9_skip_seg6 G2_DEC_REG(31, 14, 0x1)
+#define vp9_filt_level_seg6 G2_DEC_REG(31, 8, 0x3f)
+#define vp9_quant_seg6 G2_DEC_REG(31, 0, 0xff)
+
+#define vp9_refpic_seg7 G2_DEC_REG(32, 15, 0x7)
+#define vp9_skip_seg7 G2_DEC_REG(32, 14, 0x1)
+#define vp9_filt_level_seg7 G2_DEC_REG(32, 8, 0x3f)
+#define vp9_quant_seg7 G2_DEC_REG(32, 0, 0xff)
+
+#define vp9_lref_width G2_DEC_REG(33, 16, 0xffff)
+#define vp9_lref_height G2_DEC_REG(33, 0, 0xffff)
+
+#define vp9_gref_width G2_DEC_REG(34, 16, 0xffff)
+#define vp9_gref_height G2_DEC_REG(34, 0, 0xffff)
+
+#define vp9_aref_width G2_DEC_REG(35, 16, 0xffff)
+#define vp9_aref_height G2_DEC_REG(35, 0, 0xffff)
+
+#define vp9_lref_hor_scale G2_DEC_REG(36, 16, 0xffff)
+#define vp9_lref_ver_scale G2_DEC_REG(36, 0, 0xffff)
+
+#define vp9_gref_hor_scale G2_DEC_REG(37, 16, 0xffff)
+#define vp9_gref_ver_scale G2_DEC_REG(37, 0, 0xffff)
+
+#define vp9_aref_hor_scale G2_DEC_REG(38, 16, 0xffff)
+#define vp9_aref_ver_scale G2_DEC_REG(38, 0, 0xffff)
+
+#define vp9_filt_ref_adj_0 G2_DEC_REG(46, 24, 0x7f)
+#define vp9_filt_ref_adj_1 G2_DEC_REG(46, 16, 0x7f)
+#define vp9_filt_ref_adj_2 G2_DEC_REG(46, 8, 0x7f)
+#define vp9_filt_ref_adj_3 G2_DEC_REG(46, 0, 0x7f)
+
+#define vp9_filt_mb_adj_0 G2_DEC_REG(47, 24, 0x7f)
+#define vp9_filt_mb_adj_1 G2_DEC_REG(47, 16, 0x7f)
+#define vp9_filt_mb_adj_2 G2_DEC_REG(47, 8, 0x7f)
+#define vp9_filt_mb_adj_3 G2_DEC_REG(47, 0, 0x7f)
+
+#define g2_apf_threshold G2_DEC_REG(55, 0, 0xffff)
+
+#define g2_clk_gate_e G2_DEC_REG(58, 16, 0x1)
+#define g2_double_buffer_e G2_DEC_REG(58, 15, 0x1)
+#define g2_buswidth G2_DEC_REG(58, 8, 0x7)
+#define g2_max_burst G2_DEC_REG(58, 0, 0xff)
+
+#define g2_down_scale_e G2_DEC_REG(184, 7, 0x1)
+#define g2_down_scale_y G2_DEC_REG(184, 2, 0x3)
+#define g2_down_scale_x G2_DEC_REG(184, 0, 0x3)
+
+#define G2_REG_CONFIG G2_SWREG(58)
+#define G2_REG_CONFIG_DEC_CLK_GATE_E BIT(16)
+#define G2_REG_CONFIG_DEC_CLK_GATE_IDLE_E BIT(17)
+
+#define G2_OUT_LUMA_ADDR (G2_SWREG(65))
+#define G2_REF_LUMA_ADDR(i) (G2_SWREG(67) + ((i) * 0x8))
+#define G2_VP9_SEGMENT_WRITE_ADDR (G2_SWREG(79))
+#define G2_VP9_SEGMENT_READ_ADDR (G2_SWREG(81))
+#define G2_OUT_CHROMA_ADDR (G2_SWREG(99))
+#define G2_REF_CHROMA_ADDR(i) (G2_SWREG(101) + ((i) * 0x8))
+#define G2_OUT_MV_ADDR (G2_SWREG(133))
+#define G2_REF_MV_ADDR(i) (G2_SWREG(135) + ((i) * 0x8))
+#define G2_TILE_SIZES_ADDR (G2_SWREG(167))
+#define G2_STREAM_ADDR (G2_SWREG(169))
+#define G2_HEVC_SCALING_LIST_ADDR (G2_SWREG(171))
+#define G2_VP9_CTX_COUNT_ADDR (G2_SWREG(171))
+#define G2_VP9_PROBS_ADDR (G2_SWREG(173))
+#define G2_RS_OUT_LUMA_ADDR (G2_SWREG(175))
+#define G2_RS_OUT_CHROMA_ADDR (G2_SWREG(177))
+#define G2_TILE_FILTER_ADDR (G2_SWREG(179))
+#define G2_TILE_SAO_ADDR (G2_SWREG(181))
+#define G2_TILE_BSD_ADDR (G2_SWREG(183))
+#define G2_DS_DST (G2_SWREG(186))
+#define G2_DS_DST_CHR (G2_SWREG(188))
+
+#define g2_strm_buffer_len G2_DEC_REG(258, 0, 0xffffffff)
+#define g2_strm_start_offset G2_DEC_REG(259, 0, 0xffffffff)
+
+#endif
diff --git a/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
new file mode 100644
index 000000000000..6fc4b555517f
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_g2_vp9_dec.c
@@ -0,0 +1,1014 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+#include "media/videobuf2-core.h"
+#include "media/videobuf2-dma-contig.h"
+#include "media/videobuf2-v4l2.h"
+#include <linux/kernel.h>
+#include <linux/vmalloc.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-vp9.h>
+
+#include "hantro.h"
+#include "hantro_vp9.h"
+#include "hantro_g2_regs.h"
+
+#define G2_ALIGN 16
+
+enum hantro_ref_frames {
+ INTRA_FRAME = 0,
+ LAST_FRAME = 1,
+ GOLDEN_FRAME = 2,
+ ALTREF_FRAME = 3,
+ MAX_REF_FRAMES = 4
+};
+
+static int start_prepare_run(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame **dec_params)
+{
+ const struct v4l2_ctrl_vp9_compressed_hdr *prob_updates;
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct v4l2_ctrl *ctrl;
+ unsigned int fctx_idx;
+
+ /* V4L2-specific preparation */
+ hantro_start_prepare_run(ctx);
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_FRAME);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ *dec_params = ctrl->p_cur.p;
+
+ ctrl = v4l2_ctrl_find(&ctx->ctrl_handler, V4L2_CID_STATELESS_VP9_COMPRESSED_HDR);
+ if (WARN_ON(!ctrl))
+ return -EINVAL;
+ prob_updates = ctrl->p_cur.p;
+ vp9_ctx->cur.tx_mode = prob_updates->tx_mode;
+
+ /*
+ * VP9-specific setup
+ *
+ * By this point userspace has done all parts of 6.2 uncompressed_header()
+ * except this fragment:
+ * if ( FrameIsIntra || error_resilient_mode ) {
+ * setup_past_independence ( )
+ * if ( frame_type == KEY_FRAME || error_resilient_mode == 1 ||
+ * reset_frame_context == 3 ) {
+ * for ( i = 0; i < 4; i ++ ) {
+ * save_probs( i )
+ * }
+ * } else if ( reset_frame_context == 2 ) {
+ * save_probs( frame_context_idx )
+ * }
+ * frame_context_idx = 0
+ * }
+ */
+ fctx_idx = v4l2_vp9_reset_frame_ctx(*dec_params, vp9_ctx->frame_context);
+ vp9_ctx->cur.frame_context_idx = fctx_idx;
+
+ /* 6.1 frame(sz): load_probs() and load_probs2() */
+ vp9_ctx->probability_tables = vp9_ctx->frame_context[fctx_idx];
+
+ /*
+ * Userspace has also performed 6.3 compressed_header(), but handled the
+ * probs in a special way. All probs which need updating, except MV-related,
+ * have been read from the bitstream and translated through inv_map_table[],
+ * but no 6.3.6 inv_recenter_nonneg(v, m) has been performed. The values passed
+ * by userspace are either translated values (there are no 0 values in
+ * inv_map_table[]), or zero to indicate no update. All MV-related probs which need
+ * updating have been read from the bitstream and (mv_prob << 1) | 1 has been
+ * performed. The values passed by userspace are either new values
+ * to replace old ones (the above-mentioned shift and bitwise OR never result in
+ * a zero) or zero to indicate no update.
+ * fw_update_probs() performs actual probs updates or leaves probs as-is
+ * for values for which a zero was passed from userspace.
+ */
+ v4l2_vp9_fw_update_probs(&vp9_ctx->probability_tables, prob_updates, *dec_params);
+
+ return 0;
+}
+
+static size_t chroma_offset(const struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ int bytes_per_pixel = dec_params->bit_depth == 8 ? 1 : 2;
+
+ return ctx->src_fmt.width * ctx->src_fmt.height * bytes_per_pixel;
+}
+
+static size_t mv_offset(const struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ size_t cr_offset = chroma_offset(ctx, dec_params);
+
+ return ALIGN((cr_offset * 3) / 2, G2_ALIGN);
+}
+
+static struct hantro_decoded_buffer *
+get_ref_buf(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *dst, u64 timestamp)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_q = &m2m_ctx->cap_q_ctx.q;
+ struct vb2_buffer *buf;
+
+ /*
+ * If a ref is unused or invalid, the address of the current destination
+ * buffer is returned.
+ */
+ buf = vb2_find_buffer(cap_q, timestamp);
+ if (!buf)
+ buf = &dst->vb2_buf;
+
+ return vb2_to_hantro_decoded_buf(buf);
+}
+
+static void update_dec_buf_info(struct hantro_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ buf->vp9.width = dec_params->frame_width_minus_1 + 1;
+ buf->vp9.height = dec_params->frame_height_minus_1 + 1;
+ buf->vp9.bit_depth = dec_params->bit_depth;
+}
+
+static void update_ctx_cur_info(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
+ struct hantro_decoded_buffer *buf,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ vp9_ctx->cur.valid = true;
+ vp9_ctx->cur.reference_mode = dec_params->reference_mode;
+ vp9_ctx->cur.interpolation_filter = dec_params->interpolation_filter;
+ vp9_ctx->cur.flags = dec_params->flags;
+ vp9_ctx->cur.timestamp = buf->base.vb.vb2_buf.timestamp;
+}
+
+static void config_output(struct hantro_ctx *ctx,
+ struct hantro_decoded_buffer *dst,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ dma_addr_t luma_addr, chroma_addr, mv_addr;
+
+ hantro_reg_write(ctx->dev, &g2_out_dis, 0);
+ if (!ctx->dev->variant->legacy_regs)
+ hantro_reg_write(ctx->dev, &g2_output_format, 0);
+
+ luma_addr = hantro_get_dec_buf_addr(ctx, &dst->base.vb.vb2_buf);
+ hantro_write_addr(ctx->dev, G2_OUT_LUMA_ADDR, luma_addr);
+
+ chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_OUT_CHROMA_ADDR, chroma_addr);
+
+ mv_addr = luma_addr + mv_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_OUT_MV_ADDR, mv_addr);
+}
+
+struct hantro_vp9_ref_reg {
+ const struct hantro_reg width;
+ const struct hantro_reg height;
+ const struct hantro_reg hor_scale;
+ const struct hantro_reg ver_scale;
+ u32 y_base;
+ u32 c_base;
+};
+
+static void config_ref(struct hantro_ctx *ctx,
+ struct hantro_decoded_buffer *dst,
+ const struct hantro_vp9_ref_reg *ref_reg,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ u64 ref_ts)
+{
+ struct hantro_decoded_buffer *buf;
+ dma_addr_t luma_addr, chroma_addr;
+ u32 refw, refh;
+
+ buf = get_ref_buf(ctx, &dst->base.vb, ref_ts);
+ refw = buf->vp9.width;
+ refh = buf->vp9.height;
+
+ hantro_reg_write(ctx->dev, &ref_reg->width, refw);
+ hantro_reg_write(ctx->dev, &ref_reg->height, refh);
+
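+ /*
+ * The scaling factors are ref/dst ratios in 2.14 fixed point, e.g. a
+ * 1920-wide reference decoded into a 960-wide frame gives
+ * (1920 << 14) / 960 = 0x8000.
+ */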
+ hantro_reg_write(ctx->dev, &ref_reg->hor_scale, (refw << 14) / dst->vp9.width);
+ hantro_reg_write(ctx->dev, &ref_reg->ver_scale, (refh << 14) / dst->vp9.height);
+
+ luma_addr = hantro_get_dec_buf_addr(ctx, &buf->base.vb.vb2_buf);
+ hantro_write_addr(ctx->dev, ref_reg->y_base, luma_addr);
+
+ chroma_addr = luma_addr + chroma_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, ref_reg->c_base, chroma_addr);
+}
+
+static void config_ref_registers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct hantro_decoded_buffer *dst,
+ struct hantro_decoded_buffer *mv_ref)
+{
+ static const struct hantro_vp9_ref_reg ref_regs[] = {
+ {
+ /* Last */
+ .width = vp9_lref_width,
+ .height = vp9_lref_height,
+ .hor_scale = vp9_lref_hor_scale,
+ .ver_scale = vp9_lref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(0),
+ .c_base = G2_REF_CHROMA_ADDR(0),
+ }, {
+ /* Golden */
+ .width = vp9_gref_width,
+ .height = vp9_gref_height,
+ .hor_scale = vp9_gref_hor_scale,
+ .ver_scale = vp9_gref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(4),
+ .c_base = G2_REF_CHROMA_ADDR(4),
+ }, {
+ /* Altref */
+ .width = vp9_aref_width,
+ .height = vp9_aref_height,
+ .hor_scale = vp9_aref_hor_scale,
+ .ver_scale = vp9_aref_ver_scale,
+ .y_base = G2_REF_LUMA_ADDR(5),
+ .c_base = G2_REF_CHROMA_ADDR(5),
+ },
+ };
+ dma_addr_t mv_addr;
+
+ config_ref(ctx, dst, &ref_regs[0], dec_params, dec_params->last_frame_ts);
+ config_ref(ctx, dst, &ref_regs[1], dec_params, dec_params->golden_frame_ts);
+ config_ref(ctx, dst, &ref_regs[2], dec_params, dec_params->alt_frame_ts);
+
+ mv_addr = hantro_get_dec_buf_addr(ctx, &mv_ref->base.vb.vb2_buf) +
+ mv_offset(ctx, dec_params);
+ hantro_write_addr(ctx->dev, G2_REF_MV_ADDR(0), mv_addr);
+
+ hantro_reg_write(ctx->dev, &vp9_last_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST ? 1 : 0);
+
+ hantro_reg_write(ctx->dev, &vp9_gref_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN ? 1 : 0);
+
+ hantro_reg_write(ctx->dev, &vp9_aref_sign_bias,
+ dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT ? 1 : 0);
+}
+
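+/*
+ * Distribute superblocks as evenly as possible between tiles: tile i gets
+ * (i + 1) * sbs / tiles - i * sbs / tiles of them. For example, 10
+ * superblocks split over 4 tiles gives per-tile counts of 2, 3, 2, 3.
+ */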
+static void recompute_tile_info(unsigned short *tile_info, unsigned int tiles, unsigned int sbs)
+{
+ int i;
+ unsigned int accumulated = 0;
+ unsigned int next_accumulated;
+
+ for (i = 1; i <= tiles; ++i) {
+ next_accumulated = i * sbs / tiles;
+ *tile_info++ = next_accumulated - accumulated;
+ accumulated = next_accumulated;
+ }
+}
+
+static void
+recompute_tile_rc_info(struct hantro_ctx *ctx,
+ unsigned int tile_r, unsigned int tile_c,
+ unsigned int sbs_r, unsigned int sbs_c)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+
+ recompute_tile_info(vp9_ctx->tile_r_info, tile_r, sbs_r);
+ recompute_tile_info(vp9_ctx->tile_c_info, tile_c, sbs_c);
+
+ vp9_ctx->last_tile_r = tile_r;
+ vp9_ctx->last_tile_c = tile_c;
+ vp9_ctx->last_sbs_r = sbs_r;
+ vp9_ctx->last_sbs_c = sbs_c;
+}
+
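+/*
+ * When there are one or two more tile rows than superblock rows, the first
+ * row(s) are skipped and their superblock counts are folded into the first
+ * row that fill_tile_info() reports.
+ */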
+static inline unsigned int first_tile_row(unsigned int tile_r, unsigned int sbs_r)
+{
+ if (tile_r == sbs_r + 1)
+ return 1;
+
+ if (tile_r == sbs_r + 2)
+ return 2;
+
+ return 0;
+}
+
+static void
+fill_tile_info(struct hantro_ctx *ctx,
+ unsigned int tile_r, unsigned int tile_c,
+ unsigned int sbs_r, unsigned int sbs_c,
+ unsigned short *tile_mem)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ unsigned int i, j;
+ bool first = true;
+
+ for (i = first_tile_row(tile_r, sbs_r); i < tile_r; ++i) {
+ unsigned short r_info = vp9_ctx->tile_r_info[i];
+
+ if (first) {
+ if (i > 0)
+ r_info += vp9_ctx->tile_r_info[0];
+ if (i == 2)
+ r_info += vp9_ctx->tile_r_info[1];
+ first = false;
+ }
+ for (j = 0; j < tile_c; ++j) {
+ *tile_mem++ = vp9_ctx->tile_c_info[j];
+ *tile_mem++ = r_info;
+ }
+ }
+}
+
+static void
+config_tiles(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct hantro_decoded_buffer *dst)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_ctx->misc;
+ struct hantro_aux_buf *tile_edge = &vp9_ctx->tile_edge;
+ dma_addr_t addr;
+ unsigned short *tile_mem;
+ unsigned int rows, cols;
+
+ addr = misc->dma + vp9_ctx->tile_info_offset;
+ hantro_write_addr(ctx->dev, G2_TILE_SIZES_ADDR, addr);
+
+ tile_mem = misc->cpu + vp9_ctx->tile_info_offset;
+ if (dec_params->tile_cols_log2 || dec_params->tile_rows_log2) {
+ unsigned int tile_r = (1 << dec_params->tile_rows_log2);
+ unsigned int tile_c = (1 << dec_params->tile_cols_log2);
+ unsigned int sbs_r = hantro_vp9_num_sbs(dst->vp9.height);
+ unsigned int sbs_c = hantro_vp9_num_sbs(dst->vp9.width);
+
+ if (tile_r != vp9_ctx->last_tile_r || tile_c != vp9_ctx->last_tile_c ||
+ sbs_r != vp9_ctx->last_sbs_r || sbs_c != vp9_ctx->last_sbs_c)
+ recompute_tile_rc_info(ctx, tile_r, tile_c, sbs_r, sbs_c);
+
+ fill_tile_info(ctx, tile_r, tile_c, sbs_r, sbs_c, tile_mem);
+
+ cols = tile_c;
+ rows = tile_r;
+ hantro_reg_write(ctx->dev, &g2_tile_e, 1);
+ } else {
+ tile_mem[0] = hantro_vp9_num_sbs(dst->vp9.width);
+ tile_mem[1] = hantro_vp9_num_sbs(dst->vp9.height);
+
+ cols = 1;
+ rows = 1;
+ hantro_reg_write(ctx->dev, &g2_tile_e, 0);
+ }
+
+ if (ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_num_tile_cols_old, cols);
+ hantro_reg_write(ctx->dev, &g2_num_tile_rows_old, rows);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_num_tile_cols, cols);
+ hantro_reg_write(ctx->dev, &g2_num_tile_rows, rows);
+ }
+
+ /* provide aux buffers even if no tiles are used */
+ addr = tile_edge->dma;
+ hantro_write_addr(ctx->dev, G2_TILE_FILTER_ADDR, addr);
+
+ addr = tile_edge->dma + vp9_ctx->bsd_ctrl_offset;
+ hantro_write_addr(ctx->dev, G2_TILE_BSD_ADDR, addr);
+}
+
+static void
+update_feat_and_flag(struct hantro_vp9_dec_hw_ctx *vp9_ctx,
+ const struct v4l2_vp9_segmentation *seg,
+ unsigned int feature,
+ unsigned int segid)
+{
+ u8 mask = V4L2_VP9_SEGMENT_FEATURE_ENABLED(feature);
+
+ vp9_ctx->feature_data[segid][feature] = seg->feature_data[segid][feature];
+ vp9_ctx->feature_enabled[segid] &= ~mask;
+ vp9_ctx->feature_enabled[segid] |= (seg->feature_enabled[segid] & mask);
+}
+
+static inline s16 clip3(s16 x, s16 y, s16 z)
+{
+ return (z < x) ? x : (z > y) ? y : z;
+}
+
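+/*
+ * Segment feature values are either absolute or deltas relative to the
+ * frame-level value; deltas are clamped to [0, 255]. Note that the clip
+ * argument is currently unused.
+ */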
+static s16 feat_val_clip3(s16 feat_val, s16 feature_data, bool absolute, u8 clip)
+{
+ if (absolute)
+ return feature_data;
+
+ return clip3(0, 255, feat_val + feature_data);
+}
+
+static void config_segment(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ const struct v4l2_vp9_segmentation *seg;
+ s16 feat_val;
+ unsigned char feat_id;
+ unsigned int segid;
+ bool segment_enabled, absolute, update_data;
+
+ static const struct hantro_reg seg_regs[8][V4L2_VP9_SEG_LVL_MAX] = {
+ { vp9_quant_seg0, vp9_filt_level_seg0, vp9_refpic_seg0, vp9_skip_seg0 },
+ { vp9_quant_seg1, vp9_filt_level_seg1, vp9_refpic_seg1, vp9_skip_seg1 },
+ { vp9_quant_seg2, vp9_filt_level_seg2, vp9_refpic_seg2, vp9_skip_seg2 },
+ { vp9_quant_seg3, vp9_filt_level_seg3, vp9_refpic_seg3, vp9_skip_seg3 },
+ { vp9_quant_seg4, vp9_filt_level_seg4, vp9_refpic_seg4, vp9_skip_seg4 },
+ { vp9_quant_seg5, vp9_filt_level_seg5, vp9_refpic_seg5, vp9_skip_seg5 },
+ { vp9_quant_seg6, vp9_filt_level_seg6, vp9_refpic_seg6, vp9_skip_seg6 },
+ { vp9_quant_seg7, vp9_filt_level_seg7, vp9_refpic_seg7, vp9_skip_seg7 },
+ };
+
+ segment_enabled = !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_ENABLED);
+ hantro_reg_write(ctx->dev, &vp9_segment_e, segment_enabled);
+ hantro_reg_write(ctx->dev, &vp9_segment_upd_e,
+ !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP));
+ hantro_reg_write(ctx->dev, &vp9_segment_temp_upd_e,
+ !!(dec_params->seg.flags & V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE));
+
+ seg = &dec_params->seg;
+ absolute = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE);
+ update_data = !!(seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA);
+
+ for (segid = 0; segid < 8; ++segid) {
+ /* Quantizer segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_ALT_Q;
+ feat_val = dec_params->quant.base_q_idx;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = feat_val_clip3(feat_val,
+ vp9_ctx->feature_data[segid][feat_id],
+ absolute, 255);
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Loop filter segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_ALT_L;
+ feat_val = dec_params->lf.level;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = feat_val_clip3(feat_val,
+ vp9_ctx->feature_data[segid][feat_id],
+ absolute, 63);
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Reference frame segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_REF_FRAME;
+ feat_val = 0;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ if (!(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled, feat_id, segid))
+ feat_val = vp9_ctx->feature_data[segid][feat_id] + 1;
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+
+ /* Skip segment feature */
+ feat_id = V4L2_VP9_SEG_LVL_SKIP;
+ feat_val = 0;
+ if (segment_enabled) {
+ if (update_data)
+ update_feat_and_flag(vp9_ctx, seg, feat_id, segid);
+ feat_val = v4l2_vp9_seg_feat_enabled(vp9_ctx->feature_enabled,
+ feat_id, segid) ? 1 : 0;
+ }
+ hantro_reg_write(ctx->dev, &seg_regs[segid][feat_id], feat_val);
+ }
+}
+
+static void config_loop_filter(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ bool d = dec_params->lf.flags & V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED;
+
+ hantro_reg_write(ctx->dev, &vp9_filt_level, dec_params->lf.level);
+ hantro_reg_write(ctx->dev, &g2_out_filtering_dis, dec_params->lf.level == 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_sharpness, dec_params->lf.sharpness);
+
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_0, d ? dec_params->lf.ref_deltas[0] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_1, d ? dec_params->lf.ref_deltas[1] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_2, d ? dec_params->lf.ref_deltas[2] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_ref_adj_3, d ? dec_params->lf.ref_deltas[3] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_0, d ? dec_params->lf.mode_deltas[0] : 0);
+ hantro_reg_write(ctx->dev, &vp9_filt_mb_adj_1, d ? dec_params->lf.mode_deltas[1] : 0);
+}
+
+static void config_picture_dimensions(struct hantro_ctx *ctx, struct hantro_decoded_buffer *dst)
+{
+ u32 pic_w_4x4, pic_h_4x4;
+
+ hantro_reg_write(ctx->dev, &g2_pic_width_in_cbs, (dst->vp9.width + 7) / 8);
+ hantro_reg_write(ctx->dev, &g2_pic_height_in_cbs, (dst->vp9.height + 7) / 8);
+ pic_w_4x4 = roundup(dst->vp9.width, 8) >> 2;
+ pic_h_4x4 = roundup(dst->vp9.height, 8) >> 2;
+ hantro_reg_write(ctx->dev, &g2_pic_width_4x4, pic_w_4x4);
+ hantro_reg_write(ctx->dev, &g2_pic_height_4x4, pic_h_4x4);
+}
+
+static void
+config_bit_depth(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ if (ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_bit_depth_y, dec_params->bit_depth);
+ hantro_reg_write(ctx->dev, &g2_bit_depth_c, dec_params->bit_depth);
+ hantro_reg_write(ctx->dev, &g2_pix_shift, 0);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_bit_depth_y_minus8, dec_params->bit_depth - 8);
+ hantro_reg_write(ctx->dev, &g2_bit_depth_c_minus8, dec_params->bit_depth - 8);
+ }
+}
+
+static inline bool is_lossless(const struct v4l2_vp9_quantization *quant)
+{
+ return quant->base_q_idx == 0 && quant->delta_q_uv_ac == 0 &&
+ quant->delta_q_uv_dc == 0 && quant->delta_q_y_dc == 0;
+}
+
+static void
+config_quant(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_y_dc, dec_params->quant.delta_q_y_dc);
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_dc, dec_params->quant.delta_q_uv_dc);
+ hantro_reg_write(ctx->dev, &vp9_qp_delta_ch_ac, dec_params->quant.delta_q_uv_ac);
+ hantro_reg_write(ctx->dev, &vp9_lossless_e, is_lossless(&dec_params->quant));
+}
+
+static u32
+hantro_interp_filter_from_v4l2(unsigned int interpolation_filter)
+{
+ switch (interpolation_filter) {
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP:
+ return 0x1;
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SMOOTH:
+ return 0;
+ case V4L2_VP9_INTERP_FILTER_EIGHTTAP_SHARP:
+ return 0x2;
+ case V4L2_VP9_INTERP_FILTER_BILINEAR:
+ return 0x3;
+ case V4L2_VP9_INTERP_FILTER_SWITCHABLE:
+ return 0x4;
+ }
+
+ return 0;
+}
+
+static void
+config_others(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ bool intra_only, bool resolution_change)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+
+ hantro_reg_write(ctx->dev, &g2_idr_pic_e, intra_only);
+
+ hantro_reg_write(ctx->dev, &vp9_transform_mode, vp9_ctx->cur.tx_mode);
+
+ hantro_reg_write(ctx->dev, &vp9_mcomp_filt_type, intra_only ?
+ 0 : hantro_interp_filter_from_v4l2(dec_params->interpolation_filter));
+
+ hantro_reg_write(ctx->dev, &vp9_high_prec_mv_e,
+ !!(dec_params->flags & V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV));
+
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_mode, dec_params->reference_mode);
+
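+	/*
+	 * Temporal MV prediction is possible only if the previous frame was
+	 * shown and was not a key frame, the resolution did not change, and
+	 * the current frame is neither error-resilient, a key frame, nor
+	 * intra-only.
+	 */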
+ hantro_reg_write(ctx->dev, &g2_tempor_mvp_e,
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ !(vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME) &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_INTRA_ONLY) &&
+ !resolution_change &&
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_SHOW_FRAME
+ );
+
+ hantro_reg_write(ctx->dev, &g2_write_mvs_e,
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME));
+}
+
+static void
+config_compound_reference(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ u32 comp_fixed_ref, comp_var_ref[2];
+ bool last_ref_frame_sign_bias;
+ bool golden_ref_frame_sign_bias;
+ bool alt_ref_frame_sign_bias;
+ bool comp_ref_allowed = 0;
+
+ comp_fixed_ref = 0;
+ comp_var_ref[0] = 0;
+ comp_var_ref[1] = 0;
+
+ last_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_LAST;
+ golden_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_GOLDEN;
+ alt_ref_frame_sign_bias = dec_params->ref_frame_sign_bias & V4L2_VP9_SIGN_BIAS_ALT;
+
+ /* 6.3.12 Frame reference mode syntax */
+ comp_ref_allowed |= golden_ref_frame_sign_bias != last_ref_frame_sign_bias;
+ comp_ref_allowed |= alt_ref_frame_sign_bias != last_ref_frame_sign_bias;
+
+ if (comp_ref_allowed) {
+ if (last_ref_frame_sign_bias ==
+ golden_ref_frame_sign_bias) {
+ comp_fixed_ref = ALTREF_FRAME;
+ comp_var_ref[0] = LAST_FRAME;
+ comp_var_ref[1] = GOLDEN_FRAME;
+ } else if (last_ref_frame_sign_bias ==
+ alt_ref_frame_sign_bias) {
+ comp_fixed_ref = GOLDEN_FRAME;
+ comp_var_ref[0] = LAST_FRAME;
+ comp_var_ref[1] = ALTREF_FRAME;
+ } else {
+ comp_fixed_ref = LAST_FRAME;
+ comp_var_ref[0] = GOLDEN_FRAME;
+ comp_var_ref[1] = ALTREF_FRAME;
+ }
+ }
+
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_fixed_ref, comp_fixed_ref);
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref0, comp_var_ref[0]);
+ hantro_reg_write(ctx->dev, &vp9_comp_pred_var_ref1, comp_var_ref[1]);
+}
+
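+/*
+ * Helper for config_probs(): copies one innermost group of coefficient
+ * probabilities into the hardware probability table and clears the fourth
+ * entry of each group.
+ */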
+#define INNER_LOOP \
+do { \
+ for (m = 0; m < ARRAY_SIZE(adaptive->coef[0][0][0][0]); ++m) { \
+ memcpy(adaptive->coef[i][j][k][l][m], \
+ probs->coef[i][j][k][l][m], \
+ sizeof(probs->coef[i][j][k][l][m])); \
+ \
+ adaptive->coef[i][j][k][l][m][3] = 0; \
+ } \
+} while (0)
+
+static void config_probs(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_ctx->misc;
+ struct hantro_g2_all_probs *all_probs = misc->cpu;
+ struct hantro_g2_probs *adaptive;
+ struct hantro_g2_mv_probs *mv;
+ const struct v4l2_vp9_segmentation *seg = &dec_params->seg;
+ const struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
+ int i, j, k, l, m;
+
+ for (i = 0; i < ARRAY_SIZE(all_probs->kf_y_mode_prob); ++i)
+ for (j = 0; j < ARRAY_SIZE(all_probs->kf_y_mode_prob[0]); ++j) {
+ memcpy(all_probs->kf_y_mode_prob[i][j],
+ v4l2_vp9_kf_y_mode_prob[i][j],
+ ARRAY_SIZE(all_probs->kf_y_mode_prob[i][j]));
+
+ all_probs->kf_y_mode_prob_tail[i][j][0] =
+ v4l2_vp9_kf_y_mode_prob[i][j][8];
+ }
+
+ memcpy(all_probs->mb_segment_tree_probs, seg->tree_probs,
+ sizeof(all_probs->mb_segment_tree_probs));
+
+ memcpy(all_probs->segment_pred_probs, seg->pred_probs,
+ sizeof(all_probs->segment_pred_probs));
+
+ for (i = 0; i < ARRAY_SIZE(all_probs->kf_uv_mode_prob); ++i) {
+ memcpy(all_probs->kf_uv_mode_prob[i], v4l2_vp9_kf_uv_mode_prob[i],
+ ARRAY_SIZE(all_probs->kf_uv_mode_prob[i]));
+
+ all_probs->kf_uv_mode_prob_tail[i][0] = v4l2_vp9_kf_uv_mode_prob[i][8];
+ }
+
+ adaptive = &all_probs->probs;
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->inter_mode); ++i) {
+ memcpy(adaptive->inter_mode[i], probs->inter_mode[i],
+ ARRAY_SIZE(probs->inter_mode[i]));
+
+ adaptive->inter_mode[i][3] = 0;
+ }
+
+ memcpy(adaptive->is_inter, probs->is_inter, sizeof(adaptive->is_inter));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->uv_mode); ++i) {
+ memcpy(adaptive->uv_mode[i], probs->uv_mode[i],
+ sizeof(adaptive->uv_mode[i]));
+ adaptive->uv_mode_tail[i][0] = probs->uv_mode[i][8];
+ }
+
+ memcpy(adaptive->tx8, probs->tx8, sizeof(adaptive->tx8));
+ memcpy(adaptive->tx16, probs->tx16, sizeof(adaptive->tx16));
+ memcpy(adaptive->tx32, probs->tx32, sizeof(adaptive->tx32));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->y_mode); ++i) {
+ memcpy(adaptive->y_mode[i], probs->y_mode[i],
+ ARRAY_SIZE(adaptive->y_mode[i]));
+
+ adaptive->y_mode_tail[i][0] = probs->y_mode[i][8];
+ }
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->partition[0]); ++i) {
+ memcpy(adaptive->partition[0][i], v4l2_vp9_kf_partition_probs[i],
+ sizeof(v4l2_vp9_kf_partition_probs[i]));
+
+ adaptive->partition[0][i][3] = 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->partition[1]); ++i) {
+ memcpy(adaptive->partition[1][i], probs->partition[i],
+ sizeof(probs->partition[i]));
+
+ adaptive->partition[1][i][3] = 0;
+ }
+
+ memcpy(adaptive->interp_filter, probs->interp_filter,
+ sizeof(adaptive->interp_filter));
+
+ memcpy(adaptive->comp_mode, probs->comp_mode, sizeof(adaptive->comp_mode));
+
+ memcpy(adaptive->skip, probs->skip, sizeof(adaptive->skip));
+
+ mv = &adaptive->mv;
+
+ memcpy(mv->joint, probs->mv.joint, sizeof(mv->joint));
+ memcpy(mv->sign, probs->mv.sign, sizeof(mv->sign));
+ memcpy(mv->class0_bit, probs->mv.class0_bit, sizeof(mv->class0_bit));
+ memcpy(mv->fr, probs->mv.fr, sizeof(mv->fr));
+ memcpy(mv->class0_hp, probs->mv.class0_hp, sizeof(mv->class0_hp));
+ memcpy(mv->hp, probs->mv.hp, sizeof(mv->hp));
+ memcpy(mv->classes, probs->mv.classes, sizeof(mv->classes));
+ memcpy(mv->class0_fr, probs->mv.class0_fr, sizeof(mv->class0_fr));
+ memcpy(mv->bits, probs->mv.bits, sizeof(mv->bits));
+
+ memcpy(adaptive->single_ref, probs->single_ref, sizeof(adaptive->single_ref));
+
+ memcpy(adaptive->comp_ref, probs->comp_ref, sizeof(adaptive->comp_ref));
+
+ for (i = 0; i < ARRAY_SIZE(adaptive->coef); ++i)
+ for (j = 0; j < ARRAY_SIZE(adaptive->coef[0]); ++j)
+ for (k = 0; k < ARRAY_SIZE(adaptive->coef[0][0]); ++k)
+ for (l = 0; l < ARRAY_SIZE(adaptive->coef[0][0][0]); ++l)
+ INNER_LOOP;
+
+ hantro_write_addr(ctx->dev, G2_VP9_PROBS_ADDR, misc->dma);
+}
+
+static void config_counts(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+ dma_addr_t addr = misc->dma + vp9_dec->ctx_counters_offset;
+
+ hantro_write_addr(ctx->dev, G2_VP9_CTX_COUNT_ADDR, addr);
+}
+
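+/*
+ * The segment map is double buffered: the hardware reads the map produced
+ * by the previous frame from one half and writes the updated map to the
+ * other half. The halves are swapped only when the frame updates the map.
+ */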
+static void config_seg_map(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp9_frame *dec_params,
+ bool intra_only, bool update_map)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct hantro_aux_buf *segment_map = &vp9_ctx->segment_map;
+ dma_addr_t addr;
+
+ if (intra_only ||
+ (dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT)) {
+ memset(segment_map->cpu, 0, segment_map->size);
+ memset(vp9_ctx->feature_data, 0, sizeof(vp9_ctx->feature_data));
+ memset(vp9_ctx->feature_enabled, 0, sizeof(vp9_ctx->feature_enabled));
+ }
+
+ addr = segment_map->dma + vp9_ctx->active_segment * vp9_ctx->segment_map_size;
+ hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_READ_ADDR, addr);
+
+ addr = segment_map->dma + (1 - vp9_ctx->active_segment) * vp9_ctx->segment_map_size;
+ hantro_write_addr(ctx->dev, G2_VP9_SEGMENT_WRITE_ADDR, addr);
+
+ if (update_map)
+ vp9_ctx->active_segment = 1 - vp9_ctx->active_segment;
+}
+
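+/*
+ * Decoding starts right after the uncompressed and compressed headers.
+ * Legacy cores take a 16-byte aligned stream address, so the residual
+ * offset within that 16-byte block is programmed as a bit position.
+ */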
+static void
+config_source(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct vb2_v4l2_buffer *vb2_src)
+{
+ dma_addr_t stream_base, tmp_addr;
+ unsigned int headres_size;
+ u32 src_len, start_bit, src_buf_len;
+
+ headres_size = dec_params->uncompressed_header_size
+ + dec_params->compressed_header_size;
+
+ stream_base = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ tmp_addr = stream_base + headres_size;
+ if (ctx->dev->variant->legacy_regs)
+ hantro_write_addr(ctx->dev, G2_STREAM_ADDR, (tmp_addr & ~0xf));
+ else
+ hantro_write_addr(ctx->dev, G2_STREAM_ADDR, stream_base);
+
+ start_bit = (tmp_addr & 0xf) * 8;
+ hantro_reg_write(ctx->dev, &g2_start_bit, start_bit);
+
+ src_len = vb2_get_plane_payload(&vb2_src->vb2_buf, 0);
+ src_len += start_bit / 8 - headres_size;
+ hantro_reg_write(ctx->dev, &g2_stream_len, src_len);
+
+ if (!ctx->dev->variant->legacy_regs) {
+ tmp_addr &= ~0xf;
+ hantro_reg_write(ctx->dev, &g2_strm_start_offset, tmp_addr - stream_base);
+ src_buf_len = vb2_plane_size(&vb2_src->vb2_buf, 0);
+ hantro_reg_write(ctx->dev, &g2_strm_buffer_len, src_buf_len);
+ }
+}
+
+static void
+config_registers(struct hantro_ctx *ctx, const struct v4l2_ctrl_vp9_frame *dec_params,
+ struct vb2_v4l2_buffer *vb2_src, struct vb2_v4l2_buffer *vb2_dst)
+{
+ struct hantro_decoded_buffer *dst, *last, *mv_ref;
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ const struct v4l2_vp9_segmentation *seg;
+ bool intra_only, resolution_change;
+
+	/* Resolve destination/reference buffers and update the VP9 state */
+ dst = vb2_to_hantro_decoded_buf(&vb2_dst->vb2_buf);
+
+ if (vp9_ctx->last.valid)
+ last = get_ref_buf(ctx, &dst->base.vb, vp9_ctx->last.timestamp);
+ else
+ last = dst;
+
+ update_dec_buf_info(dst, dec_params);
+ update_ctx_cur_info(vp9_ctx, dst, dec_params);
+ seg = &dec_params->seg;
+
+ intra_only = !!(dec_params->flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME |
+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY));
+
+ if (!intra_only &&
+ !(dec_params->flags & V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT) &&
+ vp9_ctx->last.valid)
+ mv_ref = last;
+ else
+ mv_ref = dst;
+
+ resolution_change = dst->vp9.width != last->vp9.width ||
+ dst->vp9.height != last->vp9.height;
+
+ /* configure basic registers */
+ hantro_reg_write(ctx->dev, &g2_mode, VP9_DEC_MODE);
+ if (!ctx->dev->variant->legacy_regs) {
+ hantro_reg_write(ctx->dev, &g2_strm_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_dirmv_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_compress_swap, 0xf);
+ hantro_reg_write(ctx->dev, &g2_ref_compress_bypass, 1);
+ } else {
+ hantro_reg_write(ctx->dev, &g2_strm_swap_old, 0x1f);
+ hantro_reg_write(ctx->dev, &g2_pic_swap, 0x10);
+ hantro_reg_write(ctx->dev, &g2_dirmv_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab0_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab1_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab2_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_tab3_swap_old, 0x10);
+ hantro_reg_write(ctx->dev, &g2_rscan_swap, 0x10);
+ }
+ hantro_reg_write(ctx->dev, &g2_buswidth, BUS_WIDTH_128);
+ hantro_reg_write(ctx->dev, &g2_max_burst, 16);
+ hantro_reg_write(ctx->dev, &g2_apf_threshold, 8);
+ hantro_reg_write(ctx->dev, &g2_clk_gate_e, 1);
+ hantro_reg_write(ctx->dev, &g2_max_cb_size, 6);
+ hantro_reg_write(ctx->dev, &g2_min_cb_size, 3);
+ if (ctx->dev->variant->double_buffer)
+ hantro_reg_write(ctx->dev, &g2_double_buffer_e, 1);
+
+ config_output(ctx, dst, dec_params);
+
+ if (!intra_only)
+ config_ref_registers(ctx, dec_params, dst, mv_ref);
+
+ config_tiles(ctx, dec_params, dst);
+ config_segment(ctx, dec_params);
+ config_loop_filter(ctx, dec_params);
+ config_picture_dimensions(ctx, dst);
+ config_bit_depth(ctx, dec_params);
+ config_quant(ctx, dec_params);
+ config_others(ctx, dec_params, intra_only, resolution_change);
+ config_compound_reference(ctx, dec_params);
+ config_probs(ctx, dec_params);
+ config_counts(ctx);
+ config_seg_map(ctx, dec_params, intra_only,
+ seg->flags & V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP);
+ config_source(ctx, dec_params, vb2_src);
+}
+
+int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp9_frame *decode_params;
+ struct vb2_v4l2_buffer *src;
+ struct vb2_v4l2_buffer *dst;
+ int ret;
+
+ hantro_g2_check_idle(ctx->dev);
+
+ ret = start_prepare_run(ctx, &decode_params);
+ if (ret) {
+ hantro_end_prepare_run(ctx);
+ return ret;
+ }
+
+ src = hantro_get_src_buf(ctx);
+ dst = hantro_get_dst_buf(ctx);
+
+ config_registers(ctx, decode_params, src, dst);
+
+ hantro_end_prepare_run(ctx);
+
+ vdpu_write(ctx->dev, G2_REG_INTERRUPT_DEC_E, G2_REG_INTERRUPT);
+
+ return 0;
+}
+
+#define copy_tx_and_skip(p1, p2) \
+do { \
+ memcpy((p1)->tx8, (p2)->tx8, sizeof((p1)->tx8)); \
+ memcpy((p1)->tx16, (p2)->tx16, sizeof((p1)->tx16)); \
+ memcpy((p1)->tx32, (p2)->tx32, sizeof((p1)->tx32)); \
+ memcpy((p1)->skip, (p2)->skip, sizeof((p1)->skip)); \
+} while (0)
+
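+/*
+ * If the frame does not refresh its context, only the "last" state is
+ * updated. Otherwise, unless frame-parallel decoding is enabled, the
+ * probabilities are first adapted using the symbol counts gathered by the
+ * hardware; the (possibly adapted) probabilities are then stored in the
+ * selected frame context slot.
+ */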
+void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ unsigned int fctx_idx;
+
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX))
+ goto out_update_last;
+
+ fctx_idx = vp9_ctx->cur.frame_context_idx;
+
+ if (!(vp9_ctx->cur.flags & V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE)) {
+ /* error_resilient_mode == 0 && frame_parallel_decoding_mode == 0 */
+ struct v4l2_vp9_frame_context *probs = &vp9_ctx->probability_tables;
+ bool frame_is_intra = vp9_ctx->cur.flags &
+ (V4L2_VP9_FRAME_FLAG_KEY_FRAME | V4L2_VP9_FRAME_FLAG_INTRA_ONLY);
+ struct tx_and_skip {
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 skip[3];
+ } _tx_skip, *tx_skip = &_tx_skip;
+ struct v4l2_vp9_frame_symbol_counts *counts;
+ struct symbol_counts *hantro_cnts;
+ u32 tx16p[2][4];
+ int i;
+
+ /* buffer the forward-updated TX and skip probs */
+ if (frame_is_intra)
+ copy_tx_and_skip(tx_skip, probs);
+
+ /* 6.1.2 refresh_probs(): load_probs() and load_probs2() */
+ *probs = vp9_ctx->frame_context[fctx_idx];
+
+ /* if FrameIsIntra then undo the effect of load_probs2() */
+ if (frame_is_intra)
+ copy_tx_and_skip(probs, tx_skip);
+
+ counts = &vp9_ctx->cnts;
+ hantro_cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
+ for (i = 0; i < ARRAY_SIZE(tx16p); ++i) {
+ memcpy(tx16p[i],
+ hantro_cnts->tx16x16_count[i],
+ sizeof(hantro_cnts->tx16x16_count[0]));
+ tx16p[i][3] = 0;
+ }
+ counts->tx16p = &tx16p;
+
+ v4l2_vp9_adapt_coef_probs(probs, counts,
+ !vp9_ctx->last.valid ||
+ vp9_ctx->last.flags & V4L2_VP9_FRAME_FLAG_KEY_FRAME,
+ frame_is_intra);
+
+ if (!frame_is_intra) {
+ /* load_probs2() already done */
+ u32 mv_mode[7][4];
+
+ for (i = 0; i < ARRAY_SIZE(mv_mode); ++i) {
+ mv_mode[i][0] = hantro_cnts->inter_mode_counts[i][1][0];
+ mv_mode[i][1] = hantro_cnts->inter_mode_counts[i][2][0];
+ mv_mode[i][2] = hantro_cnts->inter_mode_counts[i][0][0];
+ mv_mode[i][3] = hantro_cnts->inter_mode_counts[i][2][1];
+ }
+ counts->mv_mode = &mv_mode;
+ v4l2_vp9_adapt_noncoef_probs(&vp9_ctx->probability_tables, counts,
+ vp9_ctx->cur.reference_mode,
+ vp9_ctx->cur.interpolation_filter,
+ vp9_ctx->cur.tx_mode, vp9_ctx->cur.flags);
+ }
+ }
+
+ vp9_ctx->frame_context[fctx_idx] = vp9_ctx->probability_tables;
+
+out_update_last:
+ vp9_ctx->last = vp9_ctx->cur;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c
new file mode 100644
index 000000000000..12d69503d6ba
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_h1_jpeg_enc.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
+#include "hantro_h1_regs.h"
+
+#define H1_JPEG_QUANT_TABLE_COUNT 16
+
+static void hantro_h1_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ u32 overfill_r, overfill_b;
+ u32 reg;
+
+ /*
+	 * The source format width and height are already macroblock aligned
+	 * by the .vidioc_s_fmt_vid_cap_mplane() callback. The destination
+	 * format width and height can be further modified by
+	 * .vidioc_s_selection(), and the width is 4-aligned.
+ */
+ overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
+ overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
+
+ reg = H1_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width)
+ | H1_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4)
+ | H1_REG_IN_IMG_CTRL_OVRFLB(overfill_b)
+ | H1_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, H1_REG_IN_IMG_CTRL);
+}
+
+static void hantro_h1_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+ u32 size_left;
+
+ size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
+ size_left = 0;
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ H1_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, size_left, H1_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+		/* single plane formats we support are all interleaved */
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], H1_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], H1_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], H1_REG_ADDR_IN_PLANE_2);
+ }
+}
+
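+/*
+ * Each 64-byte quantization table is written to the hardware as 16
+ * big-endian 32-bit words; luma and chroma tables go to separate register
+ * banks.
+ */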
+static void
+hantro_h1_jpeg_enc_set_qtable(struct hantro_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+ __be32 *luma_qtable_p;
+ __be32 *chroma_qtable_p;
+
+ luma_qtable_p = (__be32 *)luma_qtable;
+ chroma_qtable_p = (__be32 *)chroma_qtable;
+
+ /*
+ * Quantization table registers must be written in contiguous blocks.
+ * DO NOT collapse the below two "for" loops into one.
+ */
+ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_LUMA_QUAT(i));
+ }
+
+ for (i = 0; i < H1_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&chroma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, H1_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
+
+int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ hantro_start_prepare_run(ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ hantro_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, H1_REG_ENC_CTRL_ENC_MODE_JPEG,
+ H1_REG_ENC_CTRL);
+
+ hantro_h1_set_src_img_ctrl(vpu, ctx);
+ hantro_h1_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf);
+ hantro_h1_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
+ jpeg_ctx.hw_chroma_qtable);
+
+ reg = H1_REG_AXI_CTRL_OUTPUT_SWAP16
+ | H1_REG_AXI_CTRL_INPUT_SWAP16
+ | H1_REG_AXI_CTRL_BURST_LEN(16)
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP32
+ | H1_REG_AXI_CTRL_INPUT_SWAP32
+ | H1_REG_AXI_CTRL_OUTPUT_SWAP8
+ | H1_REG_AXI_CTRL_INPUT_SWAP8;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, H1_REG_AXI_CTRL);
+
+ reg = H1_REG_ENC_CTRL_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | H1_REG_ENC_CTRL_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
+ | H1_REG_ENC_CTRL_ENC_MODE_JPEG
+ | H1_REG_ENC_PIC_INTRA
+ | H1_REG_ENC_CTRL_EN_BIT;
+
+ hantro_end_prepare_run(ctx);
+
+ vepu_write(vpu, reg, H1_REG_ENC_CTRL);
+
+ return 0;
+}
+
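+/*
+ * The hardware reports the produced stream length in bits; convert it to
+ * bytes and add the JPEG header size to get the total payload.
+ */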
+void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ u32 bytesused = vepu_read(vpu, H1_REG_STR_BUF_LIMIT) / 8;
+ struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ ctx->vpu_dst_fmt->header_size + bytesused);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_h1_regs.h b/drivers/media/platform/verisilicon/hantro_h1_regs.h
new file mode 100644
index 000000000000..30e7e7b920b5
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_h1_regs.h
@@ -0,0 +1,154 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_H1_REGS_H_
+#define HANTRO_H1_REGS_H_
+
+/* Encoder registers. */
+#define H1_REG_INTERRUPT 0x004
+#define H1_REG_INTERRUPT_FRAME_RDY BIT(2)
+#define H1_REG_INTERRUPT_DIS_BIT BIT(1)
+#define H1_REG_INTERRUPT_BIT BIT(0)
+#define H1_REG_AXI_CTRL 0x008
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP16 BIT(15)
+#define H1_REG_AXI_CTRL_INPUT_SWAP16 BIT(14)
+#define H1_REG_AXI_CTRL_BURST_LEN(x) ((x) << 8)
+#define H1_REG_AXI_CTRL_GATE_BIT BIT(4)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP32 BIT(3)
+#define H1_REG_AXI_CTRL_INPUT_SWAP32 BIT(2)
+#define H1_REG_AXI_CTRL_OUTPUT_SWAP8 BIT(1)
+#define H1_REG_AXI_CTRL_INPUT_SWAP8 BIT(0)
+#define H1_REG_ADDR_OUTPUT_STREAM 0x014
+#define H1_REG_ADDR_OUTPUT_CTRL 0x018
+#define H1_REG_ADDR_REF_LUMA 0x01c
+#define H1_REG_ADDR_REF_CHROMA 0x020
+#define H1_REG_ADDR_REC_LUMA 0x024
+#define H1_REG_ADDR_REC_CHROMA 0x028
+#define H1_REG_ADDR_IN_PLANE_0 0x02c
+#define H1_REG_ADDR_IN_PLANE_1 0x030
+#define H1_REG_ADDR_IN_PLANE_2 0x034
+#define H1_REG_ENC_CTRL 0x038
+#define H1_REG_ENC_CTRL_TIMEOUT_EN BIT(31)
+#define H1_REG_ENC_CTRL_NAL_MODE_BIT BIT(29)
+#define H1_REG_ENC_CTRL_WIDTH(w) ((w) << 19)
+#define H1_REG_ENC_CTRL_HEIGHT(h) ((h) << 10)
+#define H1_REG_ENC_PIC_INTER (0x0 << 3)
+#define H1_REG_ENC_PIC_INTRA (0x1 << 3)
+#define H1_REG_ENC_PIC_MVCINTER (0x2 << 3)
+#define H1_REG_ENC_CTRL_ENC_MODE_H264 (0x3 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_JPEG (0x2 << 1)
+#define H1_REG_ENC_CTRL_ENC_MODE_VP8 (0x1 << 1)
+#define H1_REG_ENC_CTRL_EN_BIT BIT(0)
+#define H1_REG_IN_IMG_CTRL 0x03c
+#define H1_REG_IN_IMG_CTRL_ROW_LEN(x) ((x) << 12)
+#define H1_REG_IN_IMG_CTRL_OVRFLR_D4(x) ((x) << 10)
+#define H1_REG_IN_IMG_CTRL_OVRFLB(x) ((x) << 6)
+#define H1_REG_IN_IMG_CTRL_FMT(x) ((x) << 2)
+#define H1_REG_ENC_CTRL0 0x040
+#define H1_REG_ENC_CTRL0_INIT_QP(x) ((x) << 26)
+#define H1_REG_ENC_CTRL0_SLICE_ALPHA(x) ((x) << 22)
+#define H1_REG_ENC_CTRL0_SLICE_BETA(x) ((x) << 18)
+#define H1_REG_ENC_CTRL0_CHROMA_QP_OFFSET(x) ((x) << 13)
+#define H1_REG_ENC_CTRL0_FILTER_DIS(x) ((x) << 5)
+#define H1_REG_ENC_CTRL0_IDR_PICID(x) ((x) << 1)
+#define H1_REG_ENC_CTRL0_CONSTR_INTRA_PRED BIT(0)
+#define H1_REG_ENC_CTRL1 0x044
+#define H1_REG_ENC_CTRL1_PPS_ID(x) ((x) << 24)
+#define H1_REG_ENC_CTRL1_INTRA_PRED_MODE(x) ((x) << 16)
+#define H1_REG_ENC_CTRL1_FRAME_NUM(x) ((x))
+#define H1_REG_ENC_CTRL2 0x048
+#define H1_REG_ENC_CTRL2_DEBLOCKING_FILETER_MODE(x) ((x) << 30)
+#define H1_REG_ENC_CTRL2_H264_SLICE_SIZE(x) ((x) << 23)
+#define H1_REG_ENC_CTRL2_DISABLE_QUARTER_PIXMV BIT(22)
+#define H1_REG_ENC_CTRL2_TRANS8X8_MODE_EN BIT(21)
+#define H1_REG_ENC_CTRL2_CABAC_INIT_IDC(x) ((x) << 19)
+#define H1_REG_ENC_CTRL2_ENTROPY_CODING_MODE BIT(18)
+#define H1_REG_ENC_CTRL2_H264_INTER4X4_MODE BIT(17)
+#define H1_REG_ENC_CTRL2_H264_STREAM_MODE BIT(16)
+#define H1_REG_ENC_CTRL2_INTRA16X16_MODE(x) ((x))
+#define H1_REG_ENC_CTRL3 0x04c
+#define H1_REG_ENC_CTRL3_MUTIMV_EN BIT(30)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1_4P(x) ((x) << 20)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_4P(x) ((x) << 10)
+#define H1_REG_ENC_CTRL3_MV_PENALTY_1P(x) ((x))
+#define H1_REG_ENC_CTRL4 0x050
+#define H1_REG_ENC_CTRL4_MV_PENALTY_16X8_8X16(x) ((x) << 20)
+#define H1_REG_ENC_CTRL4_MV_PENALTY_8X8(x) ((x) << 10)
+#define H1_REG_ENC_CTRL4_8X4_4X8(x) ((x))
+#define H1_REG_ENC_CTRL5 0x054
+#define H1_REG_ENC_CTRL5_MACROBLOCK_PENALTY(x) ((x) << 24)
+#define H1_REG_ENC_CTRL5_COMPLETE_SLICES(x) ((x) << 16)
+#define H1_REG_ENC_CTRL5_INTER_MODE(x) ((x))
+#define H1_REG_STR_HDR_REM_MSB 0x058
+#define H1_REG_STR_HDR_REM_LSB 0x05c
+#define H1_REG_STR_BUF_LIMIT 0x060
+#define H1_REG_MAD_CTRL 0x064
+#define H1_REG_MAD_CTRL_QP_ADJUST(x) ((x) << 28)
+#define H1_REG_MAD_CTRL_MAD_THREDHOLD(x) ((x) << 22)
+#define H1_REG_MAD_CTRL_QP_SUM_DIV2(x) ((x))
+#define H1_REG_ADDR_VP8_PROB_CNT 0x068
+#define H1_REG_QP_VAL 0x06c
+#define H1_REG_QP_VAL_LUM(x) ((x) << 26)
+#define H1_REG_QP_VAL_MAX(x) ((x) << 20)
+#define H1_REG_QP_VAL_MIN(x) ((x) << 14)
+#define H1_REG_QP_VAL_CHECKPOINT_DISTAN(x) ((x))
+#define H1_REG_VP8_QP_VAL(i) (0x06c + ((i) * 0x4))
+#define H1_REG_CHECKPOINT(i) (0x070 + ((i) * 0x4))
+#define H1_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define H1_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_CHECKPOINT_RESULT(x) ((((x) >> (16 - 16 \
+ * (i & 1))) & 0xffff) \
+ * 32)
+#define H1_REG_CHKPT_WORD_ERR(i) (0x084 + ((i) * 0x4))
+#define H1_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define H1_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define H1_REG_VP8_BOOL_ENC 0x08c
+#define H1_REG_CHKPT_DELTA_QP 0x090
+#define H1_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define H1_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define H1_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define H1_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define H1_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define H1_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define H1_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define H1_REG_VP8_CTRL0 0x090
+#define H1_REG_RLC_CTRL 0x094
+#define H1_REG_RLC_CTRL_STR_OFFS_SHIFT 23
+#define H1_REG_RLC_CTRL_STR_OFFS_MASK (0x3f << 23)
+#define H1_REG_RLC_CTRL_RLC_SUM(x) ((x))
+#define H1_REG_MB_CTRL 0x098
+#define H1_REG_MB_CNT_OUT(x) (((x) & 0xffff))
+#define H1_REG_MB_CNT_SET(x) (((x) & 0xffff) << 16)
+#define H1_REG_ADDR_NEXT_PIC 0x09c
+#define H1_REG_JPEG_LUMA_QUAT(i) (0x100 + ((i) * 0x4))
+#define H1_REG_JPEG_CHROMA_QUAT(i) (0x140 + ((i) * 0x4))
+#define H1_REG_STABILIZATION_OUTPUT 0x0A0
+#define H1_REG_ADDR_CABAC_TBL 0x0cc
+#define H1_REG_ADDR_MV_OUT 0x0d0
+#define H1_REG_RGB_YUV_COEFF(i) (0x0d4 + ((i) * 0x4))
+#define H1_REG_RGB_MASK_MSB 0x0dc
+#define H1_REG_INTRA_AREA_CTRL 0x0e0
+#define H1_REG_CIR_INTRA_CTRL 0x0e4
+#define H1_REG_INTRA_SLICE_BITMAP(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_DCT_PART(i) (0x0e8 + ((i) * 0x4))
+#define H1_REG_FIRST_ROI_AREA 0x0f0
+#define H1_REG_SECOND_ROI_AREA 0x0f4
+#define H1_REG_MVC_CTRL 0x0f8
+#define H1_REG_MVC_CTRL_MV16X16_FAVOR(x) ((x) << 28)
+#define H1_REG_VP8_INTRA_PENALTY(i) (0x100 + ((i) * 0x4))
+#define H1_REG_ADDR_VP8_SEG_MAP 0x11c
+#define H1_REG_VP8_SEG_QP(i) (0x120 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY(i) (0x180 + ((i) * 0x4))
+#define H1_REG_DMV_4P_1P_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_DMV_QPEL_PENALTY(i) (0x200 + ((i) * 0x4))
+#define H1_REG_DMV_QPEL_PENALTY_BIT(x, i) ((x) << (i) * 8)
+#define H1_REG_VP8_CTRL1 0x280
+#define H1_REG_VP8_BIT_COST_GOLDEN 0x284
+#define H1_REG_VP8_LOOP_FLT_DELTA(i) (0x288 + ((i) * 0x4))
+
+#endif /* HANTRO_H1_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_h264.c b/drivers/media/platform/verisilicon/hantro_h264.c
new file mode 100644
index 000000000000..4e9a0ecf5c13
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_h264.c
@@ -0,0 +1,521 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip RK3288 VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-h264.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+
+/* Size with u32 units. */
+#define CABAC_INIT_BUFFER_SIZE (460 * 2)
+#define POC_BUFFER_SIZE 34
+#define SCALING_LIST_SIZE (6 * 16 + 2 * 64)
+
+/*
+ * For valid and long term reference marking, the indexes are reversed, so
+ * bit 31 indicates the status of picture 0.
+ */
+#define REF_BIT(i) BIT(32 - 1 - (i))
+
+/* Data structure describing auxiliary buffer format. */
+struct hantro_h264_dec_priv_tbl {
+ u32 cabac_table[CABAC_INIT_BUFFER_SIZE];
+ u32 poc[POC_BUFFER_SIZE];
+ u8 scaling_list[SCALING_LIST_SIZE];
+};
+
+/*
+ * Constant CABAC table.
+ * From drivers/media/platform/rk3288-vpu/rk3288_vpu_hw_h264d.c
+ * in https://chromium.googlesource.com/chromiumos/third_party/kernel,
+ * chromeos-3.14 branch.
+ */
+static const u32 h264_cabac_table[] = {
+ 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07330000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x000b0137,
+ 0x0045ef7f, 0xf3660052, 0xf94aeb6b, 0xe57fe17f, 0xe87fee5f, 0xe57feb72,
+ 0xe27fef7b, 0xf473f07a, 0xf573f43f, 0xfe44f154, 0xf368fd46, 0xf85df65a,
+ 0xe27fff4a, 0xfa61f95b, 0xec7ffc38, 0xfb52f94c, 0xea7df95d, 0xf557fd4d,
+ 0xfb47fc3f, 0xfc44f454, 0xf93ef941, 0x083d0538, 0xfe420140, 0x003dfe4e,
+ 0x01320734, 0x0a23002c, 0x0b26012d, 0x002e052c, 0x1f110133, 0x07321c13,
+ 0x10210e3e, 0xf36cf164, 0xf365f35b, 0xf45ef658, 0xf054f656, 0xf953f357,
+ 0xed5e0146, 0x0048fb4a, 0x123bf866, 0xf164005f, 0xfc4b0248, 0xf54bfd47,
+ 0x0f2ef345, 0x003e0041, 0x1525f148, 0x09391036, 0x003e0c48, 0x18000f09,
+ 0x08190d12, 0x0f090d13, 0x0a250c12, 0x061d1421, 0x0f1e042d, 0x013a003e,
+ 0x073d0c26, 0x0b2d0f27, 0x0b2a0d2c, 0x102d0c29, 0x0a311e22, 0x122a0a37,
+ 0x1133112e, 0x00591aed, 0x16ef1aef, 0x1ee71cec, 0x21e925e5, 0x21e928e4,
+ 0x26ef21f5, 0x28f129fa, 0x26012911, 0x1efa1b03, 0x1a1625f0, 0x23fc26f8,
+ 0x26fd2503, 0x26052a00, 0x23102716, 0x0e301b25, 0x153c0c44, 0x0261fd47,
+ 0xfa2afb32, 0xfd36fe3e, 0x003a013f, 0xfe48ff4a, 0xf75bfb43, 0xfb1bfd27,
+ 0xfe2c002e, 0xf040f844, 0xf64efa4d, 0xf656f45c, 0xf137f63c, 0xfa3efc41,
+ 0xf449f84c, 0xf950f758, 0xef6ef561, 0xec54f54f, 0xfa49fc4a, 0xf356f360,
+ 0xf561ed75, 0xf84efb21, 0xfc30fe35, 0xfd3ef347, 0xf64ff456, 0xf35af261,
+ 0x0000fa5d, 0xfa54f84f, 0x0042ff47, 0x003efe3c, 0xfe3bfb4b, 0xfd3efc3a,
+ 0xf742ff4f, 0x00470344, 0x0a2cf93e, 0x0f240e28, 0x101b0c1d, 0x012c1424,
+ 0x1220052a, 0x01300a3e, 0x112e0940, 0xf468f561, 0xf060f958, 0xf855f955,
+ 0xf755f358, 0x0442fd4d, 0xfd4cfa4c, 0x0a3aff4c, 0xff53f963, 0xf25f025f,
+ 0x004cfb4a, 0x0046f54b, 0x01440041, 0xf249033e, 0x043eff44, 0xf34b0b37,
+ 0x05400c46, 0x0f060613, 0x07100c0e, 0x120d0d0b, 0x0d0f0f10, 0x0c170d17,
+ 0x0f140e1a, 0x0e2c1128, 0x112f1811, 0x15151916, 0x1f1b161d, 0x13230e32,
+ 0x0a39073f, 0xfe4dfc52, 0xfd5e0945, 0xf46d24dd, 0x24de20e6, 0x25e22ce0,
+ 0x22ee22f1, 0x28f121f9, 0x23fb2100, 0x2602210d, 0x17230d3a, 0x1dfd1a00,
+ 0x161e1ff9, 0x23f122fd, 0x220324ff, 0x2205200b, 0x2305220c, 0x270b1e1d,
+ 0x221a1d27, 0x13421f15, 0x1f1f1932, 0xef78ec70, 0xee72f555, 0xf15cf259,
+ 0xe647f151, 0xf2500044, 0xf246e838, 0xe944e832, 0xf54a17f3, 0x1af328f1,
+ 0x31f22c03, 0x2d062c22, 0x21361352, 0xfd4bff17, 0x0122012b, 0x0036fe37,
+ 0x003d0140, 0x0044f75c, 0xf26af361, 0xf15af45a, 0xee58f649, 0xf74ff256,
+ 0xf649f646, 0xf645fb42, 0xf740fb3a, 0x023b15f6, 0x18f51cf8, 0x1cff1d03,
+ 0x1d092314, 0x1d240e43, 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968,
+ 0xfa35ff36, 0x07331721, 0x17021500, 0x01090031, 0xdb760539, 0xf34ef541,
+ 0x013e0c31, 0xfc491132, 0x1240092b, 0x1d001a43, 0x105a0968, 0xd27fec68,
+ 0x0143f34e, 0xf541013e, 0xfa56ef5f, 0xfa3d092d, 0xfd45fa51, 0xf5600637,
+ 0x0743fb56, 0x0258003a, 0xfd4cf65e, 0x05360445, 0xfd510058, 0xf943fb4a,
+ 0xfc4afb50, 0xf948013a, 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948,
+ 0x0d29033e, 0x002dfc4e, 0xfd60e57e, 0xe462e765, 0xe943e452, 0xec5ef053,
+ 0xea6eeb5b, 0xee66f35d, 0xe37ff95c, 0xfb59f960, 0xf36cfd2e, 0xff41ff39,
+ 0xf75dfd4a, 0xf75cf857, 0xe97e0536, 0x063c063b, 0x0645ff30, 0x0044fc45,
+ 0xf858fe55, 0xfa4eff4b, 0xf94d0236, 0x0532fd44, 0x0132062a, 0xfc51013f,
+ 0xfc460043, 0x0239fe4c, 0x0b230440, 0x013d0b23, 0x12190c18, 0x0d1d0d24,
+ 0xf65df949, 0xfe490d2e, 0x0931f964, 0x09350235, 0x0535fe3d, 0x00380038,
+ 0xf33ffb3c, 0xff3e0439, 0xfa450439, 0x0e270433, 0x0d440340, 0x013d093f,
+ 0x07321027, 0x052c0434, 0x0b30fb3c, 0xff3b003b, 0x1621052c, 0x0e2bff4e,
+ 0x003c0945, 0x0b1c0228, 0x032c0031, 0x002e022c, 0x0233002f, 0x0427023e,
+ 0x062e0036, 0x0336023a, 0x043f0633, 0x06390735, 0x06340637, 0x0b2d0e24,
+ 0x0835ff52, 0x0737fd4e, 0x0f2e161f, 0xff541907, 0x1ef91c03, 0x1c042000,
+ 0x22ff1e06, 0x1e062009, 0x1f131a1b, 0x1a1e2514, 0x1c221146, 0x0143053b,
+ 0x0943101e, 0x12201223, 0x161d181f, 0x1726122b, 0x14290b3f, 0x093b0940,
+ 0xff5efe59, 0xf76cfa4c, 0xfe2c002d, 0x0034fd40, 0xfe3bfc46, 0xfc4bf852,
+ 0xef66f74d, 0x0318002a, 0x00300037, 0xfa3bf947, 0xf453f557, 0xe277013a,
+ 0xfd1dff24, 0x0126022b, 0xfa37003a, 0x0040fd4a, 0xf65a0046, 0xfc1d051f,
+ 0x072a013b, 0xfe3afd48, 0xfd51f561, 0x003a0805, 0x0a0e0e12, 0x0d1b0228,
+ 0x003afd46, 0xfa4ff855, 0x0000f36a, 0xf06af657, 0xeb72ee6e, 0xf262ea6e,
+ 0xeb6aee67, 0xeb6be96c, 0xe670f660, 0xf45ffb5b, 0xf75dea5e, 0xfb560943,
+ 0xfc50f655, 0xff46073c, 0x093a053d, 0x0c320f32, 0x12311136, 0x0a29072e,
+ 0xff330731, 0x08340929, 0x062f0237, 0x0d290a2c, 0x06320535, 0x0d31043f,
+ 0x0640fe45, 0xfe3b0646, 0x0a2c091f, 0x0c2b0335, 0x0e220a26, 0xfd340d28,
+ 0x1120072c, 0x07260d32, 0x0a391a2b, 0x0e0b0b0e, 0x090b120b, 0x150917fe,
+ 0x20f120f1, 0x22eb27e9, 0x2adf29e1, 0x2ee426f4, 0x151d2de8, 0x35d330e6,
+ 0x41d52bed, 0x27f61e09, 0x121a141b, 0x0039f252, 0xfb4bed61, 0xdd7d1b00,
+ 0x1c001ffc, 0x1b062208, 0x1e0a1816, 0x21131620, 0x1a1f1529, 0x1a2c172f,
+ 0x10410e47, 0x083c063f, 0x11411518, 0x17141a17, 0x1b201c17, 0x1c181728,
+ 0x18201c1d, 0x172a1339, 0x1635163d, 0x0b560c28, 0x0b330e3b, 0xfc4ff947,
+ 0xfb45f746, 0xf842f644, 0xed49f445, 0xf046f143, 0xec3eed46, 0xf042ea41,
+ 0xec3f09fe, 0x1af721f7, 0x27f929fe, 0x2d033109, 0x2d1b243b, 0xfa42f923,
+ 0xf92af82d, 0xfb30f438, 0xfa3cfb3e, 0xf842f84c, 0xfb55fa51, 0xf64df951,
+ 0xef50ee49, 0xfc4af653, 0xf747f743, 0xff3df842, 0xf242003b, 0x023b15f3,
+ 0x21f227f9, 0x2efe3302, 0x3c063d11, 0x37222a3e, 0x14f10236, 0x034a14f1,
+ 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331619, 0x22001000, 0xfe090429,
+ 0xe3760241, 0xfa47f34f, 0x05340932, 0xfd460a36, 0x1a221316, 0x28003902,
+ 0x29241a45, 0xd37ff165, 0xfc4cfa47, 0xf34f0534, 0x0645f35a, 0x0034082b,
+ 0xfe45fb52, 0xf660023b, 0x024bfd57, 0xfd640138, 0xfd4afa55, 0x003bfd51,
+ 0xf956fb5f, 0xff42ff4d, 0x0146fe56, 0xfb48003d, 0x0029003f, 0x003f003f,
+ 0xf7530456, 0x0061f948, 0x0d29033e, 0x0d0f0733, 0x0250d97f, 0xee5bef60,
+ 0xe651dd62, 0xe866e961, 0xe577e863, 0xeb6eee66, 0xdc7f0050, 0xfb59f95e,
+ 0xfc5c0027, 0x0041f154, 0xdd7ffe49, 0xf468f75b, 0xe17f0337, 0x07380737,
+ 0x083dfd35, 0x0044f94a, 0xf758f367, 0xf35bf759, 0xf25cf84c, 0xf457e96e,
+ 0xe869f64e, 0xec70ef63, 0xb27fba7f, 0xce7fd27f, 0xfc42fb4e, 0xfc47f848,
+ 0x023bff37, 0xf946fa4b, 0xf859de77, 0xfd4b2014, 0x1e16d47f, 0x0036fb3d,
+ 0x003aff3c, 0xfd3df843, 0xe754f24a, 0xfb410534, 0x0239003d, 0xf745f546,
+ 0x1237fc47, 0x003a073d, 0x09291219, 0x0920052b, 0x092f002c, 0x0033022e,
+ 0x1326fc42, 0x0f260c2a, 0x09220059, 0x042d0a1c, 0x0a1f21f5, 0x34d5120f,
+ 0x1c0023ea, 0x26e72200, 0x27ee20f4, 0x66a20000, 0x38f121fc, 0x1d0a25fb,
+ 0x33e327f7, 0x34de45c6, 0x43c12cfb, 0x200737e3, 0x20010000, 0x1b2421e7,
+ 0x22e224e4, 0x26e426e5, 0x22ee23f0, 0x22f220f8, 0x25fa2300, 0x1e0a1c12,
+ 0x1a191d29, 0x004b0248, 0x084d0e23, 0x121f1123, 0x151e112d, 0x142a122d,
+ 0x1b1a1036, 0x07421038, 0x0b490a43, 0xf674e970, 0xf147f93d, 0x0035fb42,
+ 0xf54df750, 0xf754f657, 0xde7feb65, 0xfd27fb35, 0xf93df54b, 0xf14def5b,
+ 0xe76be76f, 0xe47af54c, 0xf62cf634, 0xf639f73a, 0xf048f945, 0xfc45fb4a,
+ 0xf7560242, 0xf7220120, 0x0b1f0534, 0xfe37fe43, 0x0049f859, 0x03340704,
+ 0x0a081108, 0x10130325, 0xff3dfb49, 0xff46fc4e, 0x0000eb7e, 0xe97cec6e,
+ 0xe67ee77c, 0xef69e579, 0xe575ef66, 0xe675e574, 0xdf7af65f, 0xf264f85f,
+ 0xef6fe472, 0xfa59fe50, 0xfc52f755, 0xf851ff48, 0x05400143, 0x09380045,
+ 0x01450745, 0xf945fa43, 0xf04dfe40, 0x023dfa43, 0xfd400239, 0xfd41fd42,
+ 0x003e0933, 0xff42fe47, 0xfe4bff46, 0xf7480e3c, 0x1025002f, 0x12230b25,
+ 0x0c290a29, 0x02300c29, 0x0d29003b, 0x03321328, 0x03421232, 0x13fa12fa,
+ 0x0e001af4, 0x1ff021e7, 0x21ea25e4, 0x27e22ae2, 0x2fd62ddc, 0x31de29ef,
+ 0x200945b9, 0x3fc142c0, 0x4db636d9, 0x34dd29f6, 0x240028ff, 0x1e0e1c1a,
+ 0x17250c37, 0x0b4125df, 0x27dc28db, 0x26e22edf, 0x2ae228e8, 0x31e326f4,
+ 0x28f626fd, 0x2efb1f14, 0x1d1e192c, 0x0c300b31, 0x1a2d1616, 0x17161b15,
+ 0x21141a1c, 0x1e181b22, 0x122a1927, 0x12320c46, 0x15360e47, 0x0b531920,
+ 0x15311536, 0xfb55fa51, 0xf64df951, 0xef50ee49, 0xfc4af653, 0xf747f743,
+ 0xff3df842, 0xf242003b, 0x023b11f6, 0x20f32af7, 0x31fb3500, 0x4003440a,
+ 0x421b2f39, 0xfb470018, 0xff24fe2a, 0xfe34f739, 0xfa3ffc41, 0xfc43f952,
+ 0xfd51fd4c, 0xf948fa4e, 0xf448f244, 0xfd46fa4c, 0xfb42fb3e, 0x0039fc3d,
+ 0xf73c0136, 0x023a11f6, 0x20f32af7, 0x31fb3500, 0x4003440a, 0x421b2f39,
+ 0x14f10236, 0x034a14f1, 0x0236034a, 0xe47fe968, 0xfa35ff36, 0x07331d10,
+ 0x19000e00, 0xf633fd3e, 0xe5631a10, 0xfc55e866, 0x05390639, 0xef490e39,
+ 0x1428140a, 0x1d003600, 0x252a0c61, 0xe07fea75, 0xfe4afc55, 0xe8660539,
+ 0xfa5df258, 0xfa2c0437, 0xf559f167, 0xeb741339, 0x143a0454, 0x0660013f,
+ 0xfb55f36a, 0x053f064b, 0xfd5aff65, 0x0337fc4f, 0xfe4bf461, 0xf932013c,
+ 0x0029003f, 0x003f003f, 0xf7530456, 0x0061f948, 0x0d29033e, 0x0722f758,
+ 0xec7fdc7f, 0xef5bf25f, 0xe754e756, 0xf459ef5b, 0xe17ff24c, 0xee67f35a,
+ 0xdb7f0b50, 0x054c0254, 0x054efa37, 0x043df253, 0xdb7ffb4f, 0xf568f55b,
+ 0xe27f0041, 0xfe4f0048, 0xfc5cfa38, 0x0344f847, 0xf362fc56, 0xf458fb52,
+ 0xfd48fc43, 0xf848f059, 0xf745ff3b, 0x05420439, 0xfc47fe47, 0x023aff4a,
+ 0xfc2cff45, 0x003ef933, 0xfc2ffa2a, 0xfd29fa35, 0x084cf74e, 0xf5530934,
+ 0x0043fb5a, 0x0143f148, 0xfb4bf850, 0xeb53eb40, 0xf31fe740, 0xe35e094b,
+ 0x113ff84a, 0xfb23fe1b, 0x0d5b0341, 0xf945084d, 0xf642033e, 0xfd44ec51,
+ 0x001e0107, 0xfd17eb4a, 0x1042e97c, 0x11252cee, 0x32deea7f, 0x0427002a,
+ 0x07220b1d, 0x081f0625, 0x072a0328, 0x08210d2b, 0x0d24042f, 0x0337023a,
+ 0x063c082c, 0x0b2c0e2a, 0x07300438, 0x04340d25, 0x0931133a, 0x0a300c2d,
+ 0x00451421, 0x083f23ee, 0x21e71cfd, 0x180a1b00, 0x22f234d4, 0x27e81311,
+ 0x1f19241d, 0x1821220f, 0x1e141649, 0x1422131f, 0x1b2c1310, 0x0f240f24,
+ 0x151c1915, 0x1e141f0c, 0x1b10182a, 0x005d0e38, 0x0f391a26, 0xe87fe873,
+ 0xea52f73e, 0x0035003b, 0xf255f359, 0xf35ef55c, 0xe37feb64, 0xf239f443,
+ 0xf547f64d, 0xeb55f058, 0xe968f162, 0xdb7ff652, 0xf830f83d, 0xf842f946,
+ 0xf24bf64f, 0xf753f45c, 0xee6cfc4f, 0xea45f04b, 0xfe3a013a, 0xf34ef753,
+ 0xfc51f363, 0xf351fa26, 0xf33efa3a, 0xfe3bf049, 0xf64cf356, 0xf753f657,
+ 0x0000ea7f, 0xe77fe778, 0xe57fed72, 0xe975e776, 0xe675e871, 0xe476e178,
+ 0xdb7cf65e, 0xf166f663, 0xf36ace7f, 0xfb5c1139, 0xfb56f35e, 0xf45bfe4d,
+ 0x0047ff49, 0x0440f951, 0x05400f39, 0x01430044, 0xf6430144, 0x004d0240,
+ 0x0044fb4e, 0x0737053b, 0x02410e36, 0x0f2c053c, 0x0246fe4c, 0xee560c46,
+ 0x0540f446, 0x0b370538, 0x00450241, 0xfa4a0536, 0x0736fa4c, 0xf552fe4d,
+ 0xfe4d192a, 0x11f310f7, 0x11f41beb, 0x25e229d8, 0x2ad730d1, 0x27e02ed8,
+ 0x34cd2ed7, 0x34d92bed, 0x200b3dc9, 0x38d23ece, 0x51bd2dec, 0x23fe1c0f,
+ 0x22012701, 0x1e111426, 0x122d0f36, 0x004f24f0, 0x25f225ef, 0x2001220f,
+ 0x1d0f1819, 0x22161f10, 0x23121f1c, 0x2129241c, 0x1b2f153e, 0x121f131a,
+ 0x24181817, 0x1b10181e, 0x1f1d1629, 0x162a103c, 0x0f340e3c, 0x034ef07b,
+ 0x15351638, 0x193d1521, 0x1332113d, 0xfd4ef84a, 0xf748f648, 0xee4bf447,
+ 0xf53ffb46, 0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc,
+ 0x21ff2107, 0x1f0c2517, 0x1f261440, 0xf747f925, 0xf82cf531, 0xf638f43b,
+ 0xf83ff743, 0xfa44f64f, 0xfd4ef84a, 0xf748f648, 0xee4bf447, 0xf53ffb46,
+ 0xef4bf248, 0xf043f835, 0xf23bf734, 0xf54409fe, 0x1ef61ffc, 0x21ff2107,
+ 0x1f0c2517, 0x1f261440
+};
+
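+/*
+ * Copy the scaling lists into the auxiliary table, byte-swapping each
+ * 32-bit word. This is only done when the PPS carries a scaling matrix,
+ * and only the Intra/Inter Y 8x8 lists are used.
+ */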
+static void
+assemble_scaling_list(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling = ctrls->scaling;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
+ const size_t num_list_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4);
+ const size_t list_len_4x4 = ARRAY_SIZE(scaling->scaling_list_4x4[0]);
+ const size_t list_len_8x8 = ARRAY_SIZE(scaling->scaling_list_8x8[0]);
+ struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
+ u32 *dst = (u32 *)tbl->scaling_list;
+ const u32 *src;
+ int i, j;
+
+ if (!(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT))
+ return;
+
+ for (i = 0; i < num_list_4x4; i++) {
+ src = (u32 *)&scaling->scaling_list_4x4[i];
+ for (j = 0; j < list_len_4x4 / 4; j++)
+ *dst++ = swab32(src[j]);
+ }
+
+ /* Only Intra/Inter Y lists */
+ for (i = 0; i < 2; i++) {
+ src = (u32 *)&scaling->scaling_list_8x8[i];
+ for (j = 0; j < list_len_8x8 / 4; j++)
+ *dst++ = swab32(src[j]);
+ }
+}
+
+static void prepare_table(struct hantro_ctx *ctx)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
+ struct hantro_h264_dec_priv_tbl *tbl = ctx->h264_dec.priv.cpu;
+ const struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ u32 dpb_longterm = 0;
+ u32 dpb_valid = 0;
+ int i;
+
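+	/* Each DPB slot gets two POC entries: top field, then bottom field. */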
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; ++i) {
+ tbl->poc[i * 2] = dpb[i].top_field_order_cnt;
+ tbl->poc[i * 2 + 1] = dpb[i].bottom_field_order_cnt;
+
+ if (!(dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
+ continue;
+
+ /*
+ * Set up bit maps of valid and long term DPBs.
+		 * NOTE: The bits are reversed, i.e. MSb is DPB 0. For frame
+		 * decoding, bits 31 to 16 are used, while for field decoding,
+		 * all bits are used, with bit 31 being a top field, 30 a bottom
+		 * field and so on.
+ */
+ if (dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) {
+ if (dpb[i].fields & V4L2_H264_TOP_FIELD_REF)
+ dpb_valid |= REF_BIT(i * 2);
+
+ if (dpb[i].fields & V4L2_H264_BOTTOM_FIELD_REF)
+ dpb_valid |= REF_BIT(i * 2 + 1);
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM) {
+ dpb_longterm |= REF_BIT(i * 2);
+ dpb_longterm |= REF_BIT(i * 2 + 1);
+ }
+ } else {
+ dpb_valid |= REF_BIT(i);
+
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM)
+ dpb_longterm |= REF_BIT(i);
+ }
+ }
+ ctx->h264_dec.dpb_valid = dpb_valid;
+ ctx->h264_dec.dpb_longterm = dpb_longterm;
+
+ if ((dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) ||
+ !(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)) {
+ tbl->poc[32] = ctx->h264_dec.cur_poc;
+ tbl->poc[33] = 0;
+ } else {
+ tbl->poc[32] = dec_param->top_field_order_cnt;
+ tbl->poc[33] = dec_param->bottom_field_order_cnt;
+ }
+
+ assemble_scaling_list(ctx);
+}
+
+static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
+ const struct v4l2_h264_dpb_entry *b)
+{
+ return a->reference_ts == b->reference_ts;
+}
+
+static void update_dpb(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_param;
+ DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ unsigned int i, j;
+
+ dec_param = ctx->h264_dec.ctrls.decode;
+
+ /* Disable all entries by default. */
+ for (i = 0; i < ARRAY_SIZE(ctx->h264_dec.dpb); i++)
+ ctx->h264_dec.dpb[i].flags = 0;
+
+	/* Match new DPB entries with existing ones by reference timestamp. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+
+ if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID))
+ continue;
+
+ /*
+		 * To cut down on comparisons, iterate only over target DPB
+		 * entries that are not used yet.
+ */
+ for_each_clear_bit(j, used, ARRAY_SIZE(ctx->h264_dec.dpb)) {
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ cdpb = &ctx->h264_dec.dpb[j];
+ if (!dpb_entry_match(cdpb, ndpb))
+ continue;
+
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ break;
+ }
+
+ if (j == ARRAY_SIZE(ctx->h264_dec.dpb))
+ set_bit(i, new);
+ }
+
+ /* For entries that could not be matched, use remaining free slots. */
+ for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ /*
+		 * Both arrays are the same size, so there is no way we can
+		 * run out of space in the target array unless something is
+		 * buggy.
+ */
+ j = find_first_zero_bit(used, ARRAY_SIZE(ctx->h264_dec.dpb));
+ if (WARN_ON(j >= ARRAY_SIZE(ctx->h264_dec.dpb)))
+ return;
+
+ cdpb = &ctx->h264_dec.dpb[j];
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ }
+}
+
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx)
+{
+ struct v4l2_h264_dpb_entry *dpb = ctx->h264_dec.dpb;
+ dma_addr_t dma_addr = 0;
+ s32 cur_poc = ctx->h264_dec.cur_poc;
+ u32 flags;
+
+ if (dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ dma_addr = hantro_get_ref(ctx, dpb[dpb_idx].reference_ts);
+
+ if (!dma_addr) {
+ struct vb2_v4l2_buffer *dst_buf;
+ struct vb2_buffer *buf;
+
+ /*
+		 * If a DPB entry is unused or invalid, the address of the
+		 * current destination buffer is returned.
+ */
+ dst_buf = hantro_get_dst_buf(ctx);
+ buf = &dst_buf->vb2_buf;
+ dma_addr = hantro_get_dec_buf_addr(ctx, buf);
+ }
+
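+	/*
+	 * The two lowest bits of the returned address carry per-reference
+	 * flags for the hardware: bit 1 marks a field reference, and bit 0 is
+	 * set when the top field order count is closer to the current POC
+	 * than the bottom one. Reference buffer addresses are aligned, so
+	 * these bits are otherwise unused.
+	 */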
+ flags = dpb[dpb_idx].flags & V4L2_H264_DPB_ENTRY_FLAG_FIELD ? 0x2 : 0;
+ flags |= abs(dpb[dpb_idx].top_field_order_cnt - cur_poc) <
+ abs(dpb[dpb_idx].bottom_field_order_cnt - cur_poc) ?
+ 0x1 : 0;
+
+ return dma_addr | flags;
+}
+
+u16 hantro_h264_get_ref_nbr(struct hantro_ctx *ctx, unsigned int dpb_idx)
+{
+ const struct v4l2_h264_dpb_entry *dpb = &ctx->h264_dec.dpb[dpb_idx];
+
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ return 0;
+ return dpb->frame_num;
+}
+
+/*
+ * Remove all references with the same parity as the current picture from the
+ * reference list, so that the remaining list only contains references with
+ * the opposite parity. This is effectively a deduplication of references,
+ * since each buffer stores two fields; for this reason, each buffer is found
+ * twice in the reference list.
+ *
+ * This technique has been chosen through trial and error: this simple approach
+ * resulted in the highest conformance score. Note that this method may suffer
+ * worse quality in case an opposite reference frame has been lost. If this
+ * becomes a problem in the future, it should be possible to add a
+ * preprocessing step to identify unpaired fields and avoid removing them.
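+ *
+ * For example, when the current picture is a top field, each reference frame
+ * with both fields available shows up twice in the builder's list (one
+ * top-field and one bottom-field entry); dropping the same-parity (top)
+ * entries leaves a single entry per reference buffer.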
+ */
+static void deduplicate_reflist(struct v4l2_h264_reflist_builder *b,
+ struct v4l2_h264_reference *reflist)
+{
+ int write_idx = 0;
+ int i;
+
+ if (b->cur_pic_fields == V4L2_H264_FRAME_REF) {
+ write_idx = b->num_valid;
+ goto done;
+ }
+
+ for (i = 0; i < b->num_valid; i++) {
+		if (b->cur_pic_fields != reflist[i].fields)
+			reflist[write_idx++] = reflist[i];
+ }
+
+done:
+ /* Should not happen unless we have a bug in the reflist builder. */
+ if (WARN_ON(write_idx > 16))
+ write_idx = 16;
+
+	/* Clear the remaining entries; some streams fail otherwise. */
+ for (; write_idx < 16; write_idx++)
+ reflist[write_idx].index = 15;
+}
+
+int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_h264_dec_hw_ctx *h264_ctx = &ctx->h264_dec;
+ struct hantro_h264_dec_ctrls *ctrls = &h264_ctx->ctrls;
+ struct v4l2_h264_reflist_builder reflist_builder;
+
+ hantro_start_prepare_run(ctx);
+
+ ctrls->scaling =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
+ if (WARN_ON(!ctrls->scaling))
+ return -EINVAL;
+
+ ctrls->decode =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ if (WARN_ON(!ctrls->decode))
+ return -EINVAL;
+
+ ctrls->sps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_SPS);
+ if (WARN_ON(!ctrls->sps))
+ return -EINVAL;
+
+ ctrls->pps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_H264_PPS);
+ if (WARN_ON(!ctrls->pps))
+ return -EINVAL;
+
+ /* Update the DPB with new refs. */
+ update_dpb(ctx);
+
+ /* Build the P/B{0,1} ref lists. */
+ v4l2_h264_init_reflist_builder(&reflist_builder, ctrls->decode,
+ ctrls->sps, ctx->h264_dec.dpb);
+ h264_ctx->cur_poc = reflist_builder.cur_pic_order_count;
+
+ /* Prepare data in memory. */
+ prepare_table(ctx);
+
+ v4l2_h264_build_p_ref_list(&reflist_builder, h264_ctx->reflists.p);
+ v4l2_h264_build_b_ref_lists(&reflist_builder, h264_ctx->reflists.b0,
+ h264_ctx->reflists.b1);
+
+ /*
+	 * Reduce the ref lists to at most 16 entries; in field mode, the
+	 * Hantro hardware will derive the actual picture lists from the
+	 * dpb_valid and dpb_longterm bitmaps along with the current frame
+	 * parity.
+ */
+ if (reflist_builder.cur_pic_fields != V4L2_H264_FRAME_REF) {
+ deduplicate_reflist(&reflist_builder, h264_ctx->reflists.p);
+ deduplicate_reflist(&reflist_builder, h264_ctx->reflists.b0);
+ deduplicate_reflist(&reflist_builder, h264_ctx->reflists.b1);
+ }
+
+ return 0;
+}
+
+void hantro_h264_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
+ struct hantro_aux_buf *priv = &h264_dec->priv;
+
+ dma_free_coherent(vpu->dev, priv->size, priv->cpu, priv->dma);
+}
+
+int hantro_h264_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_h264_dec_hw_ctx *h264_dec = &ctx->h264_dec;
+ struct hantro_aux_buf *priv = &h264_dec->priv;
+ struct hantro_h264_dec_priv_tbl *tbl;
+
+ priv->cpu = dma_alloc_coherent(vpu->dev, sizeof(*tbl), &priv->dma,
+ GFP_KERNEL);
+ if (!priv->cpu)
+ return -ENOMEM;
+
+ priv->size = sizeof(*tbl);
+ tbl = priv->cpu;
+ memcpy(tbl->cabac_table, h264_cabac_table, sizeof(tbl->cabac_table));
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c
new file mode 100644
index 000000000000..b990bc98164c
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_hevc.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU HEVC codec driver
+ *
+ * Copyright (C) 2020 Safran Passenger Innovations LLC
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define VERT_FILTER_RAM_SIZE 8 /* bytes per pixel row */
+/*
+ * BSD control data of current picture at tile border
+ * 128 bits per 4x4 tile = 128/(8*4) bytes per row
+ */
+#define BSD_CTRL_RAM_SIZE 4 /* bytes per pixel row */
+/* tile border coefficients of filter */
+#define VERT_SAO_RAM_SIZE 48 /* bytes per pixel row */
+
+#define SCALING_LIST_SIZE (16 * 64)
+
+#define MAX_TILE_COLS 20
+#define MAX_TILE_ROWS 22
+
+void hantro_hevc_ref_init(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+
+ hevc_dec->ref_bufs_used = 0;
+}
+
+dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx,
+ s32 poc)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ /* Find the reference buffer in already known ones */
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (hevc_dec->ref_bufs_poc[i] == poc) {
+ hevc_dec->ref_bufs_used |= 1 << i;
+ return hevc_dec->ref_bufs[i].dma;
+ }
+ }
+
+ return 0;
+}
+
+int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ int i;
+
+ /* Add a new reference buffer */
+ for (i = 0; i < NUM_REF_PICTURES; i++) {
+ if (!(hevc_dec->ref_bufs_used & 1 << i)) {
+ hevc_dec->ref_bufs_used |= 1 << i;
+ hevc_dec->ref_bufs_poc[i] = poc;
+ hevc_dec->ref_bufs[i].dma = addr;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int tile_buffer_reallocate(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_pps *pps = ctrls->pps;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ unsigned int num_tile_cols = pps->num_tile_columns_minus1 + 1;
+ unsigned int height64 = (sps->pic_height_in_luma_samples + 63) & ~63;
+ unsigned int size;
+
+ if (num_tile_cols <= 1 ||
+ num_tile_cols <= hevc_dec->num_tile_cols_allocated)
+ return 0;
+
+ /* Need to reallocate due to tiles passed via PPS */
+ if (hevc_dec->tile_filter.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+ }
+
+ if (hevc_dec->tile_sao.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+ }
+
+ if (hevc_dec->tile_bsd.cpu) {
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+ }
+
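+	/*
+	 * Each buffer holds one column of border data per internal tile
+	 * boundary (num_tile_cols - 1), sized per pixel row of the 64-aligned
+	 * picture height.
+	 */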
+ size = VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1);
+ hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_filter.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_filter.cpu)
+ goto err_free_tile_buffers;
+ hevc_dec->tile_filter.size = size;
+
+ size = VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1);
+ hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_sao.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_sao.cpu)
+ goto err_free_tile_buffers;
+ hevc_dec->tile_sao.size = size;
+
+ size = BSD_CTRL_RAM_SIZE * height64 * (num_tile_cols - 1);
+ hevc_dec->tile_bsd.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_bsd.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_bsd.cpu)
+ goto err_free_tile_buffers;
+ hevc_dec->tile_bsd.size = size;
+
+ hevc_dec->num_tile_cols_allocated = num_tile_cols;
+
+ return 0;
+
+err_free_tile_buffers:
+ if (hevc_dec->tile_filter.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+
+ if (hevc_dec->tile_sao.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+
+ if (hevc_dec->tile_bsd.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+
+ return -ENOMEM;
+}
+
+static int hantro_hevc_validate_sps(struct hantro_ctx *ctx, const struct v4l2_ctrl_hevc_sps *sps)
+{
+ /*
+ * for tile pixel format check if the width and height match
+ * hardware constraints
+ */
+ if (ctx->vpu_dst_fmt->fourcc == V4L2_PIX_FMT_NV12_4L4) {
+ if (ctx->dst_fmt.width !=
+ ALIGN(sps->pic_width_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_width))
+ return -EINVAL;
+
+ if (ctx->dst_fmt.height !=
+ ALIGN(sps->pic_height_in_luma_samples, ctx->vpu_dst_fmt->frmsize.step_height))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
+{
+ struct hantro_hevc_dec_hw_ctx *hevc_ctx = &ctx->hevc_dec;
+ struct hantro_hevc_dec_ctrls *ctrls = &hevc_ctx->ctrls;
+ int ret;
+
+ hantro_start_prepare_run(ctx);
+
+ ctrls->decode_params =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_DECODE_PARAMS);
+ if (WARN_ON(!ctrls->decode_params))
+ return -EINVAL;
+
+ ctrls->scaling =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SCALING_MATRIX);
+ if (WARN_ON(!ctrls->scaling))
+ return -EINVAL;
+
+ ctrls->sps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_SPS);
+ if (WARN_ON(!ctrls->sps))
+ return -EINVAL;
+
+ ret = hantro_hevc_validate_sps(ctx, ctrls->sps);
+ if (ret)
+ return ret;
+
+ ctrls->pps =
+ hantro_get_ctrl(ctx, V4L2_CID_STATELESS_HEVC_PPS);
+ if (WARN_ON(!ctrls->pps))
+ return -EINVAL;
+
+ ret = tile_buffer_reallocate(ctx);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void hantro_hevc_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+
+ if (hevc_dec->tile_sizes.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sizes.size,
+ hevc_dec->tile_sizes.cpu,
+ hevc_dec->tile_sizes.dma);
+ hevc_dec->tile_sizes.cpu = NULL;
+
+ if (hevc_dec->scaling_lists.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->scaling_lists.size,
+ hevc_dec->scaling_lists.cpu,
+ hevc_dec->scaling_lists.dma);
+ hevc_dec->scaling_lists.cpu = NULL;
+
+ if (hevc_dec->tile_filter.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
+ hevc_dec->tile_filter.cpu,
+ hevc_dec->tile_filter.dma);
+ hevc_dec->tile_filter.cpu = NULL;
+
+ if (hevc_dec->tile_sao.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_sao.size,
+ hevc_dec->tile_sao.cpu,
+ hevc_dec->tile_sao.dma);
+ hevc_dec->tile_sao.cpu = NULL;
+
+ if (hevc_dec->tile_bsd.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->tile_bsd.size,
+ hevc_dec->tile_bsd.cpu,
+ hevc_dec->tile_bsd.dma);
+ hevc_dec->tile_bsd.cpu = NULL;
+}
+
+int hantro_hevc_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_hevc_dec_hw_ctx *hevc_dec = &ctx->hevc_dec;
+ unsigned int size;
+
+ memset(hevc_dec, 0, sizeof(*hevc_dec));
+
+ /*
+	 * Maximum number of tiles times width and height (2 bytes each),
+	 * rounded up to the next 16-byte boundary, plus one extra 16-byte
+	 * chunk (requested by the HW designers).
+ */
+ size = round_up(MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 16, 16);
+ hevc_dec->tile_sizes.cpu = dma_alloc_coherent(vpu->dev, size,
+ &hevc_dec->tile_sizes.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->tile_sizes.cpu)
+ return -ENOMEM;
+
+ hevc_dec->tile_sizes.size = size;
+
+ hevc_dec->scaling_lists.cpu = dma_alloc_coherent(vpu->dev, SCALING_LIST_SIZE,
+ &hevc_dec->scaling_lists.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->scaling_lists.cpu)
+ return -ENOMEM;
+
+ hevc_dec->scaling_lists.size = SCALING_LIST_SIZE;
+
+ hantro_hevc_ref_init(ctx);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/hantro_hw.h b/drivers/media/platform/verisilicon/hantro_hw.h
new file mode 100644
index 000000000000..e83f0c523a30
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_hw.h
@@ -0,0 +1,441 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#ifndef HANTRO_HW_H_
+#define HANTRO_HW_H_
+
+#include <linux/interrupt.h>
+#include <linux/v4l2-controls.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-vp9.h>
+#include <media/videobuf2-core.h>
+
+#define DEC_8190_ALIGN_MASK 0x07U
+
+#define MB_DIM 16
+#define TILE_MB_DIM 4
+#define MB_WIDTH(w) DIV_ROUND_UP(w, MB_DIM)
+#define MB_HEIGHT(h) DIV_ROUND_UP(h, MB_DIM)
+
+#define FMT_MIN_WIDTH 48
+#define FMT_MIN_HEIGHT 48
+#define FMT_HD_WIDTH 1280
+#define FMT_HD_HEIGHT 720
+#define FMT_FHD_WIDTH 1920
+#define FMT_FHD_HEIGHT 1088
+#define FMT_UHD_WIDTH 3840
+#define FMT_UHD_HEIGHT 2160
+#define FMT_4K_WIDTH 4096
+#define FMT_4K_HEIGHT 2304
+
+#define NUM_REF_PICTURES (V4L2_HEVC_DPB_ENTRIES_NUM_MAX + 1)
+
+struct hantro_dev;
+struct hantro_ctx;
+struct hantro_buf;
+struct hantro_variant;
+
+/**
+ * struct hantro_aux_buf - auxiliary DMA buffer for hardware data
+ *
+ * @cpu: CPU pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @size: Size of the buffer.
+ * @attrs: Attributes of the DMA mapping.
+ */
+struct hantro_aux_buf {
+ void *cpu;
+ dma_addr_t dma;
+ size_t size;
+ unsigned long attrs;
+};
+
+/* Max. number of entries in the DPB (HW limitation). */
+#define HANTRO_H264_DPB_SIZE 16
+
+/**
+ * struct hantro_h264_dec_ctrls
+ *
+ * @decode: Decode params
+ * @scaling: Scaling info
+ * @sps: SPS info
+ * @pps: PPS info
+ */
+struct hantro_h264_dec_ctrls {
+ const struct v4l2_ctrl_h264_decode_params *decode;
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling;
+ const struct v4l2_ctrl_h264_sps *sps;
+ const struct v4l2_ctrl_h264_pps *pps;
+};
+
+/**
+ * struct hantro_h264_dec_reflists
+ *
+ * @p: P reflist
+ * @b0: B0 reflist
+ * @b1: B1 reflist
+ */
+struct hantro_h264_dec_reflists {
+ struct v4l2_h264_reference p[V4L2_H264_REF_LIST_LEN];
+ struct v4l2_h264_reference b0[V4L2_H264_REF_LIST_LEN];
+ struct v4l2_h264_reference b1[V4L2_H264_REF_LIST_LEN];
+};
+
+/**
+ * struct hantro_h264_dec_hw_ctx
+ *
+ * @priv: Private auxiliary buffer for hardware.
+ * @dpb: DPB
+ * @reflists: P/B0/B1 reflists
+ * @ctrls: V4L2 controls attached to a run
+ * @dpb_longterm: DPB long-term
+ * @dpb_valid: DPB valid
+ * @cur_poc: Current picture order count
+ */
+struct hantro_h264_dec_hw_ctx {
+ struct hantro_aux_buf priv;
+ struct v4l2_h264_dpb_entry dpb[HANTRO_H264_DPB_SIZE];
+ struct hantro_h264_dec_reflists reflists;
+ struct hantro_h264_dec_ctrls ctrls;
+ u32 dpb_longterm;
+ u32 dpb_valid;
+ s32 cur_poc;
+};
+
+/**
+ * struct hantro_hevc_dec_ctrls
+ * @decode_params: Decode params
+ * @scaling: Scaling matrix
+ * @sps: SPS info
+ * @pps: PPS info
+ * @hevc_hdr_skip_length: the number of data (in bits) to skip in the
+ * slice segment header syntax after 'slice type'
+ * token
+ */
+struct hantro_hevc_dec_ctrls {
+ const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
+ const struct v4l2_ctrl_hevc_sps *sps;
+ const struct v4l2_ctrl_hevc_pps *pps;
+ u32 hevc_hdr_skip_length;
+};
+
+/**
+ * struct hantro_hevc_dec_hw_ctx
+ * @tile_sizes: Tile sizes buffer
+ * @tile_filter: Tile vertical filter buffer
+ * @tile_sao: Tile SAO buffer
+ * @tile_bsd: Tile BSD control buffer
+ * @ref_bufs: Internal reference buffers
+ * @scaling_lists: Scaling lists buffer
+ * @ref_bufs_poc: Internal reference buffers picture order count
+ * @ref_bufs_used: Bitfield of used reference buffers
+ * @ctrls: V4L2 controls attached to a run
+ * @num_tile_cols_allocated: number of allocated tiles
+ */
+struct hantro_hevc_dec_hw_ctx {
+ struct hantro_aux_buf tile_sizes;
+ struct hantro_aux_buf tile_filter;
+ struct hantro_aux_buf tile_sao;
+ struct hantro_aux_buf tile_bsd;
+ struct hantro_aux_buf ref_bufs[NUM_REF_PICTURES];
+ struct hantro_aux_buf scaling_lists;
+ s32 ref_bufs_poc[NUM_REF_PICTURES];
+ u32 ref_bufs_used;
+ struct hantro_hevc_dec_ctrls ctrls;
+ unsigned int num_tile_cols_allocated;
+};
+
+/**
+ * struct hantro_mpeg2_dec_hw_ctx
+ *
+ * @qtable: Quantization table
+ */
+struct hantro_mpeg2_dec_hw_ctx {
+ struct hantro_aux_buf qtable;
+};
+
+/**
+ * struct hantro_vp8_dec_hw_ctx
+ *
+ * @segment_map: Segment map buffer.
+ * @prob_tbl: Probability table buffer.
+ */
+struct hantro_vp8_dec_hw_ctx {
+ struct hantro_aux_buf segment_map;
+ struct hantro_aux_buf prob_tbl;
+};
+
+/**
+ * struct hantro_vp9_frame_info
+ *
+ * @valid: frame info valid flag
+ * @frame_context_idx: index of frame context
+ * @reference_mode: inter prediction type
+ * @tx_mode: transform mode
+ * @interpolation_filter: filter selection for inter prediction
+ * @flags: frame flags
+ * @timestamp: frame timestamp
+ */
+struct hantro_vp9_frame_info {
+ u32 valid : 1;
+ u32 frame_context_idx : 2;
+ u32 reference_mode : 2;
+ u32 tx_mode : 3;
+ u32 interpolation_filter : 3;
+ u32 flags;
+ u64 timestamp;
+};
+
+#define MAX_SB_COLS 64
+#define MAX_SB_ROWS 34
+
+/**
+ * struct hantro_vp9_dec_hw_ctx
+ *
+ * @tile_edge: auxiliary DMA buffer for tile edge processing
+ * @segment_map: auxiliary DMA buffer for segment map
+ * @misc: auxiliary DMA buffer for tile info, probabilities and hw counters
+ * @cnts: vp9 library struct for abstracting hw counters access
+ * @probability_tables: VP9 probability tables implied by the spec
+ * @frame_context: VP9 frame contexts
+ * @cur: current frame information
+ * @last: last frame information
+ * @bsd_ctrl_offset: bsd offset into tile_edge
+ * @segment_map_size: size of segment map
+ * @ctx_counters_offset: hw counters offset into misc
+ * @tile_info_offset: tile info offset into misc
+ * @tile_r_info: per-tile information array
+ * @tile_c_info: per-tile information array
+ * @last_tile_r: last number of tile rows
+ * @last_tile_c: last number of tile cols
+ * @last_sbs_r: last number of superblock rows
+ * @last_sbs_c: last number of superblock cols
+ * @active_segment: number of active segment (alternating between 0 and 1)
+ * @feature_enabled: segmentation feature enabled flags
+ * @feature_data: segmentation feature data
+ */
+struct hantro_vp9_dec_hw_ctx {
+ struct hantro_aux_buf tile_edge;
+ struct hantro_aux_buf segment_map;
+ struct hantro_aux_buf misc;
+ struct v4l2_vp9_frame_symbol_counts cnts;
+ struct v4l2_vp9_frame_context probability_tables;
+ struct v4l2_vp9_frame_context frame_context[4];
+ struct hantro_vp9_frame_info cur;
+ struct hantro_vp9_frame_info last;
+
+ unsigned int bsd_ctrl_offset;
+ unsigned int segment_map_size;
+ unsigned int ctx_counters_offset;
+ unsigned int tile_info_offset;
+
+ unsigned short tile_r_info[MAX_SB_ROWS];
+ unsigned short tile_c_info[MAX_SB_COLS];
+ unsigned int last_tile_r;
+ unsigned int last_tile_c;
+ unsigned int last_sbs_r;
+ unsigned int last_sbs_c;
+
+ unsigned int active_segment;
+ u8 feature_enabled[8];
+ s16 feature_data[8][4];
+};
+
+/**
+ * struct hantro_postproc_ctx
+ *
+ * @dec_q: References buffers, in decoder format.
+ */
+struct hantro_postproc_ctx {
+ struct hantro_aux_buf dec_q[VB2_MAX_FRAME];
+};
+
+/**
+ * struct hantro_postproc_ops - post-processor operations
+ *
+ * @enable: Enable the post-processor block. Optional.
+ * @disable: Disable the post-processor block. Optional.
+ * @enum_framesizes: Enumerate possible scaled output formats.
+ * Returns zero if OK, a negative value in error cases.
+ * Optional.
+ */
+struct hantro_postproc_ops {
+ void (*enable)(struct hantro_ctx *ctx);
+ void (*disable)(struct hantro_ctx *ctx);
+ int (*enum_framesizes)(struct hantro_ctx *ctx, struct v4l2_frmsizeenum *fsize);
+};
+
+/**
+ * struct hantro_codec_ops - codec mode specific operations
+ *
+ * @init: If needed, can be used for initialization.
+ * Optional and called from process context.
+ * @exit: If needed, can be used to undo the .init phase.
+ * Optional and called from process context.
+ * @run:	Start single {en,de}coding job. Called from atomic context
+ * to indicate that a pair of buffers is ready and the hardware
+ * should be programmed and started. Returns zero if OK, a
+ * negative value in error cases.
+ * @done: Read back processing results and additional data from hardware.
+ * @reset: Reset the hardware in case of a timeout.
+ */
+struct hantro_codec_ops {
+ int (*init)(struct hantro_ctx *ctx);
+ void (*exit)(struct hantro_ctx *ctx);
+ int (*run)(struct hantro_ctx *ctx);
+ void (*done)(struct hantro_ctx *ctx);
+ void (*reset)(struct hantro_ctx *ctx);
+};
+
+/**
+ * enum hantro_enc_fmt - source format ID for hardware registers.
+ *
+ * @ROCKCHIP_VPU_ENC_FMT_YUV420P: Y/CbCr 4:2:0 planar format
+ * @ROCKCHIP_VPU_ENC_FMT_YUV420SP: Y/CbCr 4:2:0 semi-planar format
+ * @ROCKCHIP_VPU_ENC_FMT_YUYV422: YUV 4:2:2 packed format (YUYV)
+ * @ROCKCHIP_VPU_ENC_FMT_UYVY422: YUV 4:2:2 packed format (UYVY)
+ */
+enum hantro_enc_fmt {
+ ROCKCHIP_VPU_ENC_FMT_YUV420P = 0,
+ ROCKCHIP_VPU_ENC_FMT_YUV420SP = 1,
+ ROCKCHIP_VPU_ENC_FMT_YUYV422 = 2,
+ ROCKCHIP_VPU_ENC_FMT_UYVY422 = 3,
+};
+
+extern const struct hantro_variant imx8mm_vpu_g1_variant;
+extern const struct hantro_variant imx8mq_vpu_g1_variant;
+extern const struct hantro_variant imx8mq_vpu_g2_variant;
+extern const struct hantro_variant imx8mq_vpu_variant;
+extern const struct hantro_variant px30_vpu_variant;
+extern const struct hantro_variant rk3036_vpu_variant;
+extern const struct hantro_variant rk3066_vpu_variant;
+extern const struct hantro_variant rk3288_vpu_variant;
+extern const struct hantro_variant rk3328_vpu_variant;
+extern const struct hantro_variant rk3399_vpu_variant;
+extern const struct hantro_variant rk3568_vepu_variant;
+extern const struct hantro_variant rk3568_vpu_variant;
+extern const struct hantro_variant sama5d4_vdec_variant;
+extern const struct hantro_variant sunxi_vpu_variant;
+
+extern const struct hantro_postproc_ops hantro_g1_postproc_ops;
+extern const struct hantro_postproc_ops hantro_g2_postproc_ops;
+
+extern const u32 hantro_vp8_dec_mc_filter[8][6];
+
+void hantro_watchdog(struct work_struct *work);
+void hantro_run(struct hantro_ctx *ctx);
+void hantro_irq_done(struct hantro_dev *vpu,
+ enum vb2_buffer_state result);
+void hantro_start_prepare_run(struct hantro_ctx *ctx);
+void hantro_end_prepare_run(struct hantro_ctx *ctx);
+
+irqreturn_t hantro_g1_irq(int irq, void *dev_id);
+void hantro_g1_reset(struct hantro_ctx *ctx);
+
+int hantro_h1_jpeg_enc_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx);
+void hantro_h1_jpeg_enc_done(struct hantro_ctx *ctx);
+void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx);
+
+dma_addr_t hantro_h264_get_ref_buf(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
+u16 hantro_h264_get_ref_nbr(struct hantro_ctx *ctx,
+ unsigned int dpb_idx);
+int hantro_h264_dec_prepare_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_h264_dec_run(struct hantro_ctx *ctx);
+int hantro_g1_h264_dec_run(struct hantro_ctx *ctx);
+int hantro_h264_dec_init(struct hantro_ctx *ctx);
+void hantro_h264_dec_exit(struct hantro_ctx *ctx);
+
+int hantro_hevc_dec_init(struct hantro_ctx *ctx);
+void hantro_hevc_dec_exit(struct hantro_ctx *ctx);
+int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx);
+int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx);
+void hantro_hevc_ref_init(struct hantro_ctx *ctx);
+dma_addr_t hantro_hevc_get_ref_buf(struct hantro_ctx *ctx, s32 poc);
+int hantro_hevc_add_ref_buf(struct hantro_ctx *ctx, int poc, dma_addr_t addr);
+
+static inline unsigned short hantro_vp9_num_sbs(unsigned short dimension)
+{
+ return (dimension + 63) / 64;
+}
+
+static inline size_t
+hantro_vp9_mv_size(unsigned int width, unsigned int height)
+{
+ int num_ctbs;
+
+ /*
+	 * There can be up to 64 blocks per superblock (CTB), and the motion
+	 * vector for each block needs 16 bytes.
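+	 * For example, a 1920x1080 frame has 30 x 17 = 510 superblocks, which
+	 * amounts to 510 * 64 * 16 = 522240 bytes of motion vector storage.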
+ */
+ num_ctbs = hantro_vp9_num_sbs(width) * hantro_vp9_num_sbs(height);
+ return (num_ctbs * 64) * 16;
+}
+
+static inline size_t
+hantro_h264_mv_size(unsigned int width, unsigned int height)
+{
+ /*
+ * A decoded 8-bit 4:2:0 NV12 frame may need memory for up to
+ * 448 bytes per macroblock with additional 32 bytes on
+ * multi-core variants.
+ *
+ * The H264 decoder needs extra space on the output buffers
+ * to store motion vectors. This is needed for reference
+ * frames and only if the format is non-post-processed NV12.
+ *
+	 * Memory layout is as follows:
+ *
+ * +---------------------------+
+ * | Y-plane 256 bytes x MBs |
+ * +---------------------------+
+ * | UV-plane 128 bytes x MBs |
+ * +---------------------------+
+ * | MV buffer 64 bytes x MBs |
+ * +---------------------------+
+ * | MC sync 32 bytes |
+ * +---------------------------+
+ */
+	return 64 * MB_WIDTH(width) * MB_HEIGHT(height) + 32;
+}
+
+static inline size_t
+hantro_hevc_mv_size(unsigned int width, unsigned int height)
+{
+ /*
+ * A CTB can be 64x64, 32x32 or 16x16.
+	 * Allocate memory for the worst case: 16x16.
+ */
+ return width * height / 16;
+}
+
+int hantro_g1_mpeg2_dec_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantisation *ctrl);
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx);
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx);
+
+int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx);
+int rockchip_vpu2_vp8_dec_run(struct hantro_ctx *ctx);
+int hantro_vp8_dec_init(struct hantro_ctx *ctx);
+void hantro_vp8_dec_exit(struct hantro_ctx *ctx);
+void hantro_vp8_prob_update(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr);
+
+int hantro_g2_vp9_dec_run(struct hantro_ctx *ctx);
+void hantro_g2_vp9_dec_done(struct hantro_ctx *ctx);
+int hantro_vp9_dec_init(struct hantro_ctx *ctx);
+void hantro_vp9_dec_exit(struct hantro_ctx *ctx);
+void hantro_g2_check_idle(struct hantro_dev *vpu);
+irqreturn_t hantro_g2_irq(int irq, void *dev_id);
+
+#endif /* HANTRO_HW_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_jpeg.c b/drivers/media/platform/verisilicon/hantro_jpeg.c
new file mode 100644
index 000000000000..d07b1b449b61
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_jpeg.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Collabora, Ltd.
+ *
+ * Based on GSPCA and CODA drivers:
+ * Copyright (C) Jean-Francois Moine (http://moinejf.free.fr)
+ * Copyright (C) 2014 Philipp Zabel, Pengutronix
+ */
+
+#include <linux/align.h>
+#include <linux/build_bug.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+
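+/*
+ * Byte offsets into hantro_jpeg_header (below) of the fields patched at run
+ * time: the two DQT payloads, the SOF0 image dimensions and the four DHT
+ * payloads.
+ */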
+#define LUMA_QUANT_OFF 25
+#define CHROMA_QUANT_OFF 90
+#define HEIGHT_OFF 159
+#define WIDTH_OFF 161
+
+#define HUFF_LUMA_DC_OFF 178
+#define HUFF_LUMA_AC_OFF 211
+#define HUFF_CHROMA_DC_OFF 394
+#define HUFF_CHROMA_AC_OFF 427
+
+/*
+ * Default tables from JPEG ITU-T.81 (ISO/IEC 10918-1) Annex K,
+ * tables K.1 and K.2.
+ */
+static const unsigned char luma_q_table[] = {
+ 0x10, 0x0b, 0x0a, 0x10, 0x18, 0x28, 0x33, 0x3d,
+ 0x0c, 0x0c, 0x0e, 0x13, 0x1a, 0x3a, 0x3c, 0x37,
+ 0x0e, 0x0d, 0x10, 0x18, 0x28, 0x39, 0x45, 0x38,
+ 0x0e, 0x11, 0x16, 0x1d, 0x33, 0x57, 0x50, 0x3e,
+ 0x12, 0x16, 0x25, 0x38, 0x44, 0x6d, 0x67, 0x4d,
+ 0x18, 0x23, 0x37, 0x40, 0x51, 0x68, 0x71, 0x5c,
+ 0x31, 0x40, 0x4e, 0x57, 0x67, 0x79, 0x78, 0x65,
+ 0x48, 0x5c, 0x5f, 0x62, 0x70, 0x64, 0x67, 0x63
+};
+
+static const unsigned char chroma_q_table[] = {
+ 0x11, 0x12, 0x18, 0x2f, 0x63, 0x63, 0x63, 0x63,
+ 0x12, 0x15, 0x1a, 0x42, 0x63, 0x63, 0x63, 0x63,
+ 0x18, 0x1a, 0x38, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x2f, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63,
+ 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63
+};
+
+static const unsigned char zigzag[] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
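+/*
+ * Coefficient order used when filling the hardware quantization tables
+ * (hw_luma_qtable/hw_chroma_qtable), as opposed to the zigzag order used for
+ * the tables embedded in the JPEG header.
+ */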
+static const u32 hw_reorder[] = {
+ 0, 8, 16, 24, 1, 9, 17, 25,
+ 32, 40, 48, 56, 33, 41, 49, 57,
+ 2, 10, 18, 26, 3, 11, 19, 27,
+ 34, 42, 50, 58, 35, 43, 51, 59,
+ 4, 12, 20, 28, 5, 13, 21, 29,
+ 36, 44, 52, 60, 37, 45, 53, 61,
+ 6, 14, 22, 30, 7, 15, 23, 31,
+ 38, 46, 54, 62, 39, 47, 55, 63
+};
+
+/* Huffman tables are shared with CODA */
+static const unsigned char luma_dc_table[] = {
+ 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char chroma_dc_table[] = {
+ 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0a, 0x0b,
+};
+
+static const unsigned char luma_ac_table[] = {
+ 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03,
+ 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7d,
+ 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
+ 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
+ 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
+ 0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
+ 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
+ 0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
+ 0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
+ 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
+ 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
+ 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
+ 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
+ 0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
+ 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
+ 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
+ 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
+ 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
+ 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
+ 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
+ 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
+ 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+static const unsigned char chroma_ac_table[] = {
+ 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04,
+ 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77,
+ 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
+ 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
+ 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
+ 0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
+ 0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
+ 0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
+ 0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
+ 0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
+ 0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
+ 0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
+ 0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
+ 0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
+ 0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
+ 0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
+ 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
+ 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
+ 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
+ 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
+ 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
+ 0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
+ 0xf9, 0xfa,
+};
+
+/*
+ * For simplicity, we keep a pre-formatted JPEG header, and we'll use
+ * fixed offsets to change the width, height, quantization tables, etc.
+ */
+static const unsigned char hantro_jpeg_header[] = {
+ /* SOI */
+ 0xff, 0xd8,
+
+ /* JFIF-APP0 */
+ 0xff, 0xe0, 0x00, 0x10, 0x4a, 0x46, 0x49, 0x46,
+ 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x00, 0x01,
+ 0x00, 0x00,
+
+ /* DQT */
+ 0xff, 0xdb, 0x00, 0x84,
+
+ 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* SOF */
+ 0xff, 0xc0, 0x00, 0x11, 0x08, 0x00, 0xf0, 0x01,
+ 0x40, 0x03, 0x01, 0x22, 0x00, 0x02, 0x11, 0x01,
+ 0x03, 0x11, 0x01,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0x1f, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0xb5, 0x10,
+
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0x1f, 0x01,
+
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ /* DHT */
+ 0xff, 0xc4, 0x00, 0xb5, 0x11,
+
+ 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* COM */
+ 0xff, 0xfe, 0x00, 0x03, 0x00,
+
+ /* SOS */
+ 0xff, 0xda, 0x00, 0x0c, 0x03, 0x01, 0x00, 0x02,
+ 0x11, 0x03, 0x11, 0x00, 0x3f, 0x00,
+};
+
+/*
+ * JPEG_HEADER_SIZE is used in other parts of the driver in lieu of
+ * "sizeof(hantro_jpeg_header)". The two must be equal.
+ */
+static_assert(sizeof(hantro_jpeg_header) == JPEG_HEADER_SIZE);
+
+/*
+ * hantro_jpeg_header is padded with a COM segment, so that the payload
+ * of the SOS segment (the entropy-encoded image scan), which should
+ * trail the whole header, is 8-byte aligned for the hardware to write
+ * to directly.
+ */
+static_assert(IS_ALIGNED(sizeof(hantro_jpeg_header), 8),
+ "Hantro JPEG header size needs to be 8-byte aligned.");
+
+static unsigned char jpeg_scale_qp(const unsigned char qp, int scale)
+{
+ unsigned int temp;
+
+ temp = DIV_ROUND_CLOSEST((unsigned int)qp * scale, 100);
+ if (temp <= 0)
+ temp = 1;
+ if (temp > 255)
+ temp = 255;
+
+ return (unsigned char)temp;
+}
+
+static void
+jpeg_scale_quant_table(unsigned char *file_q_tab,
+ unsigned char *reordered_q_tab,
+ const unsigned char *tab, int scale)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(zigzag) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(hw_reorder) != JPEG_QUANT_SIZE);
+
+ for (i = 0; i < JPEG_QUANT_SIZE; i++) {
+ file_q_tab[i] = jpeg_scale_qp(tab[zigzag[i]], scale);
+ reordered_q_tab[i] = jpeg_scale_qp(tab[hw_reorder[i]], scale);
+ }
+}
+
+static void jpeg_set_quality(struct hantro_jpeg_ctx *ctx)
+{
+ int scale;
+
+ /*
+ * Non-linear scaling factor:
+ * [5,50] -> [1000..100], [51,100] -> [98..0]
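+	 *
+	 * For example, quality 80 gives scale = 200 - 2 * 80 = 40, so each
+	 * default table entry ends up at roughly 40% of its value, clamped
+	 * to [1, 255] by jpeg_scale_qp().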
+ */
+ if (ctx->quality < 50)
+ scale = 5000 / ctx->quality;
+ else
+ scale = 200 - 2 * ctx->quality;
+
+ BUILD_BUG_ON(ARRAY_SIZE(luma_q_table) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(chroma_q_table) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_luma_qtable) != JPEG_QUANT_SIZE);
+ BUILD_BUG_ON(ARRAY_SIZE(ctx->hw_chroma_qtable) != JPEG_QUANT_SIZE);
+
+ jpeg_scale_quant_table(ctx->buffer + LUMA_QUANT_OFF,
+ ctx->hw_luma_qtable, luma_q_table, scale);
+ jpeg_scale_quant_table(ctx->buffer + CHROMA_QUANT_OFF,
+ ctx->hw_chroma_qtable, chroma_q_table, scale);
+}
+
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx)
+{
+ char *buf = ctx->buffer;
+
+ memcpy(buf, hantro_jpeg_header,
+ sizeof(hantro_jpeg_header));
+
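+	/* SOF0 stores the image height and width as big-endian 16-bit values. */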
+ buf[HEIGHT_OFF + 0] = ctx->height >> 8;
+ buf[HEIGHT_OFF + 1] = ctx->height;
+ buf[WIDTH_OFF + 0] = ctx->width >> 8;
+ buf[WIDTH_OFF + 1] = ctx->width;
+
+ memcpy(buf + HUFF_LUMA_DC_OFF, luma_dc_table, sizeof(luma_dc_table));
+ memcpy(buf + HUFF_LUMA_AC_OFF, luma_ac_table, sizeof(luma_ac_table));
+ memcpy(buf + HUFF_CHROMA_DC_OFF, chroma_dc_table,
+ sizeof(chroma_dc_table));
+ memcpy(buf + HUFF_CHROMA_AC_OFF, chroma_ac_table,
+ sizeof(chroma_ac_table));
+
+ jpeg_set_quality(ctx);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_jpeg.h b/drivers/media/platform/verisilicon/hantro_jpeg.h
new file mode 100644
index 000000000000..0b49d0b82caa
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_jpeg.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#define JPEG_HEADER_SIZE 624
+#define JPEG_QUANT_SIZE 64
+
+struct hantro_jpeg_ctx {
+ int width;
+ int height;
+ int quality;
+ unsigned char *buffer;
+ unsigned char hw_luma_qtable[JPEG_QUANT_SIZE];
+ unsigned char hw_chroma_qtable[JPEG_QUANT_SIZE];
+};
+
+void hantro_jpeg_header_assemble(struct hantro_jpeg_ctx *ctx);
diff --git a/drivers/media/platform/verisilicon/hantro_mpeg2.c b/drivers/media/platform/verisilicon/hantro_mpeg2.c
new file mode 100644
index 000000000000..04e545eb0a83
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_mpeg2.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include "hantro.h"
+
+static const u8 zigzag[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
+};
+
+void hantro_mpeg2_dec_copy_qtable(u8 *qtable,
+ const struct v4l2_ctrl_mpeg2_quantisation *ctrl)
+{
+ int i, n;
+
+ if (!qtable || !ctrl)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(zigzag); i++) {
+ n = zigzag[i];
+ qtable[n + 0] = ctrl->intra_quantiser_matrix[i];
+ qtable[n + 64] = ctrl->non_intra_quantiser_matrix[i];
+ qtable[n + 128] = ctrl->chroma_intra_quantiser_matrix[i];
+ qtable[n + 192] = ctrl->chroma_non_intra_quantiser_matrix[i];
+ }
+}
+
+int hantro_mpeg2_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ ctx->mpeg2_dec.qtable.size = ARRAY_SIZE(zigzag) * 4;
+ ctx->mpeg2_dec.qtable.cpu =
+ dma_alloc_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ &ctx->mpeg2_dec.qtable.dma,
+ GFP_KERNEL);
+ if (!ctx->mpeg2_dec.qtable.cpu)
+ return -ENOMEM;
+ return 0;
+}
+
+void hantro_mpeg2_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ dma_free_coherent(vpu->dev,
+ ctx->mpeg2_dec.qtable.size,
+ ctx->mpeg2_dec.qtable.cpu,
+ ctx->mpeg2_dec.qtable.dma);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c
new file mode 100644
index 000000000000..a0928c508434
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_postproc.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro G1 post-processor support
+ *
+ * Copyright (C) 2019 Collabora, Ltd.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_g1_regs.h"
+#include "hantro_g2_regs.h"
+#include "hantro_v4l2.h"
+
+#define HANTRO_PP_REG_WRITE(vpu, reg_name, val) \
+{ \
+ hantro_reg_write(vpu, \
+ &hantro_g1_postproc_regs.reg_name, \
+ val); \
+}
+
+#define HANTRO_PP_REG_WRITE_S(vpu, reg_name, val) \
+{ \
+ hantro_reg_write_s(vpu, \
+ &hantro_g1_postproc_regs.reg_name, \
+ val); \
+}
+
+#define VPU_PP_IN_YUYV 0x0
+#define VPU_PP_IN_NV12 0x1
+#define VPU_PP_IN_YUV420 0x2
+#define VPU_PP_IN_YUV240_TILED 0x5
+#define VPU_PP_OUT_RGB 0x0
+#define VPU_PP_OUT_YUYV 0x3
+
+static const struct hantro_postproc_regs hantro_g1_postproc_regs = {
+ .pipeline_en = {G1_REG_PP_INTERRUPT, 1, 0x1},
+ .max_burst = {G1_REG_PP_DEV_CONFIG, 0, 0x1f},
+ .clk_gate = {G1_REG_PP_DEV_CONFIG, 1, 0x1},
+ .out_swap32 = {G1_REG_PP_DEV_CONFIG, 5, 0x1},
+ .out_endian = {G1_REG_PP_DEV_CONFIG, 6, 0x1},
+ .out_luma_base = {G1_REG_PP_OUT_LUMA_BASE, 0, 0xffffffff},
+ .input_width = {G1_REG_PP_INPUT_SIZE, 0, 0x1ff},
+ .input_height = {G1_REG_PP_INPUT_SIZE, 9, 0x1ff},
+ .output_width = {G1_REG_PP_CONTROL, 4, 0x7ff},
+ .output_height = {G1_REG_PP_CONTROL, 15, 0x7ff},
+ .input_fmt = {G1_REG_PP_CONTROL, 29, 0x7},
+ .output_fmt = {G1_REG_PP_CONTROL, 26, 0x7},
+ .orig_width = {G1_REG_PP_MASK1_ORIG_WIDTH, 23, 0x1ff},
+ .display_width = {G1_REG_PP_DISPLAY_WIDTH, 0, 0xfff},
+};
+
+bool hantro_needs_postproc(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt)
+{
+ if (ctx->is_encoder)
+ return false;
+ return fmt->postprocessed;
+}
+
+static void hantro_postproc_g1_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+ u32 src_pp_fmt, dst_pp_fmt;
+ dma_addr_t dst_dma;
+
+ /* Turn on pipeline mode. Must be done first. */
+ HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x1);
+
+ src_pp_fmt = VPU_PP_IN_NV12;
+
+ switch (ctx->vpu_dst_fmt->fourcc) {
+ case V4L2_PIX_FMT_YUYV:
+ dst_pp_fmt = VPU_PP_OUT_YUYV;
+ break;
+ default:
+ WARN(1, "output format %d not supported by the post-processor, this wasn't expected.",
+ ctx->vpu_dst_fmt->fourcc);
+ dst_pp_fmt = 0;
+ break;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
+ dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+
+ HANTRO_PP_REG_WRITE(vpu, clk_gate, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, out_endian, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, out_swap32, 0x1);
+ HANTRO_PP_REG_WRITE(vpu, max_burst, 16);
+ HANTRO_PP_REG_WRITE(vpu, out_luma_base, dst_dma);
+ HANTRO_PP_REG_WRITE(vpu, input_width, MB_WIDTH(ctx->dst_fmt.width));
+ HANTRO_PP_REG_WRITE(vpu, input_height, MB_HEIGHT(ctx->dst_fmt.height));
+ HANTRO_PP_REG_WRITE(vpu, input_fmt, src_pp_fmt);
+ HANTRO_PP_REG_WRITE(vpu, output_fmt, dst_pp_fmt);
+ HANTRO_PP_REG_WRITE(vpu, output_width, ctx->dst_fmt.width);
+ HANTRO_PP_REG_WRITE(vpu, output_height, ctx->dst_fmt.height);
+ HANTRO_PP_REG_WRITE(vpu, orig_width, MB_WIDTH(ctx->dst_fmt.width));
+ HANTRO_PP_REG_WRITE(vpu, display_width, ctx->dst_fmt.width);
+}
+
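+/*
+ * Ratio between the source and destination width, or 0 when no scaling is
+ * needed. Given the sizes exposed by hantro_postproc_g2_enum_framesizes()
+ * below, only factors of 2, 4 and 8 are expected here in practice.
+ */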
+static int down_scale_factor(struct hantro_ctx *ctx)
+{
+ if (ctx->src_fmt.width == ctx->dst_fmt.width)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(ctx->src_fmt.width, ctx->dst_fmt.width);
+}
+
+static void hantro_postproc_g2_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *dst_buf;
+ int down_scale = down_scale_factor(ctx);
+ size_t chroma_offset;
+ dma_addr_t dst_dma;
+
+ dst_buf = hantro_get_dst_buf(ctx);
+ dst_dma = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ chroma_offset = ctx->dst_fmt.plane_fmt[0].bytesperline *
+ ctx->dst_fmt.height;
+
+ if (down_scale) {
+ hantro_reg_write(vpu, &g2_down_scale_e, 1);
+ hantro_reg_write(vpu, &g2_down_scale_y, down_scale >> 2);
+ hantro_reg_write(vpu, &g2_down_scale_x, down_scale >> 2);
+ hantro_write_addr(vpu, G2_DS_DST, dst_dma);
+ hantro_write_addr(vpu, G2_DS_DST_CHR, dst_dma + (chroma_offset >> down_scale));
+ } else {
+ hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma);
+ hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset);
+ }
+ if (ctx->dev->variant->legacy_regs) {
+ int out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat);
+ u8 pp_shift = 0;
+
+ if (out_depth > 8)
+ pp_shift = 16 - out_depth;
+
+ hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth);
+ hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift);
+ }
+ hantro_reg_write(vpu, &g2_out_rs_e, 1);
+}
+
+static int hantro_postproc_g2_enum_framesizes(struct hantro_ctx *ctx,
+ struct v4l2_frmsizeenum *fsize)
+{
+	/*
+	 * The G2 scaler can downscale by a factor of 1 (no scaling), 2, 4
+	 * or 8; fsize->index is used as the power-of-two divisor, so indices
+	 * 0-3 map to those factors.
+	 */
+ if (fsize->index > 3)
+ return -EINVAL;
+
+ if (!ctx->src_fmt.width || !ctx->src_fmt.height)
+ return -EINVAL;
+
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = ctx->src_fmt.width >> fsize->index;
+ fsize->discrete.height = ctx->src_fmt.height >> fsize->index;
+
+ return 0;
+}
+
+void hantro_postproc_free(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ for (i = 0; i < VB2_MAX_FRAME; ++i) {
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+ if (priv->cpu) {
+ dma_free_attrs(vpu->dev, priv->size, priv->cpu,
+ priv->dma, priv->attrs);
+ priv->cpu = NULL;
+ }
+ }
+}
+
+int hantro_postproc_alloc(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
+ struct vb2_queue *cap_queue = &m2m_ctx->cap_q_ctx.q;
+ unsigned int num_buffers = cap_queue->num_buffers;
+ struct v4l2_pix_format_mplane pix_mp;
+ const struct hantro_fmt *fmt;
+ unsigned int i, buf_size;
+
+ /* this should always pick native format */
+ fmt = hantro_get_default_fmt(ctx, false);
+ if (!fmt)
+ return -EINVAL;
+ v4l2_fill_pixfmt_mp(&pix_mp, fmt->fourcc, ctx->src_fmt.width,
+ ctx->src_fmt.height);
+
+ buf_size = pix_mp.plane_fmt[0].sizeimage;
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE)
+ buf_size += hantro_h264_mv_size(pix_mp.width,
+ pix_mp.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME)
+ buf_size += hantro_vp9_mv_size(pix_mp.width,
+ pix_mp.height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE)
+ buf_size += hantro_hevc_mv_size(pix_mp.width,
+ pix_mp.height);
+
+ for (i = 0; i < num_buffers; ++i) {
+ struct hantro_aux_buf *priv = &ctx->postproc.dec_q[i];
+
+ /*
+ * The buffers on this queue are meant as intermediate
+ * buffers for the decoder, so no mapping is needed.
+ */
+ priv->attrs = DMA_ATTR_NO_KERNEL_MAPPING;
+ priv->cpu = dma_alloc_attrs(vpu->dev, buf_size, &priv->dma,
+ GFP_KERNEL, priv->attrs);
+ if (!priv->cpu)
+ return -ENOMEM;
+ priv->size = buf_size;
+ }
+ return 0;
+}
+
+static void hantro_postproc_g1_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ HANTRO_PP_REG_WRITE_S(vpu, pipeline_en, 0x0);
+}
+
+static void hantro_postproc_g2_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ hantro_reg_write(vpu, &g2_out_rs_e, 0);
+}
+
+void hantro_postproc_disable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->disable)
+ vpu->variant->postproc_ops->disable(ctx);
+}
+
+void hantro_postproc_enable(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enable)
+ vpu->variant->postproc_ops->enable(ctx);
+}
+
+int hanto_postproc_enum_framesizes(struct hantro_ctx *ctx,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (vpu->variant->postproc_ops && vpu->variant->postproc_ops->enum_framesizes)
+ return vpu->variant->postproc_ops->enum_framesizes(ctx, fsize);
+
+ return -EINVAL;
+}
+
+const struct hantro_postproc_ops hantro_g1_postproc_ops = {
+ .enable = hantro_postproc_g1_enable,
+ .disable = hantro_postproc_g1_disable,
+};
+
+const struct hantro_postproc_ops hantro_g2_postproc_ops = {
+ .enable = hantro_postproc_g2_enable,
+ .disable = hantro_postproc_g2_disable,
+ .enum_framesizes = hantro_postproc_g2_enum_framesizes,
+};
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.c b/drivers/media/platform/verisilicon/hantro_v4l2.c
new file mode 100644
index 000000000000..2c7a805289e7
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.c
@@ -0,0 +1,990 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Collabora, Ltd.
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
+static int hantro_set_fmt_out(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp);
+static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp);
+
+static const struct hantro_fmt *
+hantro_get_formats(const struct hantro_ctx *ctx, unsigned int *num_fmts)
+{
+ const struct hantro_fmt *formats;
+
+ if (ctx->is_encoder) {
+ formats = ctx->dev->variant->enc_fmts;
+ *num_fmts = ctx->dev->variant->num_enc_fmts;
+ } else {
+ formats = ctx->dev->variant->dec_fmts;
+ *num_fmts = ctx->dev->variant->num_dec_fmts;
+ }
+
+ return formats;
+}
+
+static const struct hantro_fmt *
+hantro_get_postproc_formats(const struct hantro_ctx *ctx,
+ unsigned int *num_fmts)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ if (ctx->is_encoder || !vpu->variant->postproc_fmts) {
+ *num_fmts = 0;
+ return NULL;
+ }
+
+ *num_fmts = ctx->dev->variant->num_postproc_fmts;
+ return ctx->dev->variant->postproc_fmts;
+}
+
+int hantro_get_format_depth(u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_P010:
+ case V4L2_PIX_FMT_P010_4L4:
+ return 10;
+ default:
+ return 8;
+ }
+}
+
+static bool
+hantro_check_depth_match(const struct hantro_ctx *ctx,
+ const struct hantro_fmt *fmt)
+{
+ int fmt_depth, ctx_depth = 8;
+
+ if (!fmt->match_depth && !fmt->postprocessed)
+ return true;
+
+ /* 0 means default depth, which is 8 */
+ if (ctx->bit_depth)
+ ctx_depth = ctx->bit_depth;
+
+ fmt_depth = hantro_get_format_depth(fmt->fourcc);
+
+ /*
+ * Allow only downconversion for postproc formats for now.
+ * It may be possible to relax that on some HW.
+ */
+ if (!fmt->match_depth)
+ return fmt_depth <= ctx_depth;
+
+ return fmt_depth == ctx_depth;
+}
+
+static const struct hantro_fmt *
+hantro_find_format(const struct hantro_ctx *ctx, u32 fourcc)
+{
+ const struct hantro_fmt *formats;
+ unsigned int i, num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++)
+ if (formats[i].fourcc == fourcc)
+ return &formats[i];
+ return NULL;
+}
+
+const struct hantro_fmt *
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream)
+{
+ const struct hantro_fmt *formats;
+ unsigned int i, num_fmts;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+ if (bitstream == (formats[i].codec_mode !=
+ HANTRO_MODE_NONE) &&
+ hantro_check_depth_match(ctx, &formats[i]))
+ return &formats[i];
+ }
+ return NULL;
+}
+
+static int vidioc_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct hantro_dev *vpu = video_drvdata(file);
+ struct video_device *vdev = video_devdata(file);
+
+ strscpy(cap->driver, vpu->dev->driver->name, sizeof(cap->driver));
+ strscpy(cap->card, vdev->name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform: %s",
+ vpu->dev->driver->name);
+ return 0;
+}
+
+static int vidioc_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *fmt;
+
+ fmt = hantro_find_format(ctx, fsize->pixel_format);
+ if (!fmt) {
+ vpu_debug(0, "unsupported bitstream format (%08x)\n",
+ fsize->pixel_format);
+ return -EINVAL;
+ }
+
+ /* For non-coded formats check if postprocessing scaling is possible */
+ if (fmt->codec_mode == HANTRO_MODE_NONE && hantro_needs_postproc(ctx, fmt)) {
+ return hanto_postproc_enum_framesizes(ctx, fsize);
+ } else if (fsize->index != 0) {
+ vpu_debug(0, "invalid frame size index (expected 0, got %d)\n",
+ fsize->index);
+ return -EINVAL;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise = fmt->frmsize;
+
+ return 0;
+}
+
+static int vidioc_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f, bool capture)
+
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ const struct hantro_fmt *fmt, *formats;
+ unsigned int num_fmts, i, j = 0;
+ bool skip_mode_none;
+
+ /*
+ * When dealing with an encoder:
+ * - on the capture side we want to filter out all MODE_NONE formats.
+ * - on the output side we want to filter out all formats that are
+ * not MODE_NONE.
+ * When dealing with a decoder:
+ * - on the capture side we want to filter out all formats that are
+ * not MODE_NONE.
+ * - on the output side we want to filter out all MODE_NONE formats.
+ */
+ skip_mode_none = capture == ctx->is_encoder;
+
+ formats = hantro_get_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+		bool mode_none = formats[i].codec_mode == HANTRO_MODE_NONE;
+
+		fmt = &formats[i];
+
+ if (skip_mode_none == mode_none)
+ continue;
+ if (!hantro_check_depth_match(ctx, fmt))
+ continue;
+ if (j == f->index) {
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+
+ /*
+	 * Enumerate post-processed formats. As per the specification, we
+	 * enumerate these formats after the natively decoded formats as a
+	 * hint to applications about the preferred format.
+ */
+ if (!capture)
+ return -EINVAL;
+ formats = hantro_get_postproc_formats(ctx, &num_fmts);
+ for (i = 0; i < num_fmts; i++) {
+ fmt = &formats[i];
+
+ if (!hantro_check_depth_match(ctx, fmt))
+ continue;
+ if (j == f->index) {
+ f->pixelformat = fmt->fourcc;
+ return 0;
+ }
+ ++j;
+ }
+
+ return -EINVAL;
+}
+
+static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, true);
+}
+
+static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ return vidioc_enum_fmt(file, priv, f, false);
+}
+
+static int vidioc_g_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->src_fmt;
+
+ return 0;
+}
+
+static int vidioc_g_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct v4l2_pix_format_mplane *pix_mp = &f->fmt.pix_mp;
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ vpu_debug(4, "f->type = %d\n", f->type);
+
+ *pix_mp = ctx->dst_fmt;
+
+ return 0;
+}
+
+static int hantro_try_fmt(const struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp,
+ enum v4l2_buf_type type)
+{
+ const struct hantro_fmt *fmt, *vpu_fmt;
+ bool capture = V4L2_TYPE_IS_CAPTURE(type);
+ bool coded;
+
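+	/*
+	 * The coded (bitstream) side is the CAPTURE queue for an encoder and
+	 * the OUTPUT queue for a decoder.
+	 */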
+ coded = capture == ctx->is_encoder;
+
+ vpu_debug(4, "trying format %c%c%c%c\n",
+ (pix_mp->pixelformat & 0x7f),
+ (pix_mp->pixelformat >> 8) & 0x7f,
+ (pix_mp->pixelformat >> 16) & 0x7f,
+ (pix_mp->pixelformat >> 24) & 0x7f);
+
+ fmt = hantro_find_format(ctx, pix_mp->pixelformat);
+ if (!fmt) {
+ fmt = hantro_get_default_fmt(ctx, coded);
+ pix_mp->pixelformat = fmt->fourcc;
+ }
+
+ if (coded) {
+ pix_mp->num_planes = 1;
+ vpu_fmt = fmt;
+ } else if (ctx->is_encoder) {
+ vpu_fmt = ctx->vpu_dst_fmt;
+ } else {
+ vpu_fmt = fmt;
+ /*
+ * Width/height on the CAPTURE end of a decoder are ignored and
+ * replaced by the OUTPUT ones.
+ */
+ pix_mp->width = ctx->src_fmt.width;
+ pix_mp->height = ctx->src_fmt.height;
+ }
+
+ pix_mp->field = V4L2_FIELD_NONE;
+
+ v4l2_apply_frmsize_constraints(&pix_mp->width, &pix_mp->height,
+ &vpu_fmt->frmsize);
+
+ if (!coded) {
+ /* Fill remaining fields */
+ v4l2_fill_pixfmt_mp(pix_mp, fmt->fourcc, pix_mp->width,
+ pix_mp->height);
+ if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_H264_SLICE &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_h264_mv_size(pix_mp->width,
+ pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_VP9_FRAME &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_vp9_mv_size(pix_mp->width,
+ pix_mp->height);
+ else if (ctx->vpu_src_fmt->fourcc == V4L2_PIX_FMT_HEVC_SLICE &&
+ !hantro_needs_postproc(ctx, fmt))
+ pix_mp->plane_fmt[0].sizeimage +=
+ hantro_hevc_mv_size(pix_mp->width,
+ pix_mp->height);
+ } else if (!pix_mp->plane_fmt[0].sizeimage) {
+ /*
+ * For coded formats the application can specify
+ * sizeimage. If the application passes a zero sizeimage,
+ * let's default to the maximum frame size.
+ */
+ pix_mp->plane_fmt[0].sizeimage = fmt->header_size +
+ pix_mp->width * pix_mp->height * fmt->max_depth;
+ }
+
+ return 0;
+}
+
+static int vidioc_try_fmt_cap_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+}
+
+static int vidioc_try_fmt_out_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ return hantro_try_fmt(fh_to_ctx(priv), &f->fmt.pix_mp, f->type);
+}
+
+static void
+hantro_reset_fmt(struct v4l2_pix_format_mplane *fmt,
+ const struct hantro_fmt *vpu_fmt)
+{
+ memset(fmt, 0, sizeof(*fmt));
+
+ fmt->pixelformat = vpu_fmt->fourcc;
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_JPEG;
+ fmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
+ fmt->quantization = V4L2_QUANTIZATION_DEFAULT;
+ fmt->xfer_func = V4L2_XFER_FUNC_DEFAULT;
+}
+
+static void
+hantro_reset_encoded_fmt(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *vpu_fmt;
+ struct v4l2_pix_format_mplane *fmt;
+
+ vpu_fmt = hantro_get_default_fmt(ctx, true);
+
+ if (ctx->is_encoder) {
+ ctx->vpu_dst_fmt = vpu_fmt;
+ fmt = &ctx->dst_fmt;
+ } else {
+ ctx->vpu_src_fmt = vpu_fmt;
+ fmt = &ctx->src_fmt;
+ }
+
+ hantro_reset_fmt(fmt, vpu_fmt);
+ fmt->width = vpu_fmt->frmsize.min_width;
+ fmt->height = vpu_fmt->frmsize.min_height;
+ if (ctx->is_encoder)
+ hantro_set_fmt_cap(ctx, fmt);
+ else
+ hantro_set_fmt_out(ctx, fmt);
+}
+
+static void
+hantro_reset_raw_fmt(struct hantro_ctx *ctx)
+{
+ const struct hantro_fmt *raw_vpu_fmt;
+ struct v4l2_pix_format_mplane *raw_fmt, *encoded_fmt;
+
+ raw_vpu_fmt = hantro_get_default_fmt(ctx, false);
+
+ if (ctx->is_encoder) {
+ ctx->vpu_src_fmt = raw_vpu_fmt;
+ raw_fmt = &ctx->src_fmt;
+ encoded_fmt = &ctx->dst_fmt;
+ } else {
+ ctx->vpu_dst_fmt = raw_vpu_fmt;
+ raw_fmt = &ctx->dst_fmt;
+ encoded_fmt = &ctx->src_fmt;
+ }
+
+ hantro_reset_fmt(raw_fmt, raw_vpu_fmt);
+ raw_fmt->width = encoded_fmt->width;
+ raw_fmt->height = encoded_fmt->height;
+ if (ctx->is_encoder)
+ hantro_set_fmt_out(ctx, raw_fmt);
+ else
+ hantro_set_fmt_cap(ctx, raw_fmt);
+}
+
+void hantro_reset_fmts(struct hantro_ctx *ctx)
+{
+ hantro_reset_encoded_fmt(ctx);
+ hantro_reset_raw_fmt(ctx);
+}
+
+static void
+hantro_update_requires_request(struct hantro_ctx *ctx, u32 fourcc)
+{
+ switch (fourcc) {
+ case V4L2_PIX_FMT_JPEG:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false;
+ break;
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ case V4L2_PIX_FMT_H264_SLICE:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ case V4L2_PIX_FMT_VP9_FRAME:
+ ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+hantro_update_requires_hold_capture_buf(struct hantro_ctx *ctx, u32 fourcc)
+{
+ struct vb2_queue *vq;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
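+	/*
+	 * H.264 is decoded slice by slice: one frame may span several OUTPUT
+	 * buffers, so the CAPTURE buffer has to be held until the frame is
+	 * complete.
+	 */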
+ switch (fourcc) {
+ case V4L2_PIX_FMT_JPEG:
+ case V4L2_PIX_FMT_MPEG2_SLICE:
+ case V4L2_PIX_FMT_VP8_FRAME:
+ case V4L2_PIX_FMT_HEVC_SLICE:
+ case V4L2_PIX_FMT_VP9_FRAME:
+ vq->subsystem_flags &= ~(VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF);
+ break;
+ case V4L2_PIX_FMT_H264_SLICE:
+ vq->subsystem_flags |= VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ break;
+ default:
+ break;
+ }
+}
+
+static int hantro_set_fmt_out(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (ret)
+ return ret;
+
+ if (!ctx->is_encoder) {
+ struct vb2_queue *peer_vq;
+
+ /*
+		 * In order to support dynamic resolution changes, the decoder
+		 * admits a resolution change as long as the pixelformat stays
+		 * the same. This can't be done while streaming.
+ */
+ if (vb2_is_streaming(vq) || (vb2_is_busy(vq) &&
+ pix_mp->pixelformat != ctx->src_fmt.pixelformat))
+ return -EBUSY;
+ /*
+ * Since format change on the OUTPUT queue will reset
+ * the CAPTURE queue, we can't allow doing so
+ * when the CAPTURE queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(peer_vq))
+ return -EBUSY;
+ } else {
+ /*
+ * The encoder doesn't admit a format change if
+ * there are OUTPUT buffers allocated.
+ */
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+ }
+
+ ctx->vpu_src_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
+ ctx->src_fmt = *pix_mp;
+
+ /*
+ * Current raw format might have become invalid with newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. User is mandated to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (!ctx->is_encoder)
+ hantro_reset_raw_fmt(ctx);
+
+	/* Colorimetry information is always propagated. */
+ ctx->dst_fmt.colorspace = pix_mp->colorspace;
+ ctx->dst_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->dst_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->dst_fmt.quantization = pix_mp->quantization;
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+ hantro_update_requires_hold_capture_buf(ctx, pix_mp->pixelformat);
+
+ vpu_debug(0, "OUTPUT codec mode: %d\n", ctx->vpu_src_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+ return 0;
+}
+
+static int hantro_set_fmt_cap(struct hantro_ctx *ctx,
+ struct v4l2_pix_format_mplane *pix_mp)
+{
+ struct vb2_queue *vq;
+ int ret;
+
+ /* Change not allowed if queue is busy. */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (vb2_is_busy(vq))
+ return -EBUSY;
+
+ if (ctx->is_encoder) {
+ struct vb2_queue *peer_vq;
+
+ /*
+ * Since format change on the CAPTURE queue will reset
+ * the OUTPUT queue, we can't allow doing so
+ * when the OUTPUT queue has buffers allocated.
+ */
+ peer_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_busy(peer_vq) &&
+ (pix_mp->pixelformat != ctx->dst_fmt.pixelformat ||
+ pix_mp->height != ctx->dst_fmt.height ||
+ pix_mp->width != ctx->dst_fmt.width))
+ return -EBUSY;
+ }
+
+ ret = hantro_try_fmt(ctx, pix_mp, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ if (ret)
+ return ret;
+
+ ctx->vpu_dst_fmt = hantro_find_format(ctx, pix_mp->pixelformat);
+ ctx->dst_fmt = *pix_mp;
+
+ /*
+ * Current raw format might have become invalid with newly
+ * selected codec, so reset it to default just to be safe and
+ * keep internal driver state sane. User is mandated to set
+ * the raw format again after we return, so we don't need
+ * anything smarter.
+ * Note that hantro_reset_raw_fmt() also propagates size
+ * changes to the raw format.
+ */
+ if (ctx->is_encoder)
+ hantro_reset_raw_fmt(ctx);
+
+	/* Colorimetry information is always propagated. */
+ ctx->src_fmt.colorspace = pix_mp->colorspace;
+ ctx->src_fmt.ycbcr_enc = pix_mp->ycbcr_enc;
+ ctx->src_fmt.xfer_func = pix_mp->xfer_func;
+ ctx->src_fmt.quantization = pix_mp->quantization;
+
+ vpu_debug(0, "CAPTURE codec mode: %d\n", ctx->vpu_dst_fmt->codec_mode);
+ vpu_debug(0, "fmt - w: %d, h: %d\n",
+ pix_mp->width, pix_mp->height);
+
+ hantro_update_requires_request(ctx, pix_mp->pixelformat);
+
+ return 0;
+}
+
+static int
+vidioc_s_fmt_out_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ return hantro_set_fmt_out(fh_to_ctx(priv), &f->fmt.pix_mp);
+}
+
+static int
+vidioc_s_fmt_cap_mplane(struct file *file, void *priv, struct v4l2_format *f)
+{
+ return hantro_set_fmt_cap(fh_to_ctx(priv), &f->fmt.pix_mp);
+}
+
+static int vidioc_g_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+
+ /* Crop only supported on source. */
+ if (!ctx->is_encoder ||
+ sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = ctx->src_fmt.width;
+ sel->r.height = ctx->src_fmt.height;
+ break;
+ case V4L2_SEL_TGT_CROP:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = ctx->dst_fmt.width;
+ sel->r.height = ctx->dst_fmt.height;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int vidioc_s_selection(struct file *file, void *priv,
+ struct v4l2_selection *sel)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ struct v4l2_rect *rect = &sel->r;
+ struct vb2_queue *vq;
+
+ /* Crop only supported on source. */
+ if (!ctx->is_encoder ||
+ sel->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
+ return -EINVAL;
+
+ /* Change not allowed if the queue is streaming. */
+ vq = v4l2_m2m_get_src_vq(ctx->fh.m2m_ctx);
+ if (vb2_is_streaming(vq))
+ return -EBUSY;
+
+ if (sel->target != V4L2_SEL_TGT_CROP)
+ return -EINVAL;
+
+ /*
+ * We do not support offsets, and we can crop only inside
+ * right-most or bottom-most macroblocks.
+ */
+ if (rect->left != 0 || rect->top != 0 ||
+ round_up(rect->width, MB_DIM) != ctx->src_fmt.width ||
+ round_up(rect->height, MB_DIM) != ctx->src_fmt.height) {
+ /* Default to full frame for incorrect settings. */
+ rect->left = 0;
+ rect->top = 0;
+ rect->width = ctx->src_fmt.width;
+ rect->height = ctx->src_fmt.height;
+ } else {
+ /* We support widths aligned to 4 pixels and arbitrary heights. */
+ rect->width = round_up(rect->width, 4);
+ }
+
+ ctx->dst_fmt.width = rect->width;
+ ctx->dst_fmt.height = rect->height;
+
+ return 0;
+}
+
+static const struct v4l2_event hantro_eos_event = {
+ .type = V4L2_EVENT_EOS
+};
+
+static int vidioc_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *ec)
+{
+ struct hantro_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, ec);
+ if (ret < 0)
+ return ret;
+
+ if (!vb2_is_streaming(v4l2_m2m_get_src_vq(ctx->fh.m2m_ctx)) ||
+ !vb2_is_streaming(v4l2_m2m_get_dst_vq(ctx->fh.m2m_ctx)))
+ return 0;
+
+ ret = v4l2_m2m_ioctl_encoder_cmd(file, priv, ec);
+ if (ret < 0)
+ return ret;
+
+ if (ec->cmd == V4L2_ENC_CMD_STOP &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+
+ if (ec->cmd == V4L2_ENC_CMD_START)
+ vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
+
+ return 0;
+}
+
+const struct v4l2_ioctl_ops hantro_ioctl_ops = {
+ .vidioc_querycap = vidioc_querycap,
+ .vidioc_enum_framesizes = vidioc_enum_framesizes,
+
+ .vidioc_try_fmt_vid_cap_mplane = vidioc_try_fmt_cap_mplane,
+ .vidioc_try_fmt_vid_out_mplane = vidioc_try_fmt_out_mplane,
+ .vidioc_s_fmt_vid_out_mplane = vidioc_s_fmt_out_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = vidioc_s_fmt_cap_mplane,
+ .vidioc_g_fmt_vid_out_mplane = vidioc_g_fmt_out_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = vidioc_g_fmt_cap_mplane,
+ .vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
+ .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+
+ .vidioc_streamon = v4l2_m2m_ioctl_streamon,
+ .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
+
+ .vidioc_g_selection = vidioc_g_selection,
+ .vidioc_s_selection = vidioc_s_selection,
+
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
+ .vidioc_encoder_cmd = vidioc_encoder_cmd,
+};
+
+static int
+hantro_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
+ unsigned int *num_planes, unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pixfmt;
+ int i;
+
+ switch (vq->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ pixfmt = &ctx->dst_fmt;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ pixfmt = &ctx->src_fmt;
+ break;
+ default:
+ vpu_err("invalid queue type: %d\n", vq->type);
+ return -EINVAL;
+ }
+
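+	/*
+	 * A non-zero *num_planes means the request comes from
+	 * VIDIOC_CREATE_BUFS, so only validate the caller's plane count and
+	 * sizes against the current format.
+	 */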
+ if (*num_planes) {
+ if (*num_planes != pixfmt->num_planes)
+ return -EINVAL;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ if (sizes[i] < pixfmt->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ return 0;
+ }
+
+ *num_planes = pixfmt->num_planes;
+ for (i = 0; i < pixfmt->num_planes; ++i)
+ sizes[i] = pixfmt->plane_fmt[i].sizeimage;
+ return 0;
+}
+
+static int
+hantro_buf_plane_check(struct vb2_buffer *vb,
+ struct v4l2_pix_format_mplane *pixfmt)
+{
+ unsigned int sz;
+ int i;
+
+ for (i = 0; i < pixfmt->num_planes; ++i) {
+ sz = pixfmt->plane_fmt[i].sizeimage;
+ vpu_debug(4, "plane %d size: %ld, sizeimage: %u\n",
+ i, vb2_plane_size(vb, i), sz);
+ if (vb2_plane_size(vb, i) < sz) {
+ vpu_err("plane %d is too small for output\n", i);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int hantro_buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *vq = vb->vb2_queue;
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vq);
+ struct v4l2_pix_format_mplane *pix_fmt;
+ int ret;
+
+ if (V4L2_TYPE_IS_OUTPUT(vq->type))
+ pix_fmt = &ctx->src_fmt;
+ else
+ pix_fmt = &ctx->dst_fmt;
+ ret = hantro_buf_plane_check(vb, pix_fmt);
+ if (ret)
+ return ret;
+ /*
+	 * The buffer's bytesused must be written by the driver for CAPTURE
+	 * buffers (for OUTPUT buffers, if userspace passes 0 bytesused, the
+	 * v4l2 core sets it to the buffer length).
+ */
+ if (V4L2_TYPE_IS_CAPTURE(vq->type)) {
+ if (ctx->is_encoder)
+ vb2_set_plane_payload(vb, 0, 0);
+ else
+ vb2_set_plane_payload(vb, 0, pix_fmt->plane_fmt[0].sizeimage);
+ }
+
+ return 0;
+}
+
+static void hantro_buf_queue(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
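+	/*
+	 * If the context has already been drained (a stop command completed),
+	 * complete the queued CAPTURE buffer right away as an empty "last"
+	 * buffer and signal EOS instead of scheduling it for processing.
+	 */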
+ if (V4L2_TYPE_IS_CAPTURE(vb->vb2_queue->type) &&
+ vb2_is_streaming(vb->vb2_queue) &&
+ v4l2_m2m_dst_buf_is_last(ctx->fh.m2m_ctx)) {
+ unsigned int i;
+
+ for (i = 0; i < vb->num_planes; i++)
+ vb2_set_plane_payload(vb, i, 0);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ vbuf->sequence = ctx->sequence_cap++;
+
+ v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, vbuf);
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
+}
+
+static bool hantro_vq_is_coded(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ return ctx->is_encoder != V4L2_TYPE_IS_OUTPUT(q->type);
+}
+
+static int hantro_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+ int ret = 0;
+
+ v4l2_m2m_update_start_streaming_state(ctx->fh.m2m_ctx, q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ ctx->sequence_out = 0;
+ else
+ ctx->sequence_cap = 0;
+
+ if (hantro_vq_is_coded(q)) {
+ enum hantro_codec_mode codec_mode;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ codec_mode = ctx->vpu_src_fmt->codec_mode;
+ else
+ codec_mode = ctx->vpu_dst_fmt->codec_mode;
+
+ vpu_debug(4, "Codec mode = %d\n", codec_mode);
+ ctx->codec_ops = &ctx->dev->variant->codec_ops[codec_mode];
+ if (ctx->codec_ops->init) {
+ ret = ctx->codec_ops->init(ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (hantro_needs_postproc(ctx, ctx->vpu_dst_fmt)) {
+ ret = hantro_postproc_alloc(ctx);
+ if (ret)
+ goto err_codec_exit;
+ }
+ }
+ return ret;
+
+err_codec_exit:
+ if (ctx->codec_ops->exit)
+ ctx->codec_ops->exit(ctx);
+ return ret;
+}
+
+static void
+hantro_return_bufs(struct vb2_queue *q,
+ struct vb2_v4l2_buffer *(*buf_remove)(struct v4l2_m2m_ctx *))
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ for (;;) {
+ struct vb2_v4l2_buffer *vbuf;
+
+ vbuf = buf_remove(ctx->fh.m2m_ctx);
+ if (!vbuf)
+ break;
+ v4l2_ctrl_request_complete(vbuf->vb2_buf.req_obj.req,
+ &ctx->ctrl_handler);
+ v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
+ }
+}
+
+static void hantro_stop_streaming(struct vb2_queue *q)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(q);
+
+ if (hantro_vq_is_coded(q)) {
+ hantro_postproc_free(ctx);
+ if (ctx->codec_ops && ctx->codec_ops->exit)
+ ctx->codec_ops->exit(ctx);
+ }
+
+ /*
+ * The mem2mem framework calls v4l2_m2m_cancel_job before
+ * .stop_streaming, so there isn't any job running and
+ * it is safe to return all the buffers.
+ */
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ hantro_return_bufs(q, v4l2_m2m_src_buf_remove);
+ else
+ hantro_return_bufs(q, v4l2_m2m_dst_buf_remove);
+
+ v4l2_m2m_update_stop_streaming_state(ctx->fh.m2m_ctx, q);
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type) &&
+ v4l2_m2m_has_stopped(ctx->fh.m2m_ctx))
+ v4l2_event_queue_fh(&ctx->fh, &hantro_eos_event);
+}
+
+static void hantro_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct hantro_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_handler);
+}
+
+static int hantro_buf_out_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+const struct vb2_ops hantro_queue_ops = {
+ .queue_setup = hantro_queue_setup,
+ .buf_prepare = hantro_buf_prepare,
+ .buf_queue = hantro_buf_queue,
+ .buf_out_validate = hantro_buf_out_validate,
+ .buf_request_complete = hantro_buf_request_complete,
+ .start_streaming = hantro_start_streaming,
+ .stop_streaming = hantro_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
diff --git a/drivers/media/platform/verisilicon/hantro_v4l2.h b/drivers/media/platform/verisilicon/hantro_v4l2.h
new file mode 100644
index 000000000000..64f6f57e9d7a
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_v4l2.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <Alpha.Lin@rock-chips.com>
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ *
+ * Copyright 2018 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Based on s5p-mfc driver by Samsung Electronics Co., Ltd.
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef HANTRO_V4L2_H_
+#define HANTRO_V4L2_H_
+
+#include "hantro.h"
+
+extern const struct v4l2_ioctl_ops hantro_ioctl_ops;
+extern const struct vb2_ops hantro_queue_ops;
+
+void hantro_reset_fmts(struct hantro_ctx *ctx);
+int hantro_get_format_depth(u32 fourcc);
+const struct hantro_fmt *
+hantro_get_default_fmt(const struct hantro_ctx *ctx, bool bitstream);
+
+#endif /* HANTRO_V4L2_H_ */
diff --git a/drivers/media/platform/verisilicon/hantro_vp8.c b/drivers/media/platform/verisilicon/hantro_vp8.c
new file mode 100644
index 000000000000..381bc1d3bfda
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_vp8.c
@@ -0,0 +1,201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include "hantro.h"
+
+/*
+ * Packed probability table, in the layout expected by the hardware.
+ */
+struct vp8_prob_tbl_packed {
+ u8 prob_mb_skip_false;
+ u8 prob_intra;
+ u8 prob_ref_last;
+ u8 prob_ref_golden;
+ u8 prob_segment[3];
+ u8 padding0;
+
+ u8 prob_luma_16x16_pred_mode[4];
+ u8 prob_chroma_pred_mode[3];
+ u8 padding1;
+
+ /* mv prob */
+ u8 prob_mv_context[2][V4L2_VP8_MV_PROB_CNT];
+ u8 padding2[2];
+
+ /* coeff probs */
+ u8 prob_coeffs[4][8][3][V4L2_VP8_COEFF_PROB_CNT];
+ u8 padding3[96];
+};
+
+/*
+ * Filter taps at 7-bit precision; see RFC 6386, page 16, filters[8][6].
+ */
+const u32 hantro_vp8_dec_mc_filter[8][6] = {
+ { 0, 0, 128, 0, 0, 0 },
+ { 0, -6, 123, 12, -1, 0 },
+ { 2, -11, 108, 36, -8, 1 },
+ { 0, -9, 93, 50, -6, 0 },
+ { 3, -16, 77, 77, -16, 3 },
+ { 0, -6, 50, 93, -9, 0 },
+ { 1, -8, 36, 108, -11, 2 },
+ { 0, -1, 12, 123, -6, 0 }
+};
+
+void hantro_vp8_prob_update(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_entropy *entropy = &hdr->entropy;
+ u32 i, j, k;
+ u8 *dst;
+
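+	/*
+	 * The buffer is filled in the layout of struct vp8_prob_tbl_packed
+	 * above; the explicit zero bytes correspond to its padding fields.
+	 */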
+ /* first probs */
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+
+ dst[0] = hdr->prob_skip_false;
+ dst[1] = hdr->prob_intra;
+ dst[2] = hdr->prob_last;
+ dst[3] = hdr->prob_gf;
+ dst[4] = hdr->segment.segment_probs[0];
+ dst[5] = hdr->segment.segment_probs[1];
+ dst[6] = hdr->segment.segment_probs[2];
+ dst[7] = 0;
+
+ dst += 8;
+ dst[0] = entropy->y_mode_probs[0];
+ dst[1] = entropy->y_mode_probs[1];
+ dst[2] = entropy->y_mode_probs[2];
+ dst[3] = entropy->y_mode_probs[3];
+ dst[4] = entropy->uv_mode_probs[0];
+ dst[5] = entropy->uv_mode_probs[1];
+ dst[6] = entropy->uv_mode_probs[2];
+ dst[7] = 0; /*unused */
+
+ /* mv probs */
+ dst += 8;
+ dst[0] = entropy->mv_probs[0][0]; /* is short */
+ dst[1] = entropy->mv_probs[1][0];
+ dst[2] = entropy->mv_probs[0][1]; /* sign */
+ dst[3] = entropy->mv_probs[1][1];
+ dst[4] = entropy->mv_probs[0][8 + 9];
+ dst[5] = entropy->mv_probs[0][9 + 9];
+ dst[6] = entropy->mv_probs[1][8 + 9];
+ dst[7] = entropy->mv_probs[1][9 + 9];
+ dst += 8;
+ for (i = 0; i < 2; ++i) {
+ for (j = 0; j < 8; j += 4) {
+ dst[0] = entropy->mv_probs[i][j + 9 + 0];
+ dst[1] = entropy->mv_probs[i][j + 9 + 1];
+ dst[2] = entropy->mv_probs[i][j + 9 + 2];
+ dst[3] = entropy->mv_probs[i][j + 9 + 3];
+ dst += 4;
+ }
+ }
+ for (i = 0; i < 2; ++i) {
+ dst[0] = entropy->mv_probs[i][0 + 2];
+ dst[1] = entropy->mv_probs[i][1 + 2];
+ dst[2] = entropy->mv_probs[i][2 + 2];
+ dst[3] = entropy->mv_probs[i][3 + 2];
+ dst[4] = entropy->mv_probs[i][4 + 2];
+ dst[5] = entropy->mv_probs[i][5 + 2];
+ dst[6] = entropy->mv_probs[i][6 + 2];
+ dst[7] = 0; /*unused */
+ dst += 8;
+ }
+
+ /* coeff probs (header part) */
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+ dst += (8 * 7);
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 8; ++j) {
+ for (k = 0; k < 3; ++k) {
+ dst[0] = entropy->coeff_probs[i][j][k][0];
+ dst[1] = entropy->coeff_probs[i][j][k][1];
+ dst[2] = entropy->coeff_probs[i][j][k][2];
+ dst[3] = entropy->coeff_probs[i][j][k][3];
+ dst += 4;
+ }
+ }
+ }
+
+ /* coeff probs (footer part) */
+ dst = ctx->vp8_dec.prob_tbl.cpu;
+ dst += (8 * 55);
+ for (i = 0; i < 4; ++i) {
+ for (j = 0; j < 8; ++j) {
+ for (k = 0; k < 3; ++k) {
+ dst[0] = entropy->coeff_probs[i][j][k][4];
+ dst[1] = entropy->coeff_probs[i][j][k][5];
+ dst[2] = entropy->coeff_probs[i][j][k][6];
+ dst[3] = entropy->coeff_probs[i][j][k][7];
+ dst[4] = entropy->coeff_probs[i][j][k][8];
+ dst[5] = entropy->coeff_probs[i][j][k][9];
+ dst[6] = entropy->coeff_probs[i][j][k][10];
+ dst[7] = 0; /*unused */
+ dst += 8;
+ }
+ }
+ }
+}
+
+int hantro_vp8_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_aux_buf *aux_buf;
+ unsigned int mb_width, mb_height;
+ size_t segment_map_size;
+ int ret;
+
+	/*
+	 * Segment map size: one 2-bit segment id per 16x16 macroblock, four
+	 * packed per byte, rounded up to a 64-byte boundary.
+	 */
+ mb_width = DIV_ROUND_UP(ctx->dst_fmt.width, 16);
+ mb_height = DIV_ROUND_UP(ctx->dst_fmt.height, 16);
+ segment_map_size = round_up(DIV_ROUND_UP(mb_width * mb_height, 4), 64);
+
+ /*
+	 * The DMA buffer for the segment map must be allocated at context init
+	 * time and its contents must start out zeroed.
+ */
+ aux_buf = &ctx->vp8_dec.segment_map;
+ aux_buf->size = segment_map_size;
+ aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
+ &aux_buf->dma, GFP_KERNEL);
+ if (!aux_buf->cpu)
+ return -ENOMEM;
+
+ /*
+	 * Allocate the probability table buffer; at 1208 bytes in total it
+	 * easily fits within a single 4K page.
+ */
+ aux_buf = &ctx->vp8_dec.prob_tbl;
+ aux_buf->size = sizeof(struct vp8_prob_tbl_packed);
+ aux_buf->cpu = dma_alloc_coherent(vpu->dev, aux_buf->size,
+ &aux_buf->dma, GFP_KERNEL);
+ if (!aux_buf->cpu) {
+ ret = -ENOMEM;
+ goto err_free_seg_map;
+ }
+
+ return 0;
+
+err_free_seg_map:
+ dma_free_coherent(vpu->dev, ctx->vp8_dec.segment_map.size,
+ ctx->vp8_dec.segment_map.cpu,
+ ctx->vp8_dec.segment_map.dma);
+
+ return ret;
+}
+
+void hantro_vp8_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_vp8_dec_hw_ctx *vp8_dec = &ctx->vp8_dec;
+ struct hantro_dev *vpu = ctx->dev;
+
+ dma_free_coherent(vpu->dev, vp8_dec->segment_map.size,
+ vp8_dec->segment_map.cpu, vp8_dec->segment_map.dma);
+ dma_free_coherent(vpu->dev, vp8_dec->prob_tbl.size,
+ vp8_dec->prob_tbl.cpu, vp8_dec->prob_tbl.dma);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_vp9.c b/drivers/media/platform/verisilicon/hantro_vp9.c
new file mode 100644
index 000000000000..566cd376c097
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_vp9.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+
+#include <linux/types.h>
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro.h"
+#include "hantro_hw.h"
+#include "hantro_vp9.h"
+
+#define POW2(x) (1 << (x))
+
+#define MAX_LOG2_TILE_COLUMNS 6
+#define MAX_NUM_TILE_COLS POW2(MAX_LOG2_TILE_COLUMNS)
+#define MAX_TILE_COLS 20
+#define MAX_TILE_ROWS 22
+
+static size_t hantro_vp9_tile_filter_size(unsigned int height)
+{
+ u32 h, height32, size;
+
+ h = roundup(height, 8);
+
+ height32 = roundup(h, 64);
+ size = 24 * height32 * (MAX_NUM_TILE_COLS - 1); /* luma: 8, chroma: 8 + 8 */
+
+ return size;
+}
+
+static size_t hantro_vp9_bsd_control_size(unsigned int height)
+{
+ u32 h, height32;
+
+ h = roundup(height, 8);
+ height32 = roundup(h, 64);
+
+ return 16 * (height32 / 4) * (MAX_NUM_TILE_COLS - 1);
+}
+
+static size_t hantro_vp9_segment_map_size(unsigned int width, unsigned int height)
+{
+ u32 w, h;
+ int num_ctbs;
+
+ w = roundup(width, 8);
+ h = roundup(height, 8);
+ num_ctbs = ((w + 63) / 64) * ((h + 63) / 64);
+
+ return num_ctbs * 32;
+}
+
+static inline size_t hantro_vp9_prob_tab_size(void)
+{
+ return roundup(sizeof(struct hantro_g2_all_probs), 16);
+}
+
+static inline size_t hantro_vp9_count_tab_size(void)
+{
+ return roundup(sizeof(struct symbol_counts), 16);
+}
+
+static inline size_t hantro_vp9_tile_info_size(void)
+{
+ return roundup((MAX_TILE_COLS * MAX_TILE_ROWS * 4 * sizeof(u16) + 15 + 16) & ~0xf, 16);
+}
+
+static void *get_coeffs_arr(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
+{
+ if (i == 0)
+ return &cnts->count_coeffs[j][k][l][m];
+
+ if (i == 1)
+ return &cnts->count_coeffs8x8[j][k][l][m];
+
+ if (i == 2)
+ return &cnts->count_coeffs16x16[j][k][l][m];
+
+ if (i == 3)
+ return &cnts->count_coeffs32x32[j][k][l][m];
+
+ return NULL;
+}
+
+static void *get_eobs1(struct symbol_counts *cnts, int i, int j, int k, int l, int m)
+{
+ if (i == 0)
+ return &cnts->count_coeffs[j][k][l][m][3];
+
+ if (i == 1)
+ return &cnts->count_coeffs8x8[j][k][l][m][3];
+
+ if (i == 2)
+ return &cnts->count_coeffs16x16[j][k][l][m][3];
+
+ if (i == 3)
+ return &cnts->count_coeffs32x32[j][k][l][m][3];
+
+ return NULL;
+}
+
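+/*
+ * Helper for init_v4l2_vp9_count_tbl() below: wires up one innermost row of
+ * coefficient and EOB count pointers, keeping the deeply nested loop readable.
+ */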
+#define INNER_LOOP \
+ do { \
+ for (m = 0; m < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0][0]); ++m) { \
+ vp9_ctx->cnts.coeff[i][j][k][l][m] = \
+ get_coeffs_arr(cnts, i, j, k, l, m); \
+ vp9_ctx->cnts.eob[i][j][k][l][m][0] = \
+ &cnts->count_eobs[i][j][k][l][m]; \
+ vp9_ctx->cnts.eob[i][j][k][l][m][1] = \
+ get_eobs1(cnts, i, j, k, l, m); \
+ } \
+ } while (0)
+
+static void init_v4l2_vp9_count_tbl(struct hantro_ctx *ctx)
+{
+ struct hantro_vp9_dec_hw_ctx *vp9_ctx = &ctx->vp9_dec;
+ struct symbol_counts *cnts = vp9_ctx->misc.cpu + vp9_ctx->ctx_counters_offset;
+ int i, j, k, l, m;
+
+ vp9_ctx->cnts.partition = &cnts->partition_counts;
+ vp9_ctx->cnts.skip = &cnts->mbskip_count;
+ vp9_ctx->cnts.intra_inter = &cnts->intra_inter_count;
+ vp9_ctx->cnts.tx32p = &cnts->tx32x32_count;
+ /*
+	 * The G2 hardware uses tx16x16_count[2][3], while the API expects
+	 * tx16p[2][4], so the counts must be explicitly copied into
+	 * vp9_ctx->cnts.tx16p when passing the data to the VP9 library
+	 * function.
+ */
+ vp9_ctx->cnts.tx8p = &cnts->tx8x8_count;
+
+ vp9_ctx->cnts.y_mode = &cnts->sb_ymode_counts;
+ vp9_ctx->cnts.uv_mode = &cnts->uv_mode_counts;
+ vp9_ctx->cnts.comp = &cnts->comp_inter_count;
+ vp9_ctx->cnts.comp_ref = &cnts->comp_ref_count;
+ vp9_ctx->cnts.single_ref = &cnts->single_ref_count;
+ vp9_ctx->cnts.filter = &cnts->switchable_interp_counts;
+ vp9_ctx->cnts.mv_joint = &cnts->mv_counts.joints;
+ vp9_ctx->cnts.sign = &cnts->mv_counts.sign;
+ vp9_ctx->cnts.classes = &cnts->mv_counts.classes;
+ vp9_ctx->cnts.class0 = &cnts->mv_counts.class0;
+ vp9_ctx->cnts.bits = &cnts->mv_counts.bits;
+ vp9_ctx->cnts.class0_fp = &cnts->mv_counts.class0_fp;
+ vp9_ctx->cnts.fp = &cnts->mv_counts.fp;
+ vp9_ctx->cnts.class0_hp = &cnts->mv_counts.class0_hp;
+ vp9_ctx->cnts.hp = &cnts->mv_counts.hp;
+
+ for (i = 0; i < ARRAY_SIZE(vp9_ctx->cnts.coeff); ++i)
+ for (j = 0; j < ARRAY_SIZE(vp9_ctx->cnts.coeff[i]); ++j)
+ for (k = 0; k < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0]); ++k)
+ for (l = 0; l < ARRAY_SIZE(vp9_ctx->cnts.coeff[i][0][0]); ++l)
+ INNER_LOOP;
+}
+
+int hantro_vp9_dec_init(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_variant *variant = vpu->variant;
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
+ struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+ u32 i, max_width, max_height, size;
+
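+	/*
+	 * Size the auxiliary buffers for the largest VP9 frame this variant
+	 * supports, so they presumably remain valid across resolution changes.
+	 */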
+ if (variant->num_dec_fmts < 1)
+ return -EINVAL;
+
+ for (i = 0; i < variant->num_dec_fmts; ++i)
+ if (variant->dec_fmts[i].fourcc == V4L2_PIX_FMT_VP9_FRAME)
+ break;
+
+ if (i == variant->num_dec_fmts)
+ return -EINVAL;
+
+ max_width = vpu->variant->dec_fmts[i].frmsize.max_width;
+ max_height = vpu->variant->dec_fmts[i].frmsize.max_height;
+
+ size = hantro_vp9_tile_filter_size(max_height);
+ vp9_dec->bsd_ctrl_offset = size;
+ size += hantro_vp9_bsd_control_size(max_height);
+
+ tile_edge->cpu = dma_alloc_coherent(vpu->dev, size, &tile_edge->dma, GFP_KERNEL);
+ if (!tile_edge->cpu)
+ return -ENOMEM;
+
+ tile_edge->size = size;
+ memset(tile_edge->cpu, 0, size);
+
+ size = hantro_vp9_segment_map_size(max_width, max_height);
+ vp9_dec->segment_map_size = size;
+ size *= 2; /* we need two areas of this size, used alternately */
+
+ segment_map->cpu = dma_alloc_coherent(vpu->dev, size, &segment_map->dma, GFP_KERNEL);
+ if (!segment_map->cpu)
+ goto err_segment_map;
+
+ segment_map->size = size;
+ memset(segment_map->cpu, 0, size);
+
+ size = hantro_vp9_prob_tab_size();
+ vp9_dec->ctx_counters_offset = size;
+ size += hantro_vp9_count_tab_size();
+ vp9_dec->tile_info_offset = size;
+ size += hantro_vp9_tile_info_size();
+
+ misc->cpu = dma_alloc_coherent(vpu->dev, size, &misc->dma, GFP_KERNEL);
+ if (!misc->cpu)
+ goto err_misc;
+
+ misc->size = size;
+ memset(misc->cpu, 0, size);
+
+ init_v4l2_vp9_count_tbl(ctx);
+
+ return 0;
+
+err_misc:
+ dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
+
+err_segment_map:
+ dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
+
+ return -ENOMEM;
+}
+
+void hantro_vp9_dec_exit(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct hantro_vp9_dec_hw_ctx *vp9_dec = &ctx->vp9_dec;
+ struct hantro_aux_buf *tile_edge = &vp9_dec->tile_edge;
+ struct hantro_aux_buf *segment_map = &vp9_dec->segment_map;
+ struct hantro_aux_buf *misc = &vp9_dec->misc;
+
+ dma_free_coherent(vpu->dev, misc->size, misc->cpu, misc->dma);
+ dma_free_coherent(vpu->dev, segment_map->size, segment_map->cpu, segment_map->dma);
+ dma_free_coherent(vpu->dev, tile_edge->size, tile_edge->cpu, tile_edge->dma);
+}
diff --git a/drivers/media/platform/verisilicon/hantro_vp9.h b/drivers/media/platform/verisilicon/hantro_vp9.h
new file mode 100644
index 000000000000..26b69275f098
--- /dev/null
+++ b/drivers/media/platform/verisilicon/hantro_vp9.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VP9 codec driver
+ *
+ * Copyright (C) 2021 Collabora Ltd.
+ */
+
+struct hantro_g2_mv_probs {
+ u8 joint[3];
+ u8 sign[2];
+ u8 class0_bit[2][1];
+ u8 fr[2][3];
+ u8 class0_hp[2];
+ u8 hp[2];
+ u8 classes[2][10];
+ u8 class0_fr[2][2][3];
+ u8 bits[2][10];
+};
+
+struct hantro_g2_probs {
+ u8 inter_mode[7][4];
+ u8 is_inter[4];
+ u8 uv_mode[10][8];
+ u8 tx8[2][1];
+ u8 tx16[2][2];
+ u8 tx32[2][3];
+ u8 y_mode_tail[4][1];
+ u8 y_mode[4][8];
+ u8 partition[2][16][4]; /* [keyframe][][], [inter][][] */
+ u8 uv_mode_tail[10][1];
+ u8 interp_filter[4][2];
+ u8 comp_mode[5];
+ u8 skip[3];
+
+ u8 pad1[1];
+
+ struct hantro_g2_mv_probs mv;
+
+ u8 single_ref[5][2];
+ u8 comp_ref[5];
+
+ u8 pad2[17];
+
+ u8 coef[4][2][2][6][6][4];
+};
+
+struct hantro_g2_all_probs {
+ u8 kf_y_mode_prob[10][10][8];
+
+ u8 kf_y_mode_prob_tail[10][10][1];
+ u8 ref_pred_probs[3];
+ u8 mb_segment_tree_probs[7];
+ u8 segment_pred_probs[3];
+ u8 ref_scores[4];
+ u8 prob_comppred[2];
+
+ u8 pad1[9];
+
+ u8 kf_uv_mode_prob[10][8];
+ u8 kf_uv_mode_prob_tail[10][1];
+
+ u8 pad2[6];
+
+ struct hantro_g2_probs probs;
+};
+
+struct mv_counts {
+ u32 joints[4];
+ u32 sign[2][2];
+ u32 classes[2][11];
+ u32 class0[2][2];
+ u32 bits[2][10][2];
+ u32 class0_fp[2][2][4];
+ u32 fp[2][4];
+ u32 class0_hp[2][2];
+ u32 hp[2][2];
+};
+
+struct symbol_counts {
+ u32 inter_mode_counts[7][3][2];
+ u32 sb_ymode_counts[4][10];
+ u32 uv_mode_counts[10][10];
+ u32 partition_counts[16][4];
+ u32 switchable_interp_counts[4][3];
+ u32 intra_inter_count[4][2];
+ u32 comp_inter_count[5][2];
+ u32 single_ref_count[5][2][2];
+ u32 comp_ref_count[5][2];
+ u32 tx32x32_count[2][4];
+ u32 tx16x16_count[2][3];
+ u32 tx8x8_count[2][2];
+ u32 mbskip_count[3][2];
+
+ struct mv_counts mv_counts;
+
+ u32 count_coeffs[2][2][6][6][4];
+ u32 count_coeffs8x8[2][2][6][6][4];
+ u32 count_coeffs16x16[2][2][6][6][4];
+ u32 count_coeffs32x32[2][2][6][6][4];
+
+ u32 count_eobs[4][2][2][6][6];
+};
diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
new file mode 100644
index 000000000000..77f574fdfa77
--- /dev/null
+++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2019 Pengutronix, Philipp Zabel <kernel@pengutronix.de>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+#include "hantro_g2_regs.h"
+
+#define CTRL_SOFT_RESET 0x00
+#define RESET_G1 BIT(1)
+#define RESET_G2 BIT(0)
+
+#define CTRL_CLOCK_ENABLE 0x04
+#define CLOCK_G1 BIT(1)
+#define CLOCK_G2 BIT(0)
+
+#define CTRL_G1_DEC_FUSE 0x08
+#define CTRL_G1_PP_FUSE 0x0c
+#define CTRL_G2_DEC_FUSE 0x10
+
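+/*
+ * The reset bits in CTRL_SOFT_RESET are active-low: clearing a bit holds the
+ * corresponding core in reset, setting it releases the reset.
+ */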
+static void imx8m_soft_reset(struct hantro_dev *vpu, u32 reset_bits)
+{
+ u32 val;
+
+ /* Assert */
+ val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
+ val &= ~reset_bits;
+ writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
+
+ udelay(2);
+
+ /* Release */
+ val = readl(vpu->ctrl_base + CTRL_SOFT_RESET);
+ val |= reset_bits;
+ writel(val, vpu->ctrl_base + CTRL_SOFT_RESET);
+}
+
+static void imx8m_clk_enable(struct hantro_dev *vpu, u32 clock_bits)
+{
+ u32 val;
+
+ val = readl(vpu->ctrl_base + CTRL_CLOCK_ENABLE);
+ val |= clock_bits;
+ writel(val, vpu->ctrl_base + CTRL_CLOCK_ENABLE);
+}
+
+static int imx8mq_runtime_resume(struct hantro_dev *vpu)
+{
+ int ret;
+
+ ret = clk_bulk_prepare_enable(vpu->variant->num_clocks, vpu->clocks);
+ if (ret) {
+ dev_err(vpu->dev, "Failed to enable clocks\n");
+ return ret;
+ }
+
+ imx8m_soft_reset(vpu, RESET_G1 | RESET_G2);
+ imx8m_clk_enable(vpu, CLOCK_G1 | CLOCK_G2);
+
+ /* Set values of the fuse registers */
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G1_DEC_FUSE);
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G1_PP_FUSE);
+ writel(0xffffffff, vpu->ctrl_base + CTRL_G2_DEC_FUSE);
+
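+	/*
+	 * The clocks were only needed here to program the reset and fuse
+	 * registers; they are presumably re-enabled when a job actually runs.
+	 */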
+ clk_bulk_disable_unprepare(vpu->variant->num_clocks, vpu->clocks);
+
+ return 0;
+}
+
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt imx8m_vpu_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_HEVC_SLICE,
+ .codec_mode = HANTRO_MODE_HEVC_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9_FRAME,
+ .codec_mode = HANTRO_MODE_VP9_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = TILE_MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = TILE_MB_DIM,
+ },
+ },
+};
+
+static irqreturn_t imx8m_vpu_g1_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, G1_REG_INTERRUPT);
+ state = (status & G1_REG_INTERRUPT_DEC_RDY_INT) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static int imx8mq_vpu_hw_init(struct hantro_dev *vpu)
+{
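+	/* The last register range ("ctrl") holds the reset and clock gating controls. */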
+ vpu->ctrl_base = vpu->reg_bases[vpu->variant->num_regs - 1];
+
+ return 0;
+}
+
+static void imx8m_vpu_g1_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ imx8m_soft_reset(vpu, RESET_G1);
+}
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops imx8mq_vpu_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = imx8m_vpu_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops imx8mq_vpu_g1_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops imx8mq_vpu_g2_codec_ops[] = {
+ [HANTRO_MODE_HEVC_DEC] = {
+ .run = hantro_g2_hevc_dec_run,
+ .init = hantro_hevc_dec_init,
+ .exit = hantro_hevc_dec_exit,
+ },
+ [HANTRO_MODE_VP9_DEC] = {
+ .run = hantro_g2_vp9_dec_run,
+ .done = hantro_g2_vp9_dec_done,
+ .init = hantro_vp9_dec_init,
+ .exit = hantro_vp9_dec_exit,
+ },
+};
+
+/*
+ * VPU variants.
+ */
+
+static const struct hantro_irq imx8mq_irqs[] = {
+ { "g1", imx8m_vpu_g1_irq },
+};
+
+static const struct hantro_irq imx8mq_g2_irqs[] = {
+ { "g2", hantro_g2_irq },
+};
+
+static const char * const imx8mq_clk_names[] = { "g1", "g2", "bus" };
+static const char * const imx8mq_reg_names[] = { "g1", "g2", "ctrl" };
+static const char * const imx8mq_g1_clk_names[] = { "g1" };
+static const char * const imx8mq_g2_clk_names[] = { "g2" };
+
+const struct hantro_variant imx8mq_vpu_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .postproc_fmts = imx8m_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_codec_ops,
+ .init = imx8mq_vpu_hw_init,
+ .runtime_resume = imx8mq_runtime_resume,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_clk_names),
+ .reg_names = imx8mq_reg_names,
+ .num_regs = ARRAY_SIZE(imx8mq_reg_names)
+};
+
+const struct hantro_variant imx8mq_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .postproc_fmts = imx8m_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_g1_codec_ops,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
+};
+
+const struct hantro_variant imx8mq_vpu_g2_variant = {
+ .dec_offset = 0x0,
+ .dec_fmts = imx8m_vpu_g2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_g2_dec_fmts),
+ .postproc_fmts = imx8m_vpu_g2_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(imx8m_vpu_g2_postproc_fmts),
+ .postproc_ops = &hantro_g2_postproc_ops,
+ .codec = HANTRO_HEVC_DECODER | HANTRO_VP9_DECODER,
+ .codec_ops = imx8mq_vpu_g2_codec_ops,
+ .irqs = imx8mq_g2_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_g2_irqs),
+ .clk_names = imx8mq_g2_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g2_clk_names),
+};
+
+const struct hantro_variant imx8mm_vpu_g1_variant = {
+ .dec_fmts = imx8m_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(imx8m_vpu_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = imx8mq_vpu_g1_codec_ops,
+ .irqs = imx8mq_irqs,
+ .num_irqs = ARRAY_SIZE(imx8mq_irqs),
+ .clk_names = imx8mq_g1_clk_names,
+ .num_clocks = ARRAY_SIZE(imx8mq_g1_clk_names),
+};
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
new file mode 100644
index 000000000000..46c1a83bcc4e
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_h264_dec.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (c) 2014 Rockchip Electronics Co., Ltd.
+ * Hertz Wong <hertz.wong@rock-chips.com>
+ * Herman Chen <herman.chen@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Tomasz Figa <tfiga@chromium.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sort.h>
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_hw.h"
+#include "hantro_v4l2.h"
+
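+/* Each software register is 32 bits wide, hence the conversion to a byte offset. */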
+#define VDPU_SWREG(nr) ((nr) * 4)
+
+#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
+#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
+#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
+#define VDPU_REG_DIR_MV_BASE VDPU_SWREG(62)
+#define VDPU_REG_REFER_BASE(i) (VDPU_SWREG(84 + (i)))
+#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
+#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_PIC_FIXED_QUANT(v) ((v) ? BIT(7) : 0)
+#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
+
+#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
+#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
+#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
+
+#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
+#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_START_CODE_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_CH_8PIX_ILEAV_E(v) ((v) ? BIT(21) : 0)
+#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
+#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
+#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
+#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
+#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_SEQ_MBAFF_E(v) ((v) ? BIT(7) : 0)
+#define VDPU_REG_PICORD_COUNT_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
+
+#define VDPU_REG_PRED_BC_TAP_0_0(v) (((v) << 22) & GENMASK(31, 22))
+#define VDPU_REG_PRED_BC_TAP_0_1(v) (((v) << 12) & GENMASK(21, 12))
+#define VDPU_REG_PRED_BC_TAP_0_2(v) (((v) << 2) & GENMASK(11, 2))
+
+#define VDPU_REG_REFBU_E(v) ((v) ? BIT(31) : 0)
+
+#define VDPU_REG_PINIT_RLIST_F9(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_PINIT_RLIST_F8(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_PINIT_RLIST_F7(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_PINIT_RLIST_F6(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_PINIT_RLIST_F5(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_PINIT_RLIST_F4(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_PINIT_RLIST_F15(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_PINIT_RLIST_F14(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_PINIT_RLIST_F13(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_PINIT_RLIST_F12(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_PINIT_RLIST_F11(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_PINIT_RLIST_F10(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_REFER1_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER0_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER3_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER2_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER5_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER4_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER7_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER6_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER9_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER8_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER11_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER10_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER13_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER12_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFER15_NBR(v) (((v) << 16) & GENMASK(31, 16))
+#define VDPU_REG_REFER14_NBR(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_BINIT_RLIST_F5(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_F4(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_F3(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_F2(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_F1(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_F0(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_F11(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_F10(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_F9(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_F8(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_F7(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_F6(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_F15(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_F14(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_F13(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_F12(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_B5(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_B4(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_B3(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_B2(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_B1(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_B0(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_B11(v) (((v) << 25) & GENMASK(29, 25))
+#define VDPU_REG_BINIT_RLIST_B10(v) (((v) << 20) & GENMASK(24, 20))
+#define VDPU_REG_BINIT_RLIST_B9(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_B8(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_B7(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_B6(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_BINIT_RLIST_B15(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_BINIT_RLIST_B14(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_BINIT_RLIST_B13(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_BINIT_RLIST_B12(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_PINIT_RLIST_F3(v) (((v) << 15) & GENMASK(19, 15))
+#define VDPU_REG_PINIT_RLIST_F2(v) (((v) << 10) & GENMASK(14, 10))
+#define VDPU_REG_PINIT_RLIST_F1(v) (((v) << 5) & GENMASK(9, 5))
+#define VDPU_REG_PINIT_RLIST_F0(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_REFER_LTERM_E(v) (((v) << 0) & GENMASK(31, 0))
+
+#define VDPU_REG_REFER_VALID_E(v) (((v) << 0) & GENMASK(31, 0))
+
+#define VDPU_REG_STRM_START_BIT(v) (((v) << 0) & GENMASK(5, 0))
+
+#define VDPU_REG_CH_QP_OFFSET2(v) (((v) << 22) & GENMASK(26, 22))
+#define VDPU_REG_CH_QP_OFFSET(v) (((v) << 17) & GENMASK(21, 17))
+#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 9) & GENMASK(16, 9))
+#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 0) & GENMASK(8, 0))
+
+#define VDPU_REG_WEIGHT_BIPR_IDC(v) (((v) << 16) & GENMASK(17, 16))
+#define VDPU_REG_REF_FRAMES(v) (((v) << 0) & GENMASK(4, 0))
+
+#define VDPU_REG_FILT_CTRL_PRES(v) ((v) ? BIT(31) : 0)
+#define VDPU_REG_RDPIC_CNT_PRES(v) ((v) ? BIT(30) : 0)
+#define VDPU_REG_FRAMENUM_LEN(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_FRAMENUM(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_REFPIC_MK_LEN(v) (((v) << 16) & GENMASK(26, 16))
+#define VDPU_REG_IDR_PIC_ID(v) (((v) << 0) & GENMASK(15, 0))
+
+#define VDPU_REG_PPS_ID(v) (((v) << 24) & GENMASK(31, 24))
+#define VDPU_REG_REFIDX1_ACTIVE(v) (((v) << 19) & GENMASK(23, 19))
+#define VDPU_REG_REFIDX0_ACTIVE(v) (((v) << 14) & GENMASK(18, 14))
+#define VDPU_REG_POC_LENGTH(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_IDR_PIC_E(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_DIR_8X8_INFER_E(v) ((v) ? BIT(7) : 0)
+#define VDPU_REG_BLACKWHITE_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_CABAC_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_WEIGHT_PRED_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_CONST_INTRA_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_8X8TRANS_FLAG_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_TYPE1_QUANT_E(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_FIELDPIC_FLAG_E(v) ((v) ? BIT(0) : 0)
+
+static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ const struct v4l2_ctrl_h264_decode_params *dec_param = ctrls->decode;
+ const struct v4l2_ctrl_h264_sps *sps = ctrls->sps;
+ const struct v4l2_ctrl_h264_pps *pps = ctrls->pps;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 reg;
+
+ reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
+ VDPU_REG_DEC_SCMD_DIS(0) |
+ VDPU_REG_FILTERING_DIS(0) |
+ VDPU_REG_PIC_FIXED_QUANT(0) |
+ VDPU_REG_DEC_LATENCY(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
+
+ reg = VDPU_REG_INIT_QP(pps->pic_init_qp_minus26 + 26) |
+ VDPU_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
+
+ reg = VDPU_REG_APF_THRESHOLD(8) |
+ VDPU_REG_STARTMB_X(0) |
+ VDPU_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
+
+ reg = VDPU_REG_DEC_MODE(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
+
+ reg = VDPU_REG_DEC_STRENDIAN_E(1) |
+ VDPU_REG_DEC_STRSWAP32_E(1) |
+ VDPU_REG_DEC_OUTSWAP32_E(1) |
+ VDPU_REG_DEC_INSWAP32_E(1) |
+ VDPU_REG_DEC_OUT_ENDIAN(1) |
+ VDPU_REG_DEC_IN_ENDIAN(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
+
+ reg = VDPU_REG_DEC_DATA_DISC_E(0) |
+ VDPU_REG_DEC_MAX_BURST(16) |
+ VDPU_REG_DEC_AXI_WR_ID(0) |
+ VDPU_REG_DEC_AXI_RD_ID(0xff);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
+
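+ /*
+ * Motion vector output (WRITE_MVS_E) is only needed for reference
+ * pictures of profiles above Baseline (profile_idc 66).
+ */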
+ reg = VDPU_REG_START_CODE_E(1) |
+ VDPU_REG_CH_8PIX_ILEAV_E(0) |
+ VDPU_REG_RLC_MODE_E(0) |
+ VDPU_REG_PIC_INTERLACE_E(!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY) &&
+ (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD ||
+ dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC)) |
+ VDPU_REG_PIC_FIELDMODE_E(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC) |
+ VDPU_REG_PIC_TOPFIELD_E(!(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)) |
+ VDPU_REG_WRITE_MVS_E((sps->profile_idc > 66) && dec_param->nal_ref_idc) |
+ VDPU_REG_SEQ_MBAFF_E(sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD) |
+ VDPU_REG_PICORD_COUNT_E(sps->profile_idc > 66) |
+ VDPU_REG_DEC_TIMEOUT_E(1) |
+ VDPU_REG_DEC_CLK_GATE_E(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
+
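+ /*
+ * First half of the symmetric H.264 6-tap luma interpolation filter
+ * (1, -5, 20, 20, -5, 1).
+ */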
+ reg = VDPU_REG_PRED_BC_TAP_0_0(1) |
+ VDPU_REG_PRED_BC_TAP_0_1((u32)-5) |
+ VDPU_REG_PRED_BC_TAP_0_2(20);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(59));
+
+ reg = VDPU_REG_REFBU_E(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(65));
+
+ reg = VDPU_REG_STRM_START_BIT(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(109));
+
+ reg = VDPU_REG_CH_QP_OFFSET2(pps->second_chroma_qp_index_offset) |
+ VDPU_REG_CH_QP_OFFSET(pps->chroma_qp_index_offset) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->src_fmt.height)) |
+ VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(110));
+
+ reg = VDPU_REG_WEIGHT_BIPR_IDC(pps->weighted_bipred_idc) |
+ VDPU_REG_REF_FRAMES(sps->max_num_ref_frames);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(111));
+
+ reg = VDPU_REG_FILT_CTRL_PRES(pps->flags & V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT) |
+ VDPU_REG_RDPIC_CNT_PRES(pps->flags & V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT) |
+ VDPU_REG_FRAMENUM_LEN(sps->log2_max_frame_num_minus4 + 4) |
+ VDPU_REG_FRAMENUM(dec_param->frame_num);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(112));
+
+ reg = VDPU_REG_REFPIC_MK_LEN(dec_param->dec_ref_pic_marking_bit_size) |
+ VDPU_REG_IDR_PIC_ID(dec_param->idr_pic_id);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(113));
+
+ reg = VDPU_REG_PPS_ID(pps->pic_parameter_set_id) |
+ VDPU_REG_REFIDX1_ACTIVE(pps->num_ref_idx_l1_default_active_minus1 + 1) |
+ VDPU_REG_REFIDX0_ACTIVE(pps->num_ref_idx_l0_default_active_minus1 + 1) |
+ VDPU_REG_POC_LENGTH(dec_param->pic_order_cnt_bit_size);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(114));
+
+ reg = VDPU_REG_IDR_PIC_E(dec_param->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC) |
+ VDPU_REG_DIR_8X8_INFER_E(sps->flags & V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE) |
+ VDPU_REG_BLACKWHITE_E(sps->profile_idc >= 100 && sps->chroma_format_idc == 0) |
+ VDPU_REG_CABAC_E(pps->flags & V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE) |
+ VDPU_REG_WEIGHT_PRED_E(pps->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) |
+ VDPU_REG_CONST_INTRA_E(pps->flags & V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED) |
+ VDPU_REG_8X8TRANS_FLAG_E(pps->flags & V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE) |
+ VDPU_REG_TYPE1_QUANT_E(pps->flags & V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT) |
+ VDPU_REG_FIELDPIC_FLAG_E(!(sps->flags & V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(115));
+}
+
+static void set_ref(struct hantro_ctx *ctx)
+{
+ const struct v4l2_h264_reference *b0_reflist, *b1_reflist, *p_reflist;
+ struct hantro_dev *vpu = ctx->dev;
+ u32 reg;
+ int i;
+
+ b0_reflist = ctx->h264_dec.reflists.b0;
+ b1_reflist = ctx->h264_dec.reflists.b1;
+ p_reflist = ctx->h264_dec.reflists.p;
+
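+ /* Program the initial P and B (list 0/list 1) reference picture lists. */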
+ reg = VDPU_REG_PINIT_RLIST_F9(p_reflist[9].index) |
+ VDPU_REG_PINIT_RLIST_F8(p_reflist[8].index) |
+ VDPU_REG_PINIT_RLIST_F7(p_reflist[7].index) |
+ VDPU_REG_PINIT_RLIST_F6(p_reflist[6].index) |
+ VDPU_REG_PINIT_RLIST_F5(p_reflist[5].index) |
+ VDPU_REG_PINIT_RLIST_F4(p_reflist[4].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(74));
+
+ reg = VDPU_REG_PINIT_RLIST_F15(p_reflist[15].index) |
+ VDPU_REG_PINIT_RLIST_F14(p_reflist[14].index) |
+ VDPU_REG_PINIT_RLIST_F13(p_reflist[13].index) |
+ VDPU_REG_PINIT_RLIST_F12(p_reflist[12].index) |
+ VDPU_REG_PINIT_RLIST_F11(p_reflist[11].index) |
+ VDPU_REG_PINIT_RLIST_F10(p_reflist[10].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(75));
+
+ reg = VDPU_REG_REFER1_NBR(hantro_h264_get_ref_nbr(ctx, 1)) |
+ VDPU_REG_REFER0_NBR(hantro_h264_get_ref_nbr(ctx, 0));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(76));
+
+ reg = VDPU_REG_REFER3_NBR(hantro_h264_get_ref_nbr(ctx, 3)) |
+ VDPU_REG_REFER2_NBR(hantro_h264_get_ref_nbr(ctx, 2));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(77));
+
+ reg = VDPU_REG_REFER5_NBR(hantro_h264_get_ref_nbr(ctx, 5)) |
+ VDPU_REG_REFER4_NBR(hantro_h264_get_ref_nbr(ctx, 4));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(78));
+
+ reg = VDPU_REG_REFER7_NBR(hantro_h264_get_ref_nbr(ctx, 7)) |
+ VDPU_REG_REFER6_NBR(hantro_h264_get_ref_nbr(ctx, 6));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(79));
+
+ reg = VDPU_REG_REFER9_NBR(hantro_h264_get_ref_nbr(ctx, 9)) |
+ VDPU_REG_REFER8_NBR(hantro_h264_get_ref_nbr(ctx, 8));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(80));
+
+ reg = VDPU_REG_REFER11_NBR(hantro_h264_get_ref_nbr(ctx, 11)) |
+ VDPU_REG_REFER10_NBR(hantro_h264_get_ref_nbr(ctx, 10));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(81));
+
+ reg = VDPU_REG_REFER13_NBR(hantro_h264_get_ref_nbr(ctx, 13)) |
+ VDPU_REG_REFER12_NBR(hantro_h264_get_ref_nbr(ctx, 12));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(82));
+
+ reg = VDPU_REG_REFER15_NBR(hantro_h264_get_ref_nbr(ctx, 15)) |
+ VDPU_REG_REFER14_NBR(hantro_h264_get_ref_nbr(ctx, 14));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(83));
+
+ reg = VDPU_REG_BINIT_RLIST_F5(b0_reflist[5].index) |
+ VDPU_REG_BINIT_RLIST_F4(b0_reflist[4].index) |
+ VDPU_REG_BINIT_RLIST_F3(b0_reflist[3].index) |
+ VDPU_REG_BINIT_RLIST_F2(b0_reflist[2].index) |
+ VDPU_REG_BINIT_RLIST_F1(b0_reflist[1].index) |
+ VDPU_REG_BINIT_RLIST_F0(b0_reflist[0].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(100));
+
+ reg = VDPU_REG_BINIT_RLIST_F11(b0_reflist[11].index) |
+ VDPU_REG_BINIT_RLIST_F10(b0_reflist[10].index) |
+ VDPU_REG_BINIT_RLIST_F9(b0_reflist[9].index) |
+ VDPU_REG_BINIT_RLIST_F8(b0_reflist[8].index) |
+ VDPU_REG_BINIT_RLIST_F7(b0_reflist[7].index) |
+ VDPU_REG_BINIT_RLIST_F6(b0_reflist[6].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(101));
+
+ reg = VDPU_REG_BINIT_RLIST_F15(b0_reflist[15].index) |
+ VDPU_REG_BINIT_RLIST_F14(b0_reflist[14].index) |
+ VDPU_REG_BINIT_RLIST_F13(b0_reflist[13].index) |
+ VDPU_REG_BINIT_RLIST_F12(b0_reflist[12].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(102));
+
+ reg = VDPU_REG_BINIT_RLIST_B5(b1_reflist[5].index) |
+ VDPU_REG_BINIT_RLIST_B4(b1_reflist[4].index) |
+ VDPU_REG_BINIT_RLIST_B3(b1_reflist[3].index) |
+ VDPU_REG_BINIT_RLIST_B2(b1_reflist[2].index) |
+ VDPU_REG_BINIT_RLIST_B1(b1_reflist[1].index) |
+ VDPU_REG_BINIT_RLIST_B0(b1_reflist[0].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(103));
+
+ reg = VDPU_REG_BINIT_RLIST_B11(b1_reflist[11].index) |
+ VDPU_REG_BINIT_RLIST_B10(b1_reflist[10].index) |
+ VDPU_REG_BINIT_RLIST_B9(b1_reflist[9].index) |
+ VDPU_REG_BINIT_RLIST_B8(b1_reflist[8].index) |
+ VDPU_REG_BINIT_RLIST_B7(b1_reflist[7].index) |
+ VDPU_REG_BINIT_RLIST_B6(b1_reflist[6].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(104));
+
+ reg = VDPU_REG_BINIT_RLIST_B15(b1_reflist[15].index) |
+ VDPU_REG_BINIT_RLIST_B14(b1_reflist[14].index) |
+ VDPU_REG_BINIT_RLIST_B13(b1_reflist[13].index) |
+ VDPU_REG_BINIT_RLIST_B12(b1_reflist[12].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(105));
+
+ reg = VDPU_REG_PINIT_RLIST_F3(p_reflist[3].index) |
+ VDPU_REG_PINIT_RLIST_F2(p_reflist[2].index) |
+ VDPU_REG_PINIT_RLIST_F1(p_reflist[1].index) |
+ VDPU_REG_PINIT_RLIST_F0(p_reflist[0].index);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(106));
+
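+ /* Bitmaps of long-term and valid reference pictures in the DPB. */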
+ reg = VDPU_REG_REFER_LTERM_E(ctx->h264_dec.dpb_longterm);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(107));
+
+ reg = VDPU_REG_REFER_VALID_E(ctx->h264_dec.dpb_valid);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(108));
+
+ /* Set up addresses of DPB buffers. */
+ for (i = 0; i < HANTRO_H264_DPB_SIZE; i++) {
+ dma_addr_t dma_addr = hantro_h264_get_ref_buf(ctx, i);
+
+ vdpu_write_relaxed(vpu, dma_addr, VDPU_REG_REFER_BASE(i));
+ }
+}
+
+static void set_buffers(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
+{
+ const struct hantro_h264_dec_ctrls *ctrls = &ctx->h264_dec.ctrls;
+ struct vb2_v4l2_buffer *dst_buf;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t src_dma, dst_dma;
+ size_t offset = 0;
+
+ /* Source (stream) buffer. */
+ src_dma = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, src_dma, VDPU_REG_RLC_VLC_BASE);
+
+ /* Destination (decoded frame) buffer. */
+ dst_buf = hantro_get_dst_buf(ctx);
+ dst_dma = hantro_get_dec_buf_addr(ctx, &dst_buf->vb2_buf);
+ /* Adjust dma addr to start at second line for bottom field */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset = ALIGN(ctx->src_fmt.width, MB_DIM);
+ vdpu_write_relaxed(vpu, dst_dma + offset, VDPU_REG_DEC_OUT_BASE);
+
+ /* Higher profiles require DMV buffer appended to reference frames. */
+ if (ctrls->sps->profile_idc > 66 && ctrls->decode->nal_ref_idc) {
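+ /* 384 bytes per macroblock: 256 luma + 128 chroma (4:2:0). */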
+ unsigned int bytes_per_mb = 384;
+
+ /* The DMV buffer for monochrome streams starts directly after the Y plane */
+ if (ctrls->sps->profile_idc >= 100 &&
+ ctrls->sps->chroma_format_idc == 0)
+ bytes_per_mb = 256;
+ offset = bytes_per_mb * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+
+ /*
+ * The DMV buffer is split in two for field-encoded frames;
+ * adjust the offset for the bottom field.
+ */
+ if (ctrls->decode->flags & V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD)
+ offset += 32 * MB_WIDTH(ctx->src_fmt.width) *
+ MB_HEIGHT(ctx->src_fmt.height);
+ vdpu_write_relaxed(vpu, dst_dma + offset, VDPU_REG_DIR_MV_BASE);
+ }
+
+ /* Auxiliary buffer prepared in hantro_g1_h264_dec_prepare_table(). */
+ vdpu_write_relaxed(vpu, ctx->h264_dec.priv.dma, VDPU_REG_QTABLE_BASE);
+}
+
+int rockchip_vpu2_h264_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf;
+ u32 reg;
+ int ret;
+
+ /* Prepare the H264 decoder context. */
+ ret = hantro_h264_dec_prepare_run(ctx);
+ if (ret)
+ return ret;
+
+ src_buf = hantro_get_src_buf(ctx);
+ set_params(ctx, src_buf);
+ set_ref(ctx);
+ set_buffers(ctx, src_buf);
+
+ hantro_end_prepare_run(ctx);
+
+ /* Start decoding! */
+ reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
+ vdpu_write(vpu, reg, VDPU_SWREG(57));
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c
new file mode 100644
index 000000000000..8395c4d48dd0
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_jpeg_enc.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ *
+ * JPEG encoder
+ * ------------
+ * The VPU JPEG encoder produces JPEG baseline sequential format.
+ * The quantization coefficients are 8-bit values, complying with
+ * the baseline specification. Therefore, it requires
+ * luma and chroma quantization tables. The hardware does entropy
+ * encoding using internal Huffman tables, as specified in the JPEG
+ * specification.
+ *
+ * In other words, only the luma and chroma quantization tables are
+ * required for the encoding operation.
+ *
+ * Quantization luma table values are written to registers
+ * VEPU_swreg_0-VEPU_swreg_15, and chroma table values to
+ * VEPU_swreg_16-VEPU_swreg_31. A special order is needed, neither
+ * zigzag, nor linear.
+ */
+
+#include <asm/unaligned.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro_jpeg.h"
+#include "hantro.h"
+#include "hantro_v4l2.h"
+#include "hantro_hw.h"
+#include "rockchip_vpu2_regs.h"
+
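+/* Each quantization table holds 64 8-bit values, written as 16 32-bit words. */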
+#define VEPU_JPEG_QUANT_TABLE_COUNT 16
+
+static void rockchip_vpu2_set_src_img_ctrl(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ u32 overfill_r, overfill_b;
+ u32 reg;
+
+ /*
+ * The format width and height are already macroblock-aligned
+ * by the .vidioc_s_fmt_vid_cap_mplane() callback. Destination
+ * format width and height can be further modified by
+ * .vidioc_s_selection(), and the width is 4-aligned.
+ */
+ overfill_r = ctx->src_fmt.width - ctx->dst_fmt.width;
+ overfill_b = ctx->src_fmt.height - ctx->dst_fmt.height;
+
+ reg = VEPU_REG_IN_IMG_CTRL_ROW_LEN(ctx->src_fmt.width);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_INPUT_LUMA_INFO);
+
+ reg = VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(overfill_r / 4) |
+ VEPU_REG_IN_IMG_CTRL_OVRFLB(overfill_b);
+ /*
+ * This register controls the input crop, as the offset from the
+ * right/bottom within the last macroblock. The offset from the
+ * right must be divided by 4, so the crop must be aligned to
+ * 4 pixels horizontally.
+ */
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_OVER_FILL_STRM_OFFSET);
+
+ reg = VEPU_REG_IN_IMG_CTRL_FMT(ctx->vpu_src_fmt->enc_fmt);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_ENC_CTRL1);
+}
+
+static void rockchip_vpu2_jpeg_enc_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf)
+{
+ struct v4l2_pix_format_mplane *pix_fmt = &ctx->src_fmt;
+ dma_addr_t src[3];
+ u32 size_left;
+
+ size_left = vb2_plane_size(dst_buf, 0) - ctx->vpu_dst_fmt->header_size;
+ if (WARN_ON(vb2_plane_size(dst_buf, 0) < ctx->vpu_dst_fmt->header_size))
+ size_left = 0;
+
+ WARN_ON(pix_fmt->num_planes > 3);
+
+ vepu_write_relaxed(vpu, vb2_dma_contig_plane_dma_addr(dst_buf, 0) +
+ ctx->vpu_dst_fmt->header_size,
+ VEPU_REG_ADDR_OUTPUT_STREAM);
+ vepu_write_relaxed(vpu, size_left, VEPU_REG_STR_BUF_LIMIT);
+
+ if (pix_fmt->num_planes == 1) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ } else if (pix_fmt->num_planes == 2) {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ } else {
+ src[0] = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ src[1] = vb2_dma_contig_plane_dma_addr(src_buf, 1);
+ src[2] = vb2_dma_contig_plane_dma_addr(src_buf, 2);
+ vepu_write_relaxed(vpu, src[0], VEPU_REG_ADDR_IN_PLANE_0);
+ vepu_write_relaxed(vpu, src[1], VEPU_REG_ADDR_IN_PLANE_1);
+ vepu_write_relaxed(vpu, src[2], VEPU_REG_ADDR_IN_PLANE_2);
+ }
+}
+
+static void
+rockchip_vpu2_jpeg_enc_set_qtable(struct hantro_dev *vpu,
+ unsigned char *luma_qtable,
+ unsigned char *chroma_qtable)
+{
+ u32 reg, i;
+ __be32 *luma_qtable_p;
+ __be32 *chroma_qtable_p;
+
+ luma_qtable_p = (__be32 *)luma_qtable;
+ chroma_qtable_p = (__be32 *)chroma_qtable;
+
+ /*
+ * Quantization table registers must be written in contiguous blocks.
+ * DO NOT collapse the below two "for" loops into one.
+ */
+ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&luma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_LUMA_QUAT(i));
+ }
+
+ for (i = 0; i < VEPU_JPEG_QUANT_TABLE_COUNT; i++) {
+ reg = get_unaligned_be32(&chroma_qtable_p[i]);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_JPEG_CHROMA_QUAT(i));
+ }
+}
+
+int rockchip_vpu2_jpeg_enc_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct hantro_jpeg_ctx jpeg_ctx;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ hantro_start_prepare_run(ctx);
+
+ memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
+ jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ if (!jpeg_ctx.buffer)
+ return -ENOMEM;
+
+ jpeg_ctx.width = ctx->dst_fmt.width;
+ jpeg_ctx.height = ctx->dst_fmt.height;
+ jpeg_ctx.quality = ctx->jpeg_quality;
+ hantro_jpeg_header_assemble(&jpeg_ctx);
+
+ /* Switch to JPEG encoder mode before writing registers */
+ vepu_write_relaxed(vpu, VEPU_REG_ENCODE_FORMAT_JPEG,
+ VEPU_REG_ENCODE_START);
+
+ rockchip_vpu2_set_src_img_ctrl(vpu, ctx);
+ rockchip_vpu2_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf);
+ rockchip_vpu2_jpeg_enc_set_qtable(vpu, jpeg_ctx.hw_luma_qtable,
+ jpeg_ctx.hw_chroma_qtable);
+
+ reg = VEPU_REG_OUTPUT_SWAP32
+ | VEPU_REG_OUTPUT_SWAP16
+ | VEPU_REG_OUTPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP8
+ | VEPU_REG_INPUT_SWAP16
+ | VEPU_REG_INPUT_SWAP32;
+ /* Make sure that all registers are written at this point. */
+ vepu_write(vpu, reg, VEPU_REG_DATA_ENDIAN);
+
+ reg = VEPU_REG_AXI_CTRL_BURST_LEN(16);
+ vepu_write_relaxed(vpu, reg, VEPU_REG_AXI_CTRL);
+
+ reg = VEPU_REG_MB_WIDTH(MB_WIDTH(ctx->src_fmt.width))
+ | VEPU_REG_MB_HEIGHT(MB_HEIGHT(ctx->src_fmt.height))
+ | VEPU_REG_FRAME_TYPE_INTRA
+ | VEPU_REG_ENCODE_FORMAT_JPEG
+ | VEPU_REG_ENCODE_ENABLE;
+
+ /* Kick the watchdog and start encoding */
+ hantro_end_prepare_run(ctx);
+ vepu_write(vpu, reg, VEPU_REG_ENCODE_START);
+
+ return 0;
+}
+
+void rockchip_vpu2_jpeg_enc_done(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ u32 bytesused = vepu_read(vpu, VEPU_REG_STR_BUF_LIMIT) / 8;
+ struct vb2_v4l2_buffer *dst_buf = hantro_get_dst_buf(ctx);
+
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0,
+ ctx->vpu_dst_fmt->header_size + bytesused);
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
new file mode 100644
index 000000000000..b66737fab46b
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_mpeg2_dec.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <media/v4l2-mem2mem.h>
+#include "hantro.h"
+#include "hantro_hw.h"
+
+#define VDPU_SWREG(nr) ((nr) * 4)
+
+#define VDPU_REG_DEC_OUT_BASE VDPU_SWREG(63)
+#define VDPU_REG_RLC_VLC_BASE VDPU_SWREG(64)
+#define VDPU_REG_QTABLE_BASE VDPU_SWREG(61)
+#define VDPU_REG_REFER0_BASE VDPU_SWREG(131)
+#define VDPU_REG_REFER2_BASE VDPU_SWREG(134)
+#define VDPU_REG_REFER3_BASE VDPU_SWREG(135)
+#define VDPU_REG_REFER1_BASE VDPU_SWREG(148)
+#define VDPU_REG_DEC_E(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_ADV_PRE_DIS(v) ((v) ? BIT(11) : 0)
+#define VDPU_REG_DEC_SCMD_DIS(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_FILTERING_DIS(v) ((v) ? BIT(8) : 0)
+#define VDPU_REG_DEC_LATENCY(v) (((v) << 1) & GENMASK(6, 1))
+
+#define VDPU_REG_INIT_QP(v) (((v) << 25) & GENMASK(30, 25))
+#define VDPU_REG_STREAM_LEN(v) (((v) << 0) & GENMASK(23, 0))
+
+#define VDPU_REG_APF_THRESHOLD(v) (((v) << 17) & GENMASK(30, 17))
+#define VDPU_REG_STARTMB_X(v) (((v) << 8) & GENMASK(16, 8))
+#define VDPU_REG_STARTMB_Y(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_DEC_MODE(v) (((v) << 0) & GENMASK(3, 0))
+
+#define VDPU_REG_DEC_STRENDIAN_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_STRSWAP32_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_DEC_OUTSWAP32_E(v) ((v) ? BIT(3) : 0)
+#define VDPU_REG_DEC_INSWAP32_E(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_DEC_OUT_ENDIAN(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_DEC_IN_ENDIAN(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_DEC_DATA_DISC_E(v) ((v) ? BIT(22) : 0)
+#define VDPU_REG_DEC_MAX_BURST(v) (((v) << 16) & GENMASK(20, 16))
+#define VDPU_REG_DEC_AXI_WR_ID(v) (((v) << 8) & GENMASK(15, 8))
+#define VDPU_REG_DEC_AXI_RD_ID(v) (((v) << 0) & GENMASK(7, 0))
+
+#define VDPU_REG_RLC_MODE_E(v) ((v) ? BIT(20) : 0)
+#define VDPU_REG_PIC_INTERLACE_E(v) ((v) ? BIT(17) : 0)
+#define VDPU_REG_PIC_FIELDMODE_E(v) ((v) ? BIT(16) : 0)
+#define VDPU_REG_PIC_B_E(v) ((v) ? BIT(15) : 0)
+#define VDPU_REG_PIC_INTER_E(v) ((v) ? BIT(14) : 0)
+#define VDPU_REG_PIC_TOPFIELD_E(v) ((v) ? BIT(13) : 0)
+#define VDPU_REG_FWD_INTERLACE_E(v) ((v) ? BIT(12) : 0)
+#define VDPU_REG_WRITE_MVS_E(v) ((v) ? BIT(10) : 0)
+#define VDPU_REG_DEC_TIMEOUT_E(v) ((v) ? BIT(5) : 0)
+#define VDPU_REG_DEC_CLK_GATE_E(v) ((v) ? BIT(4) : 0)
+
+#define VDPU_REG_PIC_MB_WIDTH(v) (((v) << 23) & GENMASK(31, 23))
+#define VDPU_REG_PIC_MB_HEIGHT_P(v) (((v) << 11) & GENMASK(18, 11))
+#define VDPU_REG_ALT_SCAN_E(v) ((v) ? BIT(6) : 0)
+#define VDPU_REG_TOPFIELDFIRST_E(v) ((v) ? BIT(5) : 0)
+
+#define VDPU_REG_STRM_START_BIT(v) (((v) << 26) & GENMASK(31, 26))
+#define VDPU_REG_QSCALE_TYPE(v) ((v) ? BIT(24) : 0)
+#define VDPU_REG_CON_MV_E(v) ((v) ? BIT(4) : 0)
+#define VDPU_REG_INTRA_DC_PREC(v) (((v) << 2) & GENMASK(3, 2))
+#define VDPU_REG_INTRA_VLC_TAB(v) ((v) ? BIT(1) : 0)
+#define VDPU_REG_FRAME_PRED_DCT(v) ((v) ? BIT(0) : 0)
+
+#define VDPU_REG_ALT_SCAN_FLAG_E(v) ((v) ? BIT(19) : 0)
+#define VDPU_REG_FCODE_FWD_HOR(v) (((v) << 15) & GENMASK(18, 15))
+#define VDPU_REG_FCODE_FWD_VER(v) (((v) << 11) & GENMASK(14, 11))
+#define VDPU_REG_FCODE_BWD_HOR(v) (((v) << 7) & GENMASK(10, 7))
+#define VDPU_REG_FCODE_BWD_VER(v) (((v) << 3) & GENMASK(6, 3))
+#define VDPU_REG_MV_ACCURACY_FWD(v) ((v) ? BIT(2) : 0)
+#define VDPU_REG_MV_ACCURACY_BWD(v) ((v) ? BIT(1) : 0)
+
+static void
+rockchip_vpu2_mpeg2_dec_set_quantisation(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx)
+{
+ struct v4l2_ctrl_mpeg2_quantisation *q;
+
+ q = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_MPEG2_QUANTISATION);
+ hantro_mpeg2_dec_copy_qtable(ctx->mpeg2_dec.qtable.cpu, q);
+ vdpu_write_relaxed(vpu, ctx->mpeg2_dec.qtable.dma, VDPU_REG_QTABLE_BASE);
+}
+
+static void
+rockchip_vpu2_mpeg2_dec_set_buffers(struct hantro_dev *vpu,
+ struct hantro_ctx *ctx,
+ struct vb2_buffer *src_buf,
+ struct vb2_buffer *dst_buf,
+ const struct v4l2_ctrl_mpeg2_sequence *seq,
+ const struct v4l2_ctrl_mpeg2_picture *pic)
+{
+ dma_addr_t forward_addr = 0, backward_addr = 0;
+ dma_addr_t current_addr, addr;
+
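+ /* B pictures need both forward and backward references, P pictures only forward. */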
+ switch (pic->picture_coding_type) {
+ case V4L2_MPEG2_PIC_CODING_TYPE_B:
+ backward_addr = hantro_get_ref(ctx, pic->backward_ref_ts);
+ fallthrough;
+ case V4L2_MPEG2_PIC_CODING_TYPE_P:
+ forward_addr = hantro_get_ref(ctx, pic->forward_ref_ts);
+ }
+
+ /* Source bitstream buffer */
+ addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_RLC_VLC_BASE);
+
+ /* Destination frame buffer */
+ addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ current_addr = addr;
+
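+ /* A bottom field picture starts at the second line of the destination buffer. */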
+ if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD)
+ addr += ALIGN(ctx->dst_fmt.width, 16);
+ vdpu_write_relaxed(vpu, addr, VDPU_REG_DEC_OUT_BASE);
+
+ if (!forward_addr)
+ forward_addr = current_addr;
+ if (!backward_addr)
+ backward_addr = current_addr;
+
+ /* Set forward ref frame (top/bottom field) */
+ if (pic->picture_structure == V4L2_MPEG2_PIC_FRAME ||
+ pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD &&
+ pic->flags & V4L2_MPEG2_PIC_TOP_FIELD) ||
+ (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD &&
+ !(pic->flags & V4L2_MPEG2_PIC_TOP_FIELD))) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) {
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER1_BASE);
+ } else if (pic->picture_structure == V4L2_MPEG2_PIC_BOTTOM_FIELD) {
+ vdpu_write_relaxed(vpu, current_addr, VDPU_REG_REFER0_BASE);
+ vdpu_write_relaxed(vpu, forward_addr, VDPU_REG_REFER1_BASE);
+ }
+
+ /* Set backward ref frame (top/bottom field) */
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER2_BASE);
+ vdpu_write_relaxed(vpu, backward_addr, VDPU_REG_REFER3_BASE);
+}
+
+int rockchip_vpu2_mpeg2_dec_run(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ const struct v4l2_ctrl_mpeg2_sequence *seq;
+ const struct v4l2_ctrl_mpeg2_picture *pic;
+ u32 reg;
+
+ src_buf = hantro_get_src_buf(ctx);
+ dst_buf = hantro_get_dst_buf(ctx);
+
+ hantro_start_prepare_run(ctx);
+
+ seq = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_SEQUENCE);
+ pic = hantro_get_ctrl(ctx,
+ V4L2_CID_STATELESS_MPEG2_PICTURE);
+
+ reg = VDPU_REG_DEC_ADV_PRE_DIS(0) |
+ VDPU_REG_DEC_SCMD_DIS(0) |
+ VDPU_REG_FILTERING_DIS(1) |
+ VDPU_REG_DEC_LATENCY(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(50));
+
+ reg = VDPU_REG_INIT_QP(1) |
+ VDPU_REG_STREAM_LEN(vb2_get_plane_payload(&src_buf->vb2_buf, 0));
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(51));
+
+ reg = VDPU_REG_APF_THRESHOLD(8) |
+ VDPU_REG_STARTMB_X(0) |
+ VDPU_REG_STARTMB_Y(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(52));
+
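+ /* Decoder mode 5 selects MPEG-2 decoding. */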
+ reg = VDPU_REG_DEC_MODE(5);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(53));
+
+ reg = VDPU_REG_DEC_STRENDIAN_E(1) |
+ VDPU_REG_DEC_STRSWAP32_E(1) |
+ VDPU_REG_DEC_OUTSWAP32_E(1) |
+ VDPU_REG_DEC_INSWAP32_E(1) |
+ VDPU_REG_DEC_OUT_ENDIAN(1) |
+ VDPU_REG_DEC_IN_ENDIAN(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(54));
+
+ reg = VDPU_REG_DEC_DATA_DISC_E(0) |
+ VDPU_REG_DEC_MAX_BURST(16) |
+ VDPU_REG_DEC_AXI_WR_ID(0) |
+ VDPU_REG_DEC_AXI_RD_ID(0);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(56));
+
+ reg = VDPU_REG_RLC_MODE_E(0) |
+ VDPU_REG_PIC_INTERLACE_E(!(seq->flags & V4L2_MPEG2_SEQ_FLAG_PROGRESSIVE)) |
+ VDPU_REG_PIC_FIELDMODE_E(pic->picture_structure != V4L2_MPEG2_PIC_FRAME) |
+ VDPU_REG_PIC_B_E(pic->picture_coding_type == V4L2_MPEG2_PIC_CODING_TYPE_B) |
+ VDPU_REG_PIC_INTER_E(pic->picture_coding_type != V4L2_MPEG2_PIC_CODING_TYPE_I) |
+ VDPU_REG_PIC_TOPFIELD_E(pic->picture_structure == V4L2_MPEG2_PIC_TOP_FIELD) |
+ VDPU_REG_FWD_INTERLACE_E(0) |
+ VDPU_REG_WRITE_MVS_E(0) |
+ VDPU_REG_DEC_TIMEOUT_E(1) |
+ VDPU_REG_DEC_CLK_GATE_E(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(57));
+
+ reg = VDPU_REG_PIC_MB_WIDTH(MB_WIDTH(ctx->dst_fmt.width)) |
+ VDPU_REG_PIC_MB_HEIGHT_P(MB_HEIGHT(ctx->dst_fmt.height)) |
+ VDPU_REG_ALT_SCAN_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ VDPU_REG_TOPFIELDFIRST_E(pic->flags & V4L2_MPEG2_PIC_FLAG_TOP_FIELD_FIRST);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(120));
+
+ reg = VDPU_REG_STRM_START_BIT(0) |
+ VDPU_REG_QSCALE_TYPE(pic->flags & V4L2_MPEG2_PIC_FLAG_Q_SCALE_TYPE) |
+ VDPU_REG_CON_MV_E(pic->flags & V4L2_MPEG2_PIC_FLAG_CONCEALMENT_MV) |
+ VDPU_REG_INTRA_DC_PREC(pic->intra_dc_precision) |
+ VDPU_REG_INTRA_VLC_TAB(pic->flags & V4L2_MPEG2_PIC_FLAG_INTRA_VLC) |
+ VDPU_REG_FRAME_PRED_DCT(pic->flags & V4L2_MPEG2_PIC_FLAG_FRAME_PRED_DCT);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(122));
+
+ reg = VDPU_REG_ALT_SCAN_FLAG_E(pic->flags & V4L2_MPEG2_PIC_FLAG_ALT_SCAN) |
+ VDPU_REG_FCODE_FWD_HOR(pic->f_code[0][0]) |
+ VDPU_REG_FCODE_FWD_VER(pic->f_code[0][1]) |
+ VDPU_REG_FCODE_BWD_HOR(pic->f_code[1][0]) |
+ VDPU_REG_FCODE_BWD_VER(pic->f_code[1][1]) |
+ VDPU_REG_MV_ACCURACY_FWD(1) |
+ VDPU_REG_MV_ACCURACY_BWD(1);
+ vdpu_write_relaxed(vpu, reg, VDPU_SWREG(136));
+
+ rockchip_vpu2_mpeg2_dec_set_quantisation(vpu, ctx);
+
+ rockchip_vpu2_mpeg2_dec_set_buffers(vpu, ctx, &src_buf->vb2_buf,
+ &dst_buf->vb2_buf, seq, pic);
+
+ /* Kick the watchdog and start decoding */
+ hantro_end_prepare_run(ctx);
+
+ reg = vdpu_read(vpu, VDPU_SWREG(57)) | VDPU_REG_DEC_E(1);
+ vdpu_write(vpu, reg, VDPU_SWREG(57));
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
new file mode 100644
index 000000000000..d079075448c9
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_hw_vp8_dec.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Rockchip VPU codec vp8 decode driver
+ *
+ * Copyright (C) 2014 Rockchip Electronics Co., Ltd.
+ * ZhiChao Yu <zhichao.yu@rock-chips.com>
+ *
+ * Copyright (C) 2014 Google LLC.
+ * Tomasz Figa <tfiga@chromium.org>
+ *
+ * Copyright (C) 2015 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <alpha.lin@rock-chips.com>
+ */
+
+#include <media/v4l2-mem2mem.h>
+
+#include "hantro_hw.h"
+#include "hantro.h"
+#include "hantro_g1_regs.h"
+
+#define VDPU_REG_DEC_CTRL0 0x0c8
+#define VDPU_REG_STREAM_LEN 0x0cc
+#define VDPU_REG_DEC_FORMAT 0x0d4
+#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 0)
+#define VDPU_REG_DATA_ENDIAN 0x0d8
+#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(4)
+#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(3)
+#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(2)
+#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(1)
+#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(0)
+#define VDPU_REG_AXI_CTRL 0x0e0
+#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_EN_FLAGS 0x0e4
+#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(14)
+#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(4)
+#define VDPU_REG_PRED_FLT 0x0ec
+#define VDPU_REG_ADDR_QTABLE 0x0f4
+#define VDPU_REG_ADDR_DST 0x0fc
+#define VDPU_REG_ADDR_STR 0x100
+#define VDPU_REG_VP8_PIC_MB_SIZE 0x1e0
+#define VDPU_REG_VP8_DCT_START_BIT 0x1e4
+#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define VDPU_REG_VP8_CTRL0 0x1e8
+#define VDPU_REG_VP8_DATA_VAL 0x1f0
+#define VDPU_REG_PRED_FLT7 0x1f4
+#define VDPU_REG_PRED_FLT8 0x1f8
+#define VDPU_REG_PRED_FLT9 0x1fc
+#define VDPU_REG_PRED_FLT10 0x200
+#define VDPU_REG_FILTER_LEVEL 0x204
+#define VDPU_REG_VP8_QUANTER0 0x208
+#define VDPU_REG_VP8_ADDR_REF0 0x20c
+#define VDPU_REG_FILTER_MB_ADJ 0x210
+#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define VDPU_REG_FILTER_REF_ADJ 0x214
+#define VDPU_REG_VP8_ADDR_REF2_5(i) (0x218 + ((i) * 0x4))
+#define VDPU_REG_VP8_GREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_AREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_DCT_BASE(i) \
+ (0x230 + ((((i) < 5) ? (i) : ((i) + 1)) * 0x4))
+#define VDPU_REG_VP8_ADDR_CTRL_PART 0x244
+#define VDPU_REG_VP8_SEGMENT_VAL 0x254
+#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define VDPU_REG_VP8_DCT_START_BIT2 0x258
+#define VDPU_REG_VP8_QUANTER1 0x25c
+#define VDPU_REG_VP8_QUANTER2 0x260
+#define VDPU_REG_PRED_FLT1 0x264
+#define VDPU_REG_PRED_FLT2 0x268
+#define VDPU_REG_PRED_FLT3 0x26c
+#define VDPU_REG_PRED_FLT4 0x270
+#define VDPU_REG_PRED_FLT5 0x274
+#define VDPU_REG_PRED_FLT6 0x278
+
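+/*
+ * Base address registers for up to 8 DCT partitions; the first partition
+ * reuses the stream base address register.
+ */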
+static const struct hantro_reg vp8_dec_dct_base[8] = {
+ { VDPU_REG_ADDR_STR, 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(0), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(1), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(2), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(3), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(4), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(5), 0, 0xffffffff },
+ { VDPU_REG_VP8_DCT_BASE(6), 0, 0xffffffff },
+};
+
+static const struct hantro_reg vp8_dec_lf_level[4] = {
+ { VDPU_REG_FILTER_LEVEL, 18, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 12, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 6, 0x3f },
+ { VDPU_REG_FILTER_LEVEL, 0, 0x3f },
+};
+
+static const struct hantro_reg vp8_dec_mb_adj[4] = {
+ { VDPU_REG_FILTER_MB_ADJ, 21, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 14, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 7, 0x7f },
+ { VDPU_REG_FILTER_MB_ADJ, 0, 0x7f },
+};
+
+static const struct hantro_reg vp8_dec_ref_adj[4] = {
+ { VDPU_REG_FILTER_REF_ADJ, 21, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 14, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 7, 0x7f },
+ { VDPU_REG_FILTER_REF_ADJ, 0, 0x7f },
+};
+
+static const struct hantro_reg vp8_dec_quant[4] = {
+ { VDPU_REG_VP8_QUANTER0, 11, 0x7ff },
+ { VDPU_REG_VP8_QUANTER0, 0, 0x7ff },
+ { VDPU_REG_VP8_QUANTER1, 11, 0x7ff },
+ { VDPU_REG_VP8_QUANTER1, 0, 0x7ff },
+};
+
+static const struct hantro_reg vp8_dec_quant_delta[5] = {
+ { VDPU_REG_VP8_QUANTER0, 27, 0x1f },
+ { VDPU_REG_VP8_QUANTER0, 22, 0x1f },
+ { VDPU_REG_VP8_QUANTER1, 27, 0x1f },
+ { VDPU_REG_VP8_QUANTER1, 22, 0x1f },
+ { VDPU_REG_VP8_QUANTER2, 27, 0x1f },
+};
+
+static const struct hantro_reg vp8_dec_dct_start_bits[8] = {
+ { VDPU_REG_VP8_CTRL0, 26, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT, 26, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT, 20, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 24, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 18, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 12, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 6, 0x3f },
+ { VDPU_REG_VP8_DCT_START_BIT2, 0, 0x3f },
+};
+
+static const struct hantro_reg vp8_dec_pred_bc_tap[8][6] = {
+ {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT1, 22, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT1, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT1, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT2, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT2, 12, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 10, 0x3 },
+ { VDPU_REG_PRED_FLT2, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT3, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 8, 0x3},
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT4, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT4, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT4, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT5, 22, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 6, 0x3 },
+ { VDPU_REG_PRED_FLT5, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT5, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT6, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT6, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 4, 0x3 },
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT6, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT7, 2, 0x3ff },
+ { 0, 0, 0},
+ }, {
+ { VDPU_REG_PRED_FLT10, 2, 0x3 },
+ { VDPU_REG_PRED_FLT8, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT8, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT8, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT9, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 0, 0x3 },
+ }, {
+ { 0, 0, 0},
+ { VDPU_REG_PRED_FLT9, 12, 0x3ff },
+ { VDPU_REG_PRED_FLT9, 2, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 22, 0x3ff },
+ { VDPU_REG_PRED_FLT10, 12, 0x3ff },
+ { 0, 0, 0},
+ },
+};
+
+static const struct hantro_reg vp8_dec_mb_start_bit = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 18,
+ .mask = 0x3f
+};
+
+static const struct hantro_reg vp8_dec_mb_aligned_data_len = {
+ .base = VDPU_REG_VP8_DATA_VAL,
+ .shift = 0,
+ .mask = 0x3fffff
+};
+
+static const struct hantro_reg vp8_dec_num_dct_partitions = {
+ .base = VDPU_REG_VP8_DATA_VAL,
+ .shift = 24,
+ .mask = 0xf
+};
+
+static const struct hantro_reg vp8_dec_stream_len = {
+ .base = VDPU_REG_STREAM_LEN,
+ .shift = 0,
+ .mask = 0xffffff
+};
+
+static const struct hantro_reg vp8_dec_mb_width = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 23,
+ .mask = 0x1ff
+};
+
+static const struct hantro_reg vp8_dec_mb_height = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 11,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_mb_width_ext = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 3,
+ .mask = 0x7
+};
+
+static const struct hantro_reg vp8_dec_mb_height_ext = {
+ .base = VDPU_REG_VP8_PIC_MB_SIZE,
+ .shift = 0,
+ .mask = 0x7
+};
+
+static const struct hantro_reg vp8_dec_bool_range = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 0,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_bool_value = {
+ .base = VDPU_REG_VP8_CTRL0,
+ .shift = 8,
+ .mask = 0xff
+};
+
+static const struct hantro_reg vp8_dec_filter_disable = {
+ .base = VDPU_REG_DEC_CTRL0,
+ .shift = 8,
+ .mask = 1
+};
+
+static const struct hantro_reg vp8_dec_skip_mode = {
+ .base = VDPU_REG_DEC_CTRL0,
+ .shift = 9,
+ .mask = 1
+};
+
+static const struct hantro_reg vp8_dec_start_dec = {
+ .base = VDPU_REG_EN_FLAGS,
+ .shift = 0,
+ .mask = 1
+};
+
+static void cfg_lf(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ const struct v4l2_vp8_loop_filter *lf = &hdr->lf;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+ u32 reg;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_lf_level[0], lf->level);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 lf_level = clamp(lf->level + seg->lf_update[i],
+ 0, 63);
+
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i], lf_level);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_lf_level[i],
+ seg->lf_update[i]);
+ }
+
+ reg = VDPU_REG_REF_PIC_FILT_SHARPNESS(lf->sharpness_level);
+ if (lf->flags & V4L2_VP8_LF_FILTER_TYPE_SIMPLE)
+ reg |= VDPU_REG_REF_PIC_FILT_TYPE_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_FILTER_MB_ADJ);
+
+ if (lf->flags & V4L2_VP8_LF_ADJ_ENABLE) {
+ for (i = 0; i < 4; i++) {
+ hantro_reg_write(vpu, &vp8_dec_mb_adj[i],
+ lf->mb_mode_delta[i]);
+ hantro_reg_write(vpu, &vp8_dec_ref_adj[i],
+ lf->ref_frm_delta[i]);
+ }
+ }
+}
+
+static void cfg_qp(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ const struct v4l2_vp8_quantization *q = &hdr->quant;
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ unsigned int i;
+
+ if (!(seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED)) {
+ hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi);
+ } else if (seg->flags & V4L2_VP8_SEGMENT_FLAG_DELTA_VALUE_MODE) {
+ for (i = 0; i < 4; i++) {
+ u32 quant = clamp(q->y_ac_qi + seg->quant_update[i],
+ 0, 127);
+
+ hantro_reg_write(vpu, &vp8_dec_quant[i], quant);
+ }
+ } else {
+ for (i = 0; i < 4; i++)
+ hantro_reg_write(vpu, &vp8_dec_quant[i],
+ seg->quant_update[i]);
+ }
+
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta);
+ hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta);
+}
+
+static void cfg_parts(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_src;
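+ /* VP8 uncompressed frame header: 10 bytes on key frames, 3 otherwise. */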
+ u32 first_part_offset = V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) ? 10 : 3;
+ u32 mb_size, mb_offset_bytes, mb_offset_bits, mb_start_bits;
+ u32 dct_size_part_size, dct_part_offset;
+ dma_addr_t src_dma;
+ u32 dct_part_total_len = 0;
+ u32 count = 0;
+ unsigned int i;
+
+ vb2_src = hantro_get_src_buf(ctx);
+ src_dma = vb2_dma_contig_plane_dma_addr(&vb2_src->vb2_buf, 0);
+
+ /*
+ * Calculate control partition macroblock data info
+ * @first_part_header_bits: bit offset of the mb data from the start
+ * of the first partition
+ * @mb_offset_bits: bit offset of the mb data from the src_dma
+ * base address
+ * @mb_offset_bytes: byte offset of the mb data from the src_dma
+ * base address
+ * @mb_start_bits: bit offset of the mb data from its 64-bit
+ * aligned address
+ */
+ mb_offset_bits = first_part_offset * 8 +
+ hdr->first_part_header_bits + 8;
+ mb_offset_bytes = mb_offset_bits / 8;
+ mb_start_bits = mb_offset_bits -
+ (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) * 8;
+ mb_size = hdr->first_part_size -
+ (mb_offset_bytes - first_part_offset) +
+ (mb_offset_bytes & DEC_8190_ALIGN_MASK);
+
+ /* Macroblock data aligned base addr */
+ vdpu_write_relaxed(vpu, (mb_offset_bytes & (~DEC_8190_ALIGN_MASK)) +
+ src_dma, VDPU_REG_VP8_ADDR_CTRL_PART);
+ hantro_reg_write(vpu, &vp8_dec_mb_start_bit, mb_start_bits);
+ hantro_reg_write(vpu, &vp8_dec_mb_aligned_data_len, mb_size);
+
+ /*
+ * Calculate DCT partition info
+ * @dct_size_part_size: size of the area holding the DCT partition
+ * sizes; every DCT partition except the last one has a 3-byte
+ * size entry
+ * @dct_part_offset: byte offset of the DCT partitions from the
+ * src_dma base address
+ * @dct_part_total_len: total size of all DCT partitions
+ */
+ dct_size_part_size = (hdr->num_dct_parts - 1) * 3;
+ dct_part_offset = first_part_offset + hdr->first_part_size;
+ for (i = 0; i < hdr->num_dct_parts; i++)
+ dct_part_total_len += hdr->dct_part_sizes[i];
+ dct_part_total_len += dct_size_part_size;
+ dct_part_total_len += (dct_part_offset & DEC_8190_ALIGN_MASK);
+
+ /* Number of DCT partitions */
+ hantro_reg_write(vpu, &vp8_dec_num_dct_partitions,
+ hdr->num_dct_parts - 1);
+
+ /* DCT partition length */
+ hantro_reg_write(vpu, &vp8_dec_stream_len, dct_part_total_len);
+
+ /* DCT partitions base address */
+ for (i = 0; i < hdr->num_dct_parts; i++) {
+ u32 byte_offset = dct_part_offset + dct_size_part_size + count;
+ u32 base_addr = byte_offset + src_dma;
+
+ hantro_reg_write(vpu, &vp8_dec_dct_base[i],
+ base_addr & (~DEC_8190_ALIGN_MASK));
+
+ hantro_reg_write(vpu, &vp8_dec_dct_start_bits[i],
+ (byte_offset & DEC_8190_ALIGN_MASK) * 8);
+
+ count += hdr->dct_part_sizes[i];
+ }
+}
+
+/*
+ * Program the sub-pixel prediction filter taps (the normal 6-tap filters).
+ */
+static void cfg_tap(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ int i, j;
+
+ if ((hdr->version & 0x03) != 0)
+ return; /* Tap filter not used. */
+
+ for (i = 0; i < 8; i++) {
+ for (j = 0; j < 6; j++) {
+ if (vp8_dec_pred_bc_tap[i][j].base != 0)
+ hantro_reg_write(vpu,
+ &vp8_dec_pred_bc_tap[i][j],
+ hantro_vp8_dec_mc_filter[i][j]);
+ }
+ }
+}
+
+static void cfg_ref(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t ref;
+
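+ /* Fall back to the destination buffer when a reference frame is missing. */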
+ ref = hantro_get_ref(ctx, hdr->last_frame_ts);
+ if (!ref) {
+ vpu_debug(0, "failed to find last frame ts=%llu\n",
+ hdr->last_frame_ts);
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ }
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF0);
+
+ ref = hantro_get_ref(ctx, hdr->golden_frame_ts);
+ if (!ref && hdr->golden_frame_ts)
+ vpu_debug(0, "failed to find golden frame ts=%llu\n",
+ hdr->golden_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_GOLDEN)
+ ref |= VDPU_REG_VP8_GREF_SIGN_BIAS;
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(2));
+
+ ref = hantro_get_ref(ctx, hdr->alt_frame_ts);
+ if (!ref && hdr->alt_frame_ts)
+ vpu_debug(0, "failed to find alt frame ts=%llu\n",
+ hdr->alt_frame_ts);
+ if (!ref)
+ ref = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ if (hdr->flags & V4L2_VP8_FRAME_FLAG_SIGN_BIAS_ALT)
+ ref |= VDPU_REG_VP8_AREF_SIGN_BIAS;
+ vdpu_write_relaxed(vpu, ref, VDPU_REG_VP8_ADDR_REF2_5(3));
+}
+
+static void cfg_buffers(struct hantro_ctx *ctx,
+ const struct v4l2_ctrl_vp8_frame *hdr,
+ struct vb2_v4l2_buffer *vb2_dst)
+{
+ const struct v4l2_vp8_segment *seg = &hdr->segment;
+ struct hantro_dev *vpu = ctx->dev;
+ dma_addr_t dst_dma;
+ u32 reg;
+
+ /* Set probability table buffer address */
+ vdpu_write_relaxed(vpu, ctx->vp8_dec.prob_tbl.dma,
+ VDPU_REG_ADDR_QTABLE);
+
+ /* Set segment map address */
+ reg = VDPU_REG_FWD_PIC1_SEGMENT_BASE(ctx->vp8_dec.segment_map.dma);
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_ENABLED) {
+ reg |= VDPU_REG_FWD_PIC1_SEGMENT_E;
+ if (seg->flags & V4L2_VP8_SEGMENT_FLAG_UPDATE_MAP)
+ reg |= VDPU_REG_FWD_PIC1_SEGMENT_UPD_E;
+ }
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_SEGMENT_VAL);
+
+ /* Set output frame buffer address */
+ dst_dma = vb2_dma_contig_plane_dma_addr(&vb2_dst->vb2_buf, 0);
+ vdpu_write_relaxed(vpu, dst_dma, VDPU_REG_ADDR_DST);
+}
+
+int rockchip_vpu2_vp8_dec_run(struct hantro_ctx *ctx)
+{
+ const struct v4l2_ctrl_vp8_frame *hdr;
+ struct hantro_dev *vpu = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_dst;
+ size_t height = ctx->dst_fmt.height;
+ size_t width = ctx->dst_fmt.width;
+ u32 mb_width, mb_height;
+ u32 reg;
+
+ hantro_start_prepare_run(ctx);
+
+ hdr = hantro_get_ctrl(ctx, V4L2_CID_STATELESS_VP8_FRAME);
+ if (WARN_ON(!hdr))
+ return -EINVAL;
+
+ /* Reset the segment_map buffer on key frames */
+ if (V4L2_VP8_FRAME_IS_KEY_FRAME(hdr) && ctx->vp8_dec.segment_map.cpu)
+ memset(ctx->vp8_dec.segment_map.cpu, 0,
+ ctx->vp8_dec.segment_map.size);
+
+ hantro_vp8_prob_update(ctx, hdr);
+
+ /*
+ * Extensive testing shows that the hardware does not properly
+ * clear the internal state from a previous decoding run. This
+ * causes corruption in decoded frames for multi-instance use cases.
+ * A soft reset before programming the registers has been found
+ * to resolve those problems.
+ */
+ ctx->codec_ops->reset(ctx);
+
+ reg = VDPU_REG_CONFIG_DEC_TIMEOUT_E
+ | VDPU_REG_CONFIG_DEC_CLK_GATE_E;
+ if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
+ reg |= VDPU_REG_DEC_CTRL0_PIC_INTER_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_EN_FLAGS);
+
+ reg = VDPU_REG_CONFIG_DEC_STRENDIAN_E
+ | VDPU_REG_CONFIG_DEC_INSWAP32_E
+ | VDPU_REG_CONFIG_DEC_STRSWAP32_E
+ | VDPU_REG_CONFIG_DEC_OUTSWAP32_E
+ | VDPU_REG_CONFIG_DEC_IN_ENDIAN
+ | VDPU_REG_CONFIG_DEC_OUT_ENDIAN;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_DATA_ENDIAN);
+
+ reg = VDPU_REG_CONFIG_DEC_MAX_BURST(16);
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_AXI_CTRL);
+
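+ /* Decoder mode 10 selects VP8 decoding. */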
+ reg = VDPU_REG_DEC_CTRL0_DEC_MODE(10);
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_DEC_FORMAT);
+
+ if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
+ hantro_reg_write(vpu, &vp8_dec_skip_mode, 1);
+ if (hdr->lf.level == 0)
+ hantro_reg_write(vpu, &vp8_dec_filter_disable, 1);
+
+ /* Frame dimensions */
+ mb_width = MB_WIDTH(width);
+ mb_height = MB_HEIGHT(height);
+
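+ /* The MSBs of the macroblock counts go into the separate extension fields. */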
+ hantro_reg_write(vpu, &vp8_dec_mb_width, mb_width);
+ hantro_reg_write(vpu, &vp8_dec_mb_height, mb_height);
+ hantro_reg_write(vpu, &vp8_dec_mb_width_ext, mb_width >> 9);
+ hantro_reg_write(vpu, &vp8_dec_mb_height_ext, mb_height >> 8);
+
+ /* Boolean decoder */
+ hantro_reg_write(vpu, &vp8_dec_bool_range, hdr->coder_state.range);
+ hantro_reg_write(vpu, &vp8_dec_bool_value, hdr->coder_state.value);
+
+ reg = vdpu_read(vpu, VDPU_REG_VP8_DCT_START_BIT);
+ if (hdr->version != 3)
+ reg |= VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT;
+ if (hdr->version & 0x3)
+ reg |= VDPU_REG_DEC_CTRL4_BILIN_MC_E;
+ vdpu_write_relaxed(vpu, reg, VDPU_REG_VP8_DCT_START_BIT);
+
+ cfg_lf(ctx, hdr);
+ cfg_qp(ctx, hdr);
+ cfg_parts(ctx, hdr);
+ cfg_tap(ctx, hdr);
+
+ vb2_dst = hantro_get_dst_buf(ctx);
+ cfg_ref(ctx, hdr, vb2_dst);
+ cfg_buffers(ctx, hdr, vb2_dst);
+
+ hantro_end_prepare_run(ctx);
+
+ hantro_reg_write(vpu, &vp8_dec_start_dec, 1);
+
+ return 0;
+}
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h b/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h
new file mode 100644
index 000000000000..49e40889545b
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu2_regs.h
@@ -0,0 +1,600 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Alpha Lin <alpha.lin@rock-chips.com>
+ */
+
+#ifndef ROCKCHIP_VPU2_REGS_H_
+#define ROCKCHIP_VPU2_REGS_H_
+
+/* Encoder registers. */
+#define VEPU_REG_VP8_QUT_1ST(i) (0x000 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_DC_Y2(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_DC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_2ND(i) (0x004 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_AC_Y1(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_DC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_3RD(i) (0x008 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_AC_CHR(x) (((x) & 0x3fff) << 16)
+#define VEPU_REG_VP8_QUT_AC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_QUT_4TH(i) (0x00c + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_ZB_DC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_QUT_ZB_DC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_QUT_ZB_DC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_QUT_5TH(i) (0x010 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_ZB_AC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_QUT_ZB_AC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_QUT_ZB_AC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_QUT_6TH(i) (0x014 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_RND_DC_CHR(x) (((x) & 0xff) << 16)
+#define VEPU_REG_VP8_QUT_RND_DC_Y2(x) (((x) & 0xff) << 8)
+#define VEPU_REG_VP8_QUT_RND_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_7TH(i) (0x018 + ((i) * 0x24))
+#define VEPU_REG_VP8_QUT_RND_AC_CHR(x) (((x) & 0xff) << 16)
+#define VEPU_REG_VP8_QUT_RND_AC_Y2(x) (((x) & 0xff) << 8)
+#define VEPU_REG_VP8_QUT_RND_AC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_8TH(i) (0x01c + ((i) * 0x24))
+#define VEPU_REG_VP8_SEG_FILTER_LEVEL(x) (((x) & 0x3f) << 25)
+#define VEPU_REG_VP8_DEQUT_DC_CHR(x) (((x) & 0xff) << 17)
+#define VEPU_REG_VP8_DEQUT_DC_Y2(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_VP8_DEQUT_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_VP8_QUT_9TH(i) (0x020 + ((i) * 0x24))
+#define VEPU_REG_VP8_DEQUT_AC_CHR(x) (((x) & 0x1ff) << 18)
+#define VEPU_REG_VP8_DEQUT_AC_Y2(x) (((x) & 0x1ff) << 9)
+#define VEPU_REG_VP8_DEQUT_AC_Y1(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_ADDR_VP8_SEG_MAP 0x06c
+#define VEPU_REG_VP8_INTRA_4X4_PENALTY(i) (0x070 + ((i) * 0x4))
+#define VEPU_REG_VP8_INTRA_4X4_PENALTY_0(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_INTRA_4x4_PENALTY_1(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY(i) (0x084 + ((i) * 0x4))
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY_0(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_INTRA_16X16_PENALTY_1(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_CONTROL 0x0a0
+#define VEPU_REG_VP8_LF_MODE_DELTA_BPRED(x) (((x) & 0x1f) << 24)
+#define VEPU_REG_VP8_LF_REF_DELTA_INTRA_MB(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_INTER_TYPE_BIT_COST(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_REF_FRAME_VAL 0x0a4
+#define VEPU_REG_VP8_COEF_DMV_PENALTY(x) (((x) & 0xfff) << 16)
+#define VEPU_REG_VP8_REF_FRAME(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_LOOP_FILTER_REF_DELTA 0x0a8
+#define VEPU_REG_VP8_LF_REF_DELTA_ALT_REF(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_LF_REF_DELTA_LAST_REF(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_VP8_LF_REF_DELTA_GOLDEN(x) (((x) & 0x7f) << 0)
+#define VEPU_REG_VP8_LOOP_FILTER_MODE_DELTA 0x0ac
+#define VEPU_REG_VP8_LF_MODE_DELTA_SPLITMV(x) (((x) & 0x7f) << 16)
+#define VEPU_REG_VP8_LF_MODE_DELTA_ZEROMV(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_VP8_LF_MODE_DELTA_NEWMV(x) (((x) & 0x7f) << 0)
+#define VEPU_REG_JPEG_LUMA_QUAT(i) (0x000 + ((i) * 0x4))
+#define VEPU_REG_JPEG_CHROMA_QUAT(i) (0x040 + ((i) * 0x4))
+#define VEPU_REG_INTRA_SLICE_BITMAP(i) (0x0b0 + ((i) * 0x4))
+#define VEPU_REG_ADDR_VP8_DCT_PART(i) (0x0b0 + ((i) * 0x4))
+#define VEPU_REG_INTRA_AREA_CTRL 0x0b8
+#define VEPU_REG_INTRA_AREA_TOP(x) (((x) & 0xff) << 24)
+#define VEPU_REG_INTRA_AREA_BOTTOM(x) (((x) & 0xff) << 16)
+#define VEPU_REG_INTRA_AREA_LEFT(x) (((x) & 0xff) << 8)
+#define VEPU_REG_INTRA_AREA_RIGHT(x) (((x) & 0xff) << 0)
+#define VEPU_REG_CIR_INTRA_CTRL 0x0bc
+#define VEPU_REG_CIR_INTRA_FIRST_MB(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_CIR_INTRA_INTERVAL(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ADDR_IN_PLANE_0 0x0c0
+#define VEPU_REG_ADDR_IN_PLANE_1 0x0c4
+#define VEPU_REG_ADDR_IN_PLANE_2 0x0c8
+#define VEPU_REG_STR_HDR_REM_MSB 0x0cc
+#define VEPU_REG_STR_HDR_REM_LSB 0x0d0
+#define VEPU_REG_STR_BUF_LIMIT 0x0d4
+#define VEPU_REG_AXI_CTRL 0x0d8
+#define VEPU_REG_AXI_CTRL_READ_ID(x) (((x) & 0xff) << 24)
+#define VEPU_REG_AXI_CTRL_WRITE_ID(x) (((x) & 0xff) << 16)
+#define VEPU_REG_AXI_CTRL_BURST_LEN(x) (((x) & 0x3f) << 8)
+#define VEPU_REG_AXI_CTRL_INCREMENT_MODE(x) (((x) & 0x01) << 2)
+#define VEPU_REG_AXI_CTRL_BIRST_DISCARD(x) (((x) & 0x01) << 1)
+#define VEPU_REG_AXI_CTRL_BIRST_DISABLE BIT(0)
+#define VEPU_QP_ADJUST_MAD_DELTA_ROI 0x0dc
+#define VEPU_REG_ROI_QP_DELTA_1(x) (((x) & 0xf) << 12)
+#define VEPU_REG_ROI_QP_DELTA_2(x) (((x) & 0xf) << 8)
+#define VEPU_REG_MAD_QP_ADJUSTMENT(x) (((x) & 0xf) << 0)
+#define VEPU_REG_ADDR_REF_LUMA 0x0e0
+#define VEPU_REG_ADDR_REF_CHROMA 0x0e4
+#define VEPU_REG_QP_SUM_DIV2 0x0e8
+#define VEPU_REG_QP_SUM(x) (((x) & 0x001fffff) * 2)
+#define VEPU_REG_ENC_CTRL0 0x0ec
+#define VEPU_REG_DISABLE_QUARTER_PIXEL_MV BIT(28)
+#define VEPU_REG_DEBLOCKING_FILTER_MODE(x) (((x) & 0x3) << 24)
+#define VEPU_REG_CABAC_INIT_IDC(x) (((x) & 0x3) << 21)
+#define VEPU_REG_ENTROPY_CODING_MODE BIT(20)
+#define VEPU_REG_H264_TRANS8X8_MODE BIT(17)
+#define VEPU_REG_H264_INTER4X4_MODE BIT(16)
+#define VEPU_REG_H264_STREAM_MODE BIT(15)
+#define VEPU_REG_H264_SLICE_SIZE(x) (((x) & 0x7f) << 8)
+#define VEPU_REG_ENC_OVER_FILL_STRM_OFFSET 0x0f0
+#define VEPU_REG_STREAM_START_OFFSET(x) (((x) & 0x3f) << 16)
+#define VEPU_REG_SKIP_MACROBLOCK_PENALTY(x) (((x) & 0xff) << 8)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLR_D4(x) (((x) & 0x3) << 4)
+#define VEPU_REG_IN_IMG_CTRL_OVRFLB(x) (((x) & 0xf) << 0)
+#define VEPU_REG_INPUT_LUMA_INFO 0x0f4
+#define VEPU_REG_IN_IMG_CHROMA_OFFSET(x) (((x) & 0x7) << 20)
+#define VEPU_REG_IN_IMG_LUMA_OFFSET(x) (((x) & 0x7) << 16)
+#define VEPU_REG_IN_IMG_CTRL_ROW_LEN(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_RLC_SUM 0x0f8
+#define VEPU_REG_RLC_SUM_OUT(x) (((x) & 0x007fffff) * 4)
+#define VEPU_REG_SPLIT_PENALTY_4X4 0x0f8
+#define VEPU_REG_VP8_SPLIT_PENALTY_4X4(x) (((x) & 0x1ff) << 19)
+#define VEPU_REG_ADDR_REC_LUMA 0x0fc
+#define VEPU_REG_ADDR_REC_CHROMA 0x100
+#define VEPU_REG_CHECKPOINT(i) (0x104 + ((i) * 0x4))
+#define VEPU_REG_CHECKPOINT_CHECK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHECKPOINT_CHECK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_CHECKPOINT_RESULT(x) \
+ ((((x) >> (16 - 16 * ((i) & 1))) & 0xffff) * 32)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_Y1 0x104
+#define VEPU_REG_VP8_SEG0_RND_AC_Y1(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_Y1(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_Y2 0x108
+#define VEPU_REG_VP8_SEG0_RND_DC_Y2(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_Y2(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_Y2 0x10c
+#define VEPU_REG_VP8_SEG0_RND_AC_Y2(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_Y2(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_Y2(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_CHR 0x110
+#define VEPU_REG_VP8_SEG0_RND_DC_CHR(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_CHR(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_AC_CHR 0x114
+#define VEPU_REG_VP8_SEG0_RND_AC_CHR(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_AC_CHR(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_AC_CHR(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DQUT 0x118
+#define VEPU_REG_VP8_MV_REF_IDX1(x) (((x) & 0x03) << 26)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_Y2(x) (((x) & 0x1ff) << 17)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_Y1(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_Y1(x) (((x) & 0xff) << 0)
+#define VEPU_REG_CHKPT_WORD_ERR(i) (0x118 + ((i) * 0x4))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK0(x) (((x) & 0xffff))
+#define VEPU_REG_CHKPT_WORD_ERR_CHK1(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_VP8_SEG0_QUANT_DQUT_1 0x11c
+#define VEPU_REG_VP8_SEGMENT_MAP_UPDATE BIT(30)
+#define VEPU_REG_VP8_SEGMENT_EN BIT(29)
+#define VEPU_REG_VP8_MV_REF_IDX2_EN BIT(28)
+#define VEPU_REG_VP8_MV_REF_IDX2(x) (((x) & 0x03) << 26)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_CHR(x) (((x) & 0x1ff) << 17)
+#define VEPU_REG_VP8_SEG0_DQUT_DC_CHR(x) (((x) & 0xff) << 9)
+#define VEPU_REG_VP8_SEG0_DQUT_AC_Y2(x) (((x) & 0x1ff) << 0)
+#define VEPU_REG_VP8_BOOL_ENC_VALUE 0x120
+#define VEPU_REG_CHKPT_DELTA_QP 0x124
+#define VEPU_REG_CHKPT_DELTA_QP_CHK0(x) (((x) & 0x0f) << 0)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK1(x) (((x) & 0x0f) << 4)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK2(x) (((x) & 0x0f) << 8)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK3(x) (((x) & 0x0f) << 12)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK4(x) (((x) & 0x0f) << 16)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK5(x) (((x) & 0x0f) << 20)
+#define VEPU_REG_CHKPT_DELTA_QP_CHK6(x) (((x) & 0x0f) << 24)
+#define VEPU_REG_VP8_ENC_CTRL2 0x124
+#define VEPU_REG_VP8_ZERO_MV_PENALTY_FOR_REF2(x) (((x) & 0xff) << 24)
+#define VEPU_REG_VP8_FILTER_SHARPNESS(x) (((x) & 0x07) << 21)
+#define VEPU_REG_VP8_FILTER_LEVEL(x) (((x) & 0x3f) << 15)
+#define VEPU_REG_VP8_DCT_PARTITION_CNT(x) (((x) & 0x03) << 13)
+#define VEPU_REG_VP8_BOOL_ENC_VALUE_BITS(x) (((x) & 0x1f) << 8)
+#define VEPU_REG_VP8_BOOL_ENC_RANGE(x) (((x) & 0xff) << 0)
+#define VEPU_REG_ENC_CTRL1 0x128
+#define VEPU_REG_MAD_THRESHOLD(x) (((x) & 0x3f) << 24)
+#define VEPU_REG_COMPLETED_SLICES(x) (((x) & 0xff) << 16)
+#define VEPU_REG_IN_IMG_CTRL_FMT(x) (((x) & 0xf) << 4)
+#define VEPU_REG_IN_IMG_ROTATE_MODE(x) (((x) & 0x3) << 2)
+#define VEPU_REG_SIZE_TABLE_PRESENT BIT(0)
+#define VEPU_REG_INTRA_INTER_MODE 0x12c
+#define VEPU_REG_INTRA16X16_MODE(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_INTER_MODE(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ENC_CTRL2 0x130
+#define VEPU_REG_PPS_INIT_QP(x) (((x) & 0x3f) << 26)
+#define VEPU_REG_SLICE_FILTER_ALPHA(x) (((x) & 0xf) << 22)
+#define VEPU_REG_SLICE_FILTER_BETA(x) (((x) & 0xf) << 18)
+#define VEPU_REG_CHROMA_QP_OFFSET(x) (((x) & 0x1f) << 13)
+#define VEPU_REG_FILTER_DISABLE BIT(5)
+#define VEPU_REG_IDR_PIC_ID(x) (((x) & 0xf) << 1)
+#define VEPU_REG_CONSTRAINED_INTRA_PREDICTION BIT(0)
+#define VEPU_REG_ADDR_OUTPUT_STREAM 0x134
+#define VEPU_REG_ADDR_OUTPUT_CTRL 0x138
+#define VEPU_REG_ADDR_NEXT_PIC 0x13c
+#define VEPU_REG_ADDR_MV_OUT 0x140
+#define VEPU_REG_ADDR_CABAC_TBL 0x144
+#define VEPU_REG_ROI1 0x148
+#define VEPU_REG_ROI1_TOP_MB(x) (((x) & 0xff) << 24)
+#define VEPU_REG_ROI1_BOTTOM_MB(x) (((x) & 0xff) << 16)
+#define VEPU_REG_ROI1_LEFT_MB(x) (((x) & 0xff) << 8)
+#define VEPU_REG_ROI1_RIGHT_MB(x) (((x) & 0xff) << 0)
+#define VEPU_REG_ROI2 0x14c
+#define VEPU_REG_ROI2_TOP_MB(x) (((x) & 0xff) << 24)
+#define VEPU_REG_ROI2_BOTTOM_MB(x) (((x) & 0xff) << 16)
+#define VEPU_REG_ROI2_LEFT_MB(x) (((x) & 0xff) << 8)
+#define VEPU_REG_ROI2_RIGHT_MB(x) (((x) & 0xff) << 0)
+#define VEPU_REG_STABLE_MATRIX(i) (0x150 + ((i) * 0x4))
+#define VEPU_REG_STABLE_MOTION_SUM 0x174
+#define VEPU_REG_STABILIZATION_OUTPUT 0x178
+#define VEPU_REG_STABLE_MIN_VALUE(x) (((x) & 0xffffff) << 8)
+#define VEPU_REG_STABLE_MODE_SEL(x) (((x) & 0x3) << 6)
+#define VEPU_REG_STABLE_HOR_GMV(x) (((x) & 0x3f) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF1 0x17c
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFB(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFA(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF2 0x180
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFE(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFC(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB2YUV_CONVERSION_COEF3 0x184
+#define VEPU_REG_RGB2YUV_CONVERSION_COEFF(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_RGB_MASK_MSB 0x188
+#define VEPU_REG_RGB_MASK_B_MSB(x) (((x) & 0x1f) << 16)
+#define VEPU_REG_RGB_MASK_G_MSB(x) (((x) & 0x1f) << 8)
+#define VEPU_REG_RGB_MASK_R_MSB(x) (((x) & 0x1f) << 0)
+#define VEPU_REG_MV_PENALTY 0x18c
+#define VEPU_REG_1MV_PENALTY(x) (((x) & 0x3ff) << 21)
+#define VEPU_REG_QMV_PENALTY(x) (((x) & 0x3ff) << 11)
+#define VEPU_REG_4MV_PENALTY(x) (((x) & 0x3ff) << 1)
+#define VEPU_REG_SPLIT_MV_MODE_EN BIT(0)
+#define VEPU_REG_QP_VAL 0x190
+#define VEPU_REG_H264_LUMA_INIT_QP(x) (((x) & 0x3f) << 26)
+#define VEPU_REG_H264_QP_MAX(x) (((x) & 0x3f) << 20)
+#define VEPU_REG_H264_QP_MIN(x) (((x) & 0x3f) << 14)
+#define VEPU_REG_H264_CHKPT_DISTANCE(x) (((x) & 0xfff) << 0)
+#define VEPU_REG_VP8_SEG0_QUANT_DC_Y1 0x190
+#define VEPU_REG_VP8_SEG0_RND_DC_Y1(x) (((x) & 0xff) << 23)
+#define VEPU_REG_VP8_SEG0_ZBIN_DC_Y1(x) (((x) & 0x1ff) << 14)
+#define VEPU_REG_VP8_SEG0_QUT_DC_Y1(x) (((x) & 0x3fff) << 0)
+#define VEPU_REG_MVC_RELATE 0x198
+#define VEPU_REG_ZERO_MV_FAVOR_D2(x) (((x) & 0xf) << 20)
+#define VEPU_REG_PENALTY_4X4MV(x) (((x) & 0x1ff) << 11)
+#define VEPU_REG_MVC_VIEW_ID(x) (((x) & 0x7) << 8)
+#define VEPU_REG_MVC_ANCHOR_PIC_FLAG BIT(7)
+#define VEPU_REG_MVC_PRIORITY_ID(x) (((x) & 0x7) << 4)
+#define VEPU_REG_MVC_TEMPORAL_ID(x) (((x) & 0x7) << 1)
+#define VEPU_REG_MVC_INTER_VIEW_FLAG BIT(0)
+#define VEPU_REG_ENCODE_START 0x19c
+#define VEPU_REG_MB_HEIGHT(x) (((x) & 0x1ff) << 20)
+#define VEPU_REG_MB_WIDTH(x) (((x) & 0x1ff) << 8)
+#define VEPU_REG_FRAME_TYPE_INTER (0x0 << 6)
+#define VEPU_REG_FRAME_TYPE_INTRA (0x1 << 6)
+#define VEPU_REG_FRAME_TYPE_MVCINTER (0x2 << 6)
+#define VEPU_REG_ENCODE_FORMAT_JPEG (0x2 << 4)
+#define VEPU_REG_ENCODE_FORMAT_H264 (0x3 << 4)
+#define VEPU_REG_ENCODE_ENABLE BIT(0)
+#define VEPU_REG_MB_CTRL 0x1a0
+#define VEPU_REG_MB_CNT_OUT(x) (((x) & 0xffff) << 16)
+#define VEPU_REG_MB_CNT_SET(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_DATA_ENDIAN 0x1a4
+#define VEPU_REG_INPUT_SWAP8 BIT(31)
+#define VEPU_REG_INPUT_SWAP16 BIT(30)
+#define VEPU_REG_INPUT_SWAP32 BIT(29)
+#define VEPU_REG_OUTPUT_SWAP8 BIT(28)
+#define VEPU_REG_OUTPUT_SWAP16 BIT(27)
+#define VEPU_REG_OUTPUT_SWAP32 BIT(26)
+#define VEPU_REG_TEST_IRQ BIT(24)
+#define VEPU_REG_TEST_COUNTER(x) (((x) & 0xf) << 20)
+#define VEPU_REG_TEST_REG BIT(19)
+#define VEPU_REG_TEST_MEMORY BIT(18)
+#define VEPU_REG_TEST_LEN(x) (((x) & 0x3ffff) << 0)
+#define VEPU_REG_ENC_CTRL3 0x1a8
+#define VEPU_REG_PPS_ID(x) (((x) & 0xff) << 24)
+#define VEPU_REG_INTRA_PRED_MODE(x) (((x) & 0xff) << 16)
+#define VEPU_REG_FRAME_NUM(x) (((x) & 0xffff) << 0)
+#define VEPU_REG_ENC_CTRL4 0x1ac
+#define VEPU_REG_MV_PENALTY_16X8_8X16(x) (((x) & 0x3ff) << 20)
+#define VEPU_REG_MV_PENALTY_8X8(x) (((x) & 0x3ff) << 10)
+#define VEPU_REG_MV_PENALTY_8X4_4X8(x) (((x) & 0x3ff) << 0)
+#define VEPU_REG_ADDR_VP8_PROB_CNT 0x1b0
+#define VEPU_REG_INTERRUPT 0x1b4
+#define VEPU_REG_INTERRUPT_NON BIT(28)
+#define VEPU_REG_MV_WRITE_EN BIT(24)
+#define VEPU_REG_RECON_WRITE_DIS BIT(20)
+#define VEPU_REG_INTERRUPT_SLICE_READY_EN BIT(16)
+#define VEPU_REG_CLK_GATING_EN BIT(12)
+#define VEPU_REG_INTERRUPT_TIMEOUT_EN BIT(10)
+#define VEPU_REG_INTERRUPT_RESET BIT(9)
+#define VEPU_REG_INTERRUPT_DIS_BIT BIT(8)
+#define VEPU_REG_INTERRUPT_TIMEOUT BIT(6)
+#define VEPU_REG_INTERRUPT_BUFFER_FULL BIT(5)
+#define VEPU_REG_INTERRUPT_BUS_ERROR BIT(4)
+#define VEPU_REG_INTERRUPT_FUSE BIT(3)
+#define VEPU_REG_INTERRUPT_SLICE_READY BIT(2)
+#define VEPU_REG_INTERRUPT_FRAME_READY BIT(1)
+#define VEPU_REG_INTERRUPT_BIT BIT(0)
+#define VEPU_REG_DMV_PENALTY_TBL(i) (0x1E0 + ((i) * 0x4))
+#define VEPU_REG_DMV_PENALTY_TABLE_BIT(x, i) ((x) << (i) * 8)
+#define VEPU_REG_DMV_Q_PIXEL_PENALTY_TBL(i) (0x260 + ((i) * 0x4))
+#define VEPU_REG_DMV_Q_PIXEL_PENALTY_TABLE_BIT(x, i) ((x) << (i) * 8)
+
+/* vpu decoder register */
+#define VDPU_REG_DEC_CTRL0 0x0c8 /* 50 */
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_PICID(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_THR(x) (((x) & 0xfff) << 13)
+#define VDPU_REG_CONFIG_TILED_MODE_LSB BIT(12)
+#define VDPU_REG_CONFIG_DEC_ADV_PRE_DIS BIT(11)
+#define VDPU_REG_CONFIG_DEC_SCMD_DIS BIT(10)
+#define VDPU_REG_DEC_CTRL0_SKIP_MODE BIT(9)
+#define VDPU_REG_DEC_CTRL0_FILTERING_DIS BIT(8)
+#define VDPU_REG_DEC_CTRL0_PIC_FIXED_QUANT BIT(7)
+#define VDPU_REG_CONFIG_DEC_LATENCY(x) (((x) & 0x3f) << 1)
+#define VDPU_REG_CONFIG_TILED_MODE_MSB(x) BIT(0)
+#define VDPU_REG_CONFIG_DEC_OUT_TILED_E BIT(0)
+#define VDPU_REG_STREAM_LEN 0x0cc
+#define VDPU_REG_DEC_CTRL3_INIT_QP(x) (((x) & 0x3f) << 25)
+#define VDPU_REG_DEC_STREAM_LEN_HI BIT(24)
+#define VDPU_REG_DEC_CTRL3_STREAM_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_ERROR_CONCEALMENT 0x0d0
+#define VDPU_REG_REF_BUF_CTRL2_APF_THRESHOLD(x) (((x) & 0x3fff) << 17)
+#define VDPU_REG_ERR_CONC_STARTMB_X(x) (((x) & 0x1ff) << 8)
+#define VDPU_REG_ERR_CONC_STARTMB_Y(x) (((x) & 0xff) << 0)
+#define VDPU_REG_DEC_FORMAT 0x0d4
+#define VDPU_REG_DEC_CTRL0_DEC_MODE(x) (((x) & 0xf) << 0)
+#define VDPU_REG_DATA_ENDIAN 0x0d8
+#define VDPU_REG_CONFIG_DEC_STRENDIAN_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_STRSWAP32_E BIT(4)
+#define VDPU_REG_CONFIG_DEC_OUTSWAP32_E BIT(3)
+#define VDPU_REG_CONFIG_DEC_INSWAP32_E BIT(2)
+#define VDPU_REG_CONFIG_DEC_OUT_ENDIAN BIT(1)
+#define VDPU_REG_CONFIG_DEC_IN_ENDIAN BIT(0)
+#define VDPU_REG_INTERRUPT 0x0dc
+#define VDPU_REG_INTERRUPT_DEC_TIMEOUT BIT(13)
+#define VDPU_REG_INTERRUPT_DEC_ERROR_INT BIT(12)
+#define VDPU_REG_INTERRUPT_DEC_PIC_INF BIT(10)
+#define VDPU_REG_INTERRUPT_DEC_SLICE_INT BIT(9)
+#define VDPU_REG_INTERRUPT_DEC_ASO_INT BIT(8)
+#define VDPU_REG_INTERRUPT_DEC_BUFFER_INT BIT(6)
+#define VDPU_REG_INTERRUPT_DEC_BUS_INT BIT(5)
+#define VDPU_REG_INTERRUPT_DEC_RDY_INT BIT(4)
+#define VDPU_REG_INTERRUPT_DEC_IRQ_DIS BIT(1)
+#define VDPU_REG_INTERRUPT_DEC_IRQ BIT(0)
+#define VDPU_REG_AXI_CTRL 0x0e0
+#define VDPU_REG_AXI_DEC_SEL BIT(23)
+#define VDPU_REG_CONFIG_DEC_DATA_DISC_E BIT(22)
+#define VDPU_REG_PARAL_BUS_E(x) BIT(21)
+#define VDPU_REG_CONFIG_DEC_MAX_BURST(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 8)
+#define VDPU_REG_CONFIG_DEC_AXI_RD_ID(x) (((x) & 0xff) << 0)
+#define VDPU_REG_EN_FLAGS 0x0e4
+#define VDPU_REG_AHB_HLOCK_E BIT(31)
+#define VDPU_REG_CACHE_E BIT(29)
+#define VDPU_REG_PREFETCH_SINGLE_CHANNEL_E BIT(28)
+#define VDPU_REG_INTRA_3_CYCLE_ENHANCE BIT(27)
+#define VDPU_REG_INTRA_DOUBLE_SPEED BIT(26)
+#define VDPU_REG_INTER_DOUBLE_SPEED BIT(25)
+#define VDPU_REG_DEC_CTRL3_START_CODE_E BIT(22)
+#define VDPU_REG_DEC_CTRL3_CH_8PIX_ILEAV_E BIT(21)
+#define VDPU_REG_DEC_CTRL0_RLC_MODE_E BIT(20)
+#define VDPU_REG_DEC_CTRL0_DIVX3_E BIT(19)
+#define VDPU_REG_DEC_CTRL0_PJPEG_E BIT(18)
+#define VDPU_REG_DEC_CTRL0_PIC_INTERLACE_E BIT(17)
+#define VDPU_REG_DEC_CTRL0_PIC_FIELDMODE_E BIT(16)
+#define VDPU_REG_DEC_CTRL0_PIC_B_E BIT(15)
+#define VDPU_REG_DEC_CTRL0_PIC_INTER_E BIT(14)
+#define VDPU_REG_DEC_CTRL0_PIC_TOPFIELD_E BIT(13)
+#define VDPU_REG_DEC_CTRL0_FWD_INTERLACE_E BIT(12)
+#define VDPU_REG_DEC_CTRL0_SORENSON_E BIT(11)
+#define VDPU_REG_DEC_CTRL0_WRITE_MVS_E BIT(10)
+#define VDPU_REG_DEC_CTRL0_REF_TOPFIELD_E BIT(9)
+#define VDPU_REG_DEC_CTRL0_REFTOPFIRST_E BIT(8)
+#define VDPU_REG_DEC_CTRL0_SEQ_MBAFF_E BIT(7)
+#define VDPU_REG_DEC_CTRL0_PICORD_COUNT_E BIT(6)
+#define VDPU_REG_CONFIG_DEC_TIMEOUT_E BIT(5)
+#define VDPU_REG_CONFIG_DEC_CLK_GATE_E BIT(4)
+#define VDPU_REG_DEC_CTRL0_DEC_OUT_DIS BIT(2)
+#define VDPU_REG_REF_BUF_CTRL2_REFBU2_BUF_E BIT(1)
+#define VDPU_REG_INTERRUPT_DEC_E BIT(0)
+#define VDPU_REG_SOFT_RESET 0x0e8
+#define VDPU_REG_PRED_FLT 0x0ec
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_ADDITIONAL_CHROMA_ADDRESS 0x0f0
+#define VDPU_REG_ADDR_QTABLE 0x0f4
+#define VDPU_REG_DIRECT_MV_ADDR 0x0f8
+#define VDPU_REG_ADDR_DST 0x0fc
+#define VDPU_REG_ADDR_STR 0x100
+#define VDPU_REG_REFBUF_RELATED 0x104
+#define VDPU_REG_FWD_PIC(i) (0x128 + ((i) * 0x4))
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_FWD_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_REF_PIC(i) (0x130 + ((i) * 0x4))
+#define VDPU_REG_REF_PIC_REFER1_NBR(x) (((x) & 0xffff) << 16)
+#define VDPU_REG_REF_PIC_REFER0_NBR(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_H264_ADDR_REF(i) (0x150 + ((i) * 0x4))
+#define VDPU_REG_ADDR_REF_FIELD_E BIT(1)
+#define VDPU_REG_ADDR_REF_TOPC_E BIT(0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST0 0x190
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST1 0x194
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F11(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F10(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F9(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F8(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F7(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F6(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST2 0x198
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F15(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F14(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F13(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_F12(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST3 0x19c
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B5(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B4(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST4 0x1a0
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B11(x) (((x) & 0x1f) << 25)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B10(x) (((x) & 0x1f) << 20)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B9(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B8(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B7(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B6(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST5 0x1a4
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B15(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B14(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B13(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_REF_PIC_BINIT_RLIST_B12(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_INITIAL_REF_PIC_LIST6 0x1a8
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F3(x) (((x) & 0x1f) << 15)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F2(x) (((x) & 0x1f) << 10)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F1(x) (((x) & 0x1f) << 5)
+#define VDPU_REG_BD_P_REF_PIC_PINIT_RLIST_F0(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_LT_REF 0x1ac
+#define VDPU_REG_VALID_REF 0x1b0
+#define VDPU_REG_H264_PIC_MB_SIZE 0x1b8
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET2(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_DEC_CTRL2_CH_QP_OFFSET(x) (((x) & 0x1f) << 17)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 9)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 0)
+#define VDPU_REG_H264_CTRL 0x1bc
+#define VDPU_REG_DEC_CTRL4_WEIGHT_BIPR_IDC(x) (((x) & 0x3) << 16)
+#define VDPU_REG_DEC_CTRL1_REF_FRAMES(x) (((x) & 0x1f) << 0)
+#define VDPU_REG_CURRENT_FRAME 0x1c0
+#define VDPU_REG_DEC_CTRL5_FILT_CTRL_PRES BIT(31)
+#define VDPU_REG_DEC_CTRL5_RDPIC_CNT_PRES BIT(30)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM_LEN(x) (((x) & 0x1f) << 16)
+#define VDPU_REG_DEC_CTRL4_FRAMENUM(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_REF_FRAME 0x1c4
+#define VDPU_REG_DEC_CTRL5_REFPIC_MK_LEN(x) (((x) & 0x7ff) << 16)
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_ID(x) (((x) & 0xffff) << 0)
+#define VDPU_REG_DEC_CTRL6 0x1c8
+#define VDPU_REG_DEC_CTRL6_PPS_ID(x) (((x) & 0xff) << 24)
+#define VDPU_REG_DEC_CTRL6_REFIDX1_ACTIVE(x) (((x) & 0x1f) << 19)
+#define VDPU_REG_DEC_CTRL6_REFIDX0_ACTIVE(x) (((x) & 0x1f) << 14)
+#define VDPU_REG_DEC_CTRL6_POC_LENGTH(x) (((x) & 0xff) << 0)
+#define VDPU_REG_ENABLE_FLAG 0x1cc
+#define VDPU_REG_DEC_CTRL5_IDR_PIC_E BIT(8)
+#define VDPU_REG_DEC_CTRL4_DIR_8X8_INFER_E BIT(7)
+#define VDPU_REG_DEC_CTRL4_BLACKWHITE_E BIT(6)
+#define VDPU_REG_DEC_CTRL4_CABAC_E BIT(5)
+#define VDPU_REG_DEC_CTRL4_WEIGHT_PRED_E BIT(4)
+#define VDPU_REG_DEC_CTRL5_CONST_INTRA_E BIT(3)
+#define VDPU_REG_DEC_CTRL5_8X8TRANS_FLAG_E BIT(2)
+#define VDPU_REG_DEC_CTRL2_TYPE1_QUANT_E BIT(1)
+#define VDPU_REG_DEC_CTRL2_FIELDPIC_FLAG_E BIT(0)
+#define VDPU_REG_VP8_PIC_MB_SIZE 0x1e0
+#define VDPU_REG_DEC_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
+#define VDPU_REG_DEC_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
+#define VDPU_REG_DEC_PIC_MB_HEIGHT_P(x) (((x) & 0xff) << 11)
+#define VDPU_REG_DEC_MB_HEIGHT_OFF(x) (((x) & 0xf) << 7)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_W_EXT(x) (((x) & 0x7) << 3)
+#define VDPU_REG_DEC_CTRL1_PIC_MB_H_EXT(x) (((x) & 0x7) << 0)
+#define VDPU_REG_VP8_DCT_START_BIT 0x1e4
+#define VDPU_REG_DEC_CTRL4_DCT1_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL4_DCT2_START_BIT(x) (((x) & 0x3f) << 20)
+#define VDPU_REG_DEC_CTRL4_VC1_HEIGHT_EXT BIT(13)
+#define VDPU_REG_DEC_CTRL4_BILIN_MC_E BIT(12)
+#define VDPU_REG_VP8_CTRL0 0x1e8
+#define VDPU_REG_DEC_CTRL2_STRM_START_BIT(x) (((x) & 0x3f) << 26)
+#define VDPU_REG_DEC_CTRL2_STRM1_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_VALUE(x) (((x) & 0xff) << 8)
+#define VDPU_REG_DEC_CTRL2_BOOLEAN_RANGE(x) (((x) & 0xff) << 0)
+#define VDPU_REG_VP8_DATA_VAL 0x1f0
+#define VDPU_REG_DEC_CTRL6_COEFFS_PART_AM(x) (((x) & 0xf) << 24)
+#define VDPU_REG_DEC_CTRL6_STREAM1_LEN(x) (((x) & 0xffffff) << 0)
+#define VDPU_REG_PRED_FLT7 0x1f4
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_1(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_2(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_3(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT8 0x1f8
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT9 0x1fc
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_6_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT10 0x200
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_7_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_M1(x) (((x) & 0x3) << 10)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_2_4(x) (((x) & 0x3) << 8)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_M1(x) (((x) & 0x3) << 6)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_4_4(x) (((x) & 0x3) << 4)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_M1(x) (((x) & 0x3) << 2)
+#define VDPU_REG_BD_REF_PIC_PRED_TAP_6_4(x) (((x) & 0x3) << 0)
+#define VDPU_REG_FILTER_LEVEL 0x204
+#define VDPU_REG_REF_PIC_LF_LEVEL_0(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_REF_PIC_LF_LEVEL_1(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_REF_PIC_LF_LEVEL_2(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_REF_PIC_LF_LEVEL_3(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_VP8_QUANTER0 0x208
+#define VDPU_REG_REF_PIC_QUANT_DELTA_0(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_1(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_REF_PIC_QUANT_0(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_1(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_VP8_ADDR_REF0 0x20c
+#define VDPU_REG_FILTER_MB_ADJ 0x210
+#define VDPU_REG_REF_PIC_FILT_TYPE_E BIT(31)
+#define VDPU_REG_REF_PIC_FILT_SHARPNESS(x) (((x) & 0x7) << 28)
+#define VDPU_REG_FILT_MB_ADJ_0(x) (((x) & 0x7f) << 21)
+#define VDPU_REG_FILT_MB_ADJ_1(x) (((x) & 0x7f) << 14)
+#define VDPU_REG_FILT_MB_ADJ_2(x) (((x) & 0x7f) << 7)
+#define VDPU_REG_FILT_MB_ADJ_3(x) (((x) & 0x7f) << 0)
+#define VDPU_REG_FILTER_REF_ADJ 0x214
+#define VDPU_REG_REF_PIC_ADJ_0(x) (((x) & 0x7f) << 21)
+#define VDPU_REG_REF_PIC_ADJ_1(x) (((x) & 0x7f) << 14)
+#define VDPU_REG_REF_PIC_ADJ_2(x) (((x) & 0x7f) << 7)
+#define VDPU_REG_REF_PIC_ADJ_3(x) (((x) & 0x7f) << 0)
+#define VDPU_REG_VP8_ADDR_REF2_5(i) (0x218 + ((i) * 0x4))
+#define VDPU_REG_VP8_GREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_AREF_SIGN_BIAS BIT(0)
+#define VDPU_REG_VP8_DCT_BASE(i) (0x230 + ((i) * 0x4))
+#define VDPU_REG_VP8_ADDR_CTRL_PART 0x244
+#define VDPU_REG_VP8_ADDR_REF1 0x250
+#define VDPU_REG_VP8_SEGMENT_VAL 0x254
+#define VDPU_REG_FWD_PIC1_SEGMENT_BASE(x) ((x) << 0)
+#define VDPU_REG_FWD_PIC1_SEGMENT_UPD_E BIT(1)
+#define VDPU_REG_FWD_PIC1_SEGMENT_E BIT(0)
+#define VDPU_REG_VP8_DCT_START_BIT2 0x258
+#define VDPU_REG_DEC_CTRL7_DCT3_START_BIT(x) (((x) & 0x3f) << 24)
+#define VDPU_REG_DEC_CTRL7_DCT4_START_BIT(x) (((x) & 0x3f) << 18)
+#define VDPU_REG_DEC_CTRL7_DCT5_START_BIT(x) (((x) & 0x3f) << 12)
+#define VDPU_REG_DEC_CTRL7_DCT6_START_BIT(x) (((x) & 0x3f) << 6)
+#define VDPU_REG_DEC_CTRL7_DCT7_START_BIT(x) (((x) & 0x3f) << 0)
+#define VDPU_REG_VP8_QUANTER1 0x25c
+#define VDPU_REG_REF_PIC_QUANT_DELTA_2(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_DELTA_3(x) (((x) & 0x1f) << 22)
+#define VDPU_REG_REF_PIC_QUANT_2(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_3(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_VP8_QUANTER2 0x260
+#define VDPU_REG_REF_PIC_QUANT_DELTA_4(x) (((x) & 0x1f) << 27)
+#define VDPU_REG_REF_PIC_QUANT_4(x) (((x) & 0x7ff) << 11)
+#define VDPU_REG_REF_PIC_QUANT_5(x) (((x) & 0x7ff) << 0)
+#define VDPU_REG_PRED_FLT1 0x264
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_0_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT2 0x268
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_1_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_0(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT3 0x26c
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_1(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_2(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_2_3(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT4 0x270
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_0(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_1(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_2(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT5 0x274
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_3_3(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_0(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_1(x) (((x) & 0x3ff) << 2)
+#define VDPU_REG_PRED_FLT6 0x278
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_2(x) (((x) & 0x3ff) << 22)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_4_3(x) (((x) & 0x3ff) << 12)
+#define VDPU_REG_PRED_FLT_PRED_BC_TAP_5_0(x) (((x) & 0x3ff) << 2)
+
+#endif /* ROCKCHIP_VPU2_REGS_H_ */
diff --git a/drivers/media/platform/verisilicon/rockchip_vpu_hw.c b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
new file mode 100644
index 000000000000..8de6fd2e8eef
--- /dev/null
+++ b/drivers/media/platform/verisilicon/rockchip_vpu_hw.c
@@ -0,0 +1,680 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VPU codec driver
+ *
+ * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
+ * Jeffy Chen <jeffy.chen@rock-chips.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+#include "hantro_jpeg.h"
+#include "hantro_g1_regs.h"
+#include "hantro_h1_regs.h"
+#include "rockchip_vpu2_regs.h"
+
+#define RK3066_ACLK_MAX_FREQ (300 * 1000 * 1000)
+#define RK3288_ACLK_MAX_FREQ (400 * 1000 * 1000)
+
+/*
+ * Supported formats.
+ */
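+/*
+ * Entries with HANTRO_MODE_NONE describe the raw pixel formats; the
+ * entries with a codec mode describe the coded formats together with
+ * their frame size constraints.
+ */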
+
+static const struct hantro_fmt rockchip_vpu_enc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUV420M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420P,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_NV12M,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUV420SP,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_YUYV422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .codec_mode = HANTRO_MODE_NONE,
+ .enc_fmt = ROCKCHIP_VPU_ENC_FMT_UYVY422,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_JPEG,
+ .codec_mode = HANTRO_MODE_JPEG_ENC,
+ .max_depth = 2,
+ .header_size = JPEG_HEADER_SIZE,
+ .frmsize = {
+ .min_width = 96,
+ .max_width = 8192,
+ .step_width = MB_DIM,
+ .min_height = 32,
+ .max_height = 8192,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rockchip_vpu1_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3066_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3288_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_4K_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_4K_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_4K_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rockchip_vdpu2_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt rk3399_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_FHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_FHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
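+/*
+ * Interrupt handlers: read the status register, acknowledge the
+ * interrupt and stop the AXI interface, then complete the current
+ * buffer as DONE or ERROR depending on the frame-ready bit.
+ */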
+static irqreturn_t rockchip_vpu1_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vepu_read(vpu, H1_REG_INTERRUPT);
+ state = (status & H1_REG_INTERRUPT_FRAME_RDY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_vpu2_vdpu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vdpu_read(vpu, VDPU_REG_INTERRUPT);
+ state = (status & VDPU_REG_INTERRUPT_DEC_IRQ) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vdpu_write(vpu, 0, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rockchip_vpu2_vepu_irq(int irq, void *dev_id)
+{
+ struct hantro_dev *vpu = dev_id;
+ enum vb2_buffer_state state;
+ u32 status;
+
+ status = vepu_read(vpu, VEPU_REG_INTERRUPT);
+ state = (status & VEPU_REG_INTERRUPT_FRAME_READY) ?
+ VB2_BUF_STATE_DONE : VB2_BUF_STATE_ERROR;
+
+ vepu_write(vpu, 0, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+
+ hantro_irq_done(vpu, state);
+
+ return IRQ_HANDLED;
+}
+
+static int rk3036_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static int rk3066_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLKs to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3066_ACLK_MAX_FREQ);
+ clk_set_rate(vpu->clocks[2].clk, RK3066_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static int rockchip_vpu_hw_init(struct hantro_dev *vpu)
+{
+ /* Bump ACLK to max. possible freq. to improve performance. */
+ clk_set_rate(vpu->clocks[0].clk, RK3288_ACLK_MAX_FREQ);
+ return 0;
+}
+
+static void rk3066_vpu_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, G1_REG_INTERRUPT_DEC_IRQ_DIS, G1_REG_INTERRUPT);
+ vdpu_write(vpu, G1_REG_CONFIG_DEC_CLK_GATE_E, G1_REG_CONFIG);
+}
+
+static void rockchip_vpu1_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, H1_REG_INTERRUPT_DIS_BIT, H1_REG_INTERRUPT);
+ vepu_write(vpu, 0, H1_REG_ENC_CTRL);
+ vepu_write(vpu, 0, H1_REG_AXI_CTRL);
+}
+
+static void rockchip_vpu2_dec_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vdpu_write(vpu, VDPU_REG_INTERRUPT_DEC_IRQ_DIS, VDPU_REG_INTERRUPT);
+ vdpu_write(vpu, 0, VDPU_REG_EN_FLAGS);
+ vdpu_write(vpu, 1, VDPU_REG_SOFT_RESET);
+}
+
+static void rockchip_vpu2_enc_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ vepu_write(vpu, VEPU_REG_INTERRUPT_DIS_BIT, VEPU_REG_INTERRUPT);
+ vepu_write(vpu, 0, VEPU_REG_ENCODE_START);
+ vepu_write(vpu, 0, VEPU_REG_AXI_CTRL);
+}
+
+/*
+ * Supported codec ops.
+ */
+static const struct hantro_codec_ops rk3036_vpu_codec_ops[] = {
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3066_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rockchip_vpu1_enc_reset,
+ .done = hantro_h1_jpeg_enc_done,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = rk3066_vpu_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3288_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = hantro_h1_jpeg_enc_run,
+ .reset = rockchip_vpu1_enc_reset,
+ .done = hantro_h1_jpeg_enc_done,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3399_vpu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rockchip_vpu2_jpeg_enc_run,
+ .reset = rockchip_vpu2_enc_reset,
+ .done = rockchip_vpu2_jpeg_enc_done,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = rockchip_vpu2_h264_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = rockchip_vpu2_mpeg2_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = rockchip_vpu2_vp8_dec_run,
+ .reset = rockchip_vpu2_dec_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+};
+
+static const struct hantro_codec_ops rk3568_vepu_codec_ops[] = {
+ [HANTRO_MODE_JPEG_ENC] = {
+ .run = rockchip_vpu2_jpeg_enc_run,
+ .reset = rockchip_vpu2_enc_reset,
+ .done = rockchip_vpu2_jpeg_enc_done,
+ },
+};
+
+/*
+ * VPU variant.
+ */
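+/*
+ * The IRQ and clock name tables below are looked up by name, so they
+ * must match the interrupt-names and clock-names properties of the
+ * corresponding device tree nodes.
+ */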
+
+static const struct hantro_irq rockchip_vdpu1_irqs[] = {
+ { "vdpu", hantro_g1_irq },
+};
+
+static const struct hantro_irq rockchip_vpu1_irqs[] = {
+ { "vepu", rockchip_vpu1_vepu_irq },
+ { "vdpu", hantro_g1_irq },
+};
+
+static const struct hantro_irq rockchip_vdpu2_irqs[] = {
+ { "vdpu", rockchip_vpu2_vdpu_irq },
+};
+
+static const struct hantro_irq rockchip_vpu2_irqs[] = {
+ { "vepu", rockchip_vpu2_vepu_irq },
+ { "vdpu", rockchip_vpu2_vdpu_irq },
+};
+
+static const struct hantro_irq rk3568_vepu_irqs[] = {
+ { "vepu", rockchip_vpu2_vepu_irq },
+};
+
+static const char * const rk3066_vpu_clk_names[] = {
+ "aclk_vdpu", "hclk_vdpu",
+ "aclk_vepu", "hclk_vepu"
+};
+
+static const char * const rockchip_vpu_clk_names[] = {
+ "aclk", "hclk"
+};
+
+/* VDPU1/VEPU1 */
+
+const struct hantro_variant rk3036_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rk3066_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = rk3036_vpu_codec_ops,
+ .irqs = rockchip_vdpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu1_irqs),
+ .init = rk3036_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+/*
+ * Although this variant has separate decoder and encoder clocks, all
+ * four of them must be enabled for either decoding or encoding, so it
+ * cannot be split into separate G1/H1 variants.
+ */
+const struct hantro_variant rk3066_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3066_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3066_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3066_vpu_codec_ops,
+ .irqs = rockchip_vpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
+ .init = rk3066_vpu_hw_init,
+ .clk_names = rk3066_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rk3066_vpu_clk_names)
+};
+
+const struct hantro_variant rk3288_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3288_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3288_vpu_dec_fmts),
+ .postproc_fmts = rockchip_vpu1_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(rockchip_vpu1_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3288_vpu_codec_ops,
+ .irqs = rockchip_vpu1_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu1_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+/* VDPU2/VEPU2 */
+
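+/* The RK3328 has no encoder block, so only the VDPU2 decoder is wired up. */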
+const struct hantro_variant rk3328_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vdpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names),
+};
+
+/*
+ * H.264 decoding is explicitly disabled on the RK3399.
+ * This ensures userspace applications use the Rockchip VDEC core,
+ * which has better performance.
+ */
+const struct hantro_variant rk3399_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rk3399_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rk3399_vpu_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
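+/* The RK3568 VEPU is a standalone encoder instance; only JPEG encoding is exposed. */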
+const struct hantro_variant rk3568_vepu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .codec = HANTRO_JPEG_ENCODER,
+ .codec_ops = rk3568_vepu_codec_ops,
+ .irqs = rk3568_vepu_irqs,
+ .num_irqs = ARRAY_SIZE(rk3568_vepu_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
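+/* The RK3568 VPU pairs with the VEPU above and handles decoding only. */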
+const struct hantro_variant rk3568_vpu_variant = {
+ .dec_offset = 0x400,
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
+ .codec = HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vdpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vdpu2_irqs),
+ .init = rockchip_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
+
+const struct hantro_variant px30_vpu_variant = {
+ .enc_offset = 0x0,
+ .enc_fmts = rockchip_vpu_enc_fmts,
+ .num_enc_fmts = ARRAY_SIZE(rockchip_vpu_enc_fmts),
+ .dec_offset = 0x400,
+ .dec_fmts = rockchip_vdpu2_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(rockchip_vdpu2_dec_fmts),
+ .codec = HANTRO_JPEG_ENCODER | HANTRO_MPEG2_DECODER |
+ HANTRO_VP8_DECODER | HANTRO_H264_DECODER,
+ .codec_ops = rk3399_vpu_codec_ops,
+ .irqs = rockchip_vpu2_irqs,
+ .num_irqs = ARRAY_SIZE(rockchip_vpu2_irqs),
+ .init = rk3036_vpu_hw_init,
+ .clk_names = rockchip_vpu_clk_names,
+ .num_clocks = ARRAY_SIZE(rockchip_vpu_clk_names)
+};
diff --git a/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c b/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c
new file mode 100644
index 000000000000..b205e2db5b04
--- /dev/null
+++ b/drivers/media/platform/verisilicon/sama5d4_vdec_hw.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Hantro VDEC driver
+ *
+ * Copyright (C) 2021 Collabora Ltd, Emil Velikov <emil.velikov@collabora.com>
+ */
+
+#include "hantro.h"
+
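+/*
+ * The SAMA5D4 integrates a single Hantro G1 decoder (no encoder),
+ * limited to HD resolutions.
+ */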
+/*
+ * Supported formats.
+ */
+
+static const struct hantro_fmt sama5d4_vdec_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+static const struct hantro_fmt sama5d4_vdec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MPEG2_SLICE,
+ .codec_mode = HANTRO_MODE_MPEG2_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8_FRAME,
+ .codec_mode = HANTRO_MODE_VP8_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .codec_mode = HANTRO_MODE_H264_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_HD_WIDTH,
+ .step_width = MB_DIM,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_HD_HEIGHT,
+ .step_height = MB_DIM,
+ },
+ },
+};
+
+/*
+ * Supported codec ops.
+ */
+
+static const struct hantro_codec_ops sama5d4_vdec_codec_ops[] = {
+ [HANTRO_MODE_MPEG2_DEC] = {
+ .run = hantro_g1_mpeg2_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_mpeg2_dec_init,
+ .exit = hantro_mpeg2_dec_exit,
+ },
+ [HANTRO_MODE_VP8_DEC] = {
+ .run = hantro_g1_vp8_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_vp8_dec_init,
+ .exit = hantro_vp8_dec_exit,
+ },
+ [HANTRO_MODE_H264_DEC] = {
+ .run = hantro_g1_h264_dec_run,
+ .reset = hantro_g1_reset,
+ .init = hantro_h264_dec_init,
+ .exit = hantro_h264_dec_exit,
+ },
+};
+
+static const struct hantro_irq sama5d4_irqs[] = {
+ { "vdec", hantro_g1_irq },
+};
+
+static const char * const sama5d4_clk_names[] = { "vdec_clk" };
+
+const struct hantro_variant sama5d4_vdec_variant = {
+ .dec_fmts = sama5d4_vdec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(sama5d4_vdec_fmts),
+ .postproc_fmts = sama5d4_vdec_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(sama5d4_vdec_postproc_fmts),
+ .postproc_ops = &hantro_g1_postproc_ops,
+ .codec = HANTRO_MPEG2_DECODER | HANTRO_VP8_DECODER |
+ HANTRO_H264_DECODER,
+ .codec_ops = sama5d4_vdec_codec_ops,
+ .irqs = sama5d4_irqs,
+ .num_irqs = ARRAY_SIZE(sama5d4_irqs),
+ .clk_names = sama5d4_clk_names,
+ .num_clocks = ARRAY_SIZE(sama5d4_clk_names),
+};
diff --git a/drivers/media/platform/verisilicon/sunxi_vpu_hw.c b/drivers/media/platform/verisilicon/sunxi_vpu_hw.c
new file mode 100644
index 000000000000..02ce8b064a8f
--- /dev/null
+++ b/drivers/media/platform/verisilicon/sunxi_vpu_hw.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Allwinner Hantro G2 VPU codec driver
+ *
+ * Copyright (C) 2021 Jernej Skrabec <jernej.skrabec@gmail.com>
+ */
+
+#include <linux/clk.h>
+
+#include "hantro.h"
+
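+/*
+ * The Allwinner G2 instance decodes VP9 only; native output is tiled
+ * NV12/P010, with the post-processor providing linear NV12 and P010.
+ */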
+static const struct hantro_fmt sunxi_vpu_postproc_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010,
+ .codec_mode = HANTRO_MODE_NONE,
+ .postprocessed = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+};
+
+static const struct hantro_fmt sunxi_vpu_dec_fmts[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV12_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_P010_4L4,
+ .codec_mode = HANTRO_MODE_NONE,
+ .match_depth = true,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9_FRAME,
+ .codec_mode = HANTRO_MODE_VP9_DEC,
+ .max_depth = 2,
+ .frmsize = {
+ .min_width = FMT_MIN_WIDTH,
+ .max_width = FMT_UHD_WIDTH,
+ .step_width = 32,
+ .min_height = FMT_MIN_HEIGHT,
+ .max_height = FMT_UHD_HEIGHT,
+ .step_height = 32,
+ },
+ },
+};
+
+static int sunxi_vpu_hw_init(struct hantro_dev *vpu)
+{
+ clk_set_rate(vpu->clocks[0].clk, 300000000);
+
+ return 0;
+}
+
+static void sunxi_vpu_reset(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+
+ reset_control_reset(vpu->resets);
+}
+
+static const struct hantro_codec_ops sunxi_vpu_codec_ops[] = {
+ [HANTRO_MODE_VP9_DEC] = {
+ .run = hantro_g2_vp9_dec_run,
+ .done = hantro_g2_vp9_dec_done,
+ .reset = sunxi_vpu_reset,
+ .init = hantro_vp9_dec_init,
+ .exit = hantro_vp9_dec_exit,
+ },
+};
+
+static const struct hantro_irq sunxi_irqs[] = {
+ { NULL, hantro_g2_irq },
+};
+
+static const char * const sunxi_clk_names[] = { "mod", "bus" };
+
+const struct hantro_variant sunxi_vpu_variant = {
+ .dec_fmts = sunxi_vpu_dec_fmts,
+ .num_dec_fmts = ARRAY_SIZE(sunxi_vpu_dec_fmts),
+ .postproc_fmts = sunxi_vpu_postproc_fmts,
+ .num_postproc_fmts = ARRAY_SIZE(sunxi_vpu_postproc_fmts),
+ .postproc_ops = &hantro_g2_postproc_ops,
+ .codec = HANTRO_VP9_DECODER,
+ .codec_ops = sunxi_vpu_codec_ops,
+ .init = sunxi_vpu_hw_init,
+ .irqs = sunxi_irqs,
+ .num_irqs = ARRAY_SIZE(sunxi_irqs),
+ .clk_names = sunxi_clk_names,
+ .num_clocks = ARRAY_SIZE(sunxi_clk_names),
+ .double_buffer = 1,
+ .legacy_regs = 1,
+ .late_postproc = 1,
+};
diff --git a/drivers/media/platform/xilinx/xilinx-csi2rxss.c b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
index cf8e892c47f0..29b53febc2e7 100644
--- a/drivers/media/platform/xilinx/xilinx-csi2rxss.c
+++ b/drivers/media/platform/xilinx/xilinx-csi2rxss.c
@@ -188,6 +188,7 @@ static const u32 xcsi2dt_mbus_lut[][2] = {
{ MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SBGGR12_1X12 },
{ MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SGBRG12_1X12 },
{ MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_SGRBG12_1X12 },
+ { MIPI_CSI2_DT_RAW12, MEDIA_BUS_FMT_Y12_1X12 },
{ MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SRGGB16_1X16 },
{ MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SBGGR16_1X16 },
{ MIPI_CSI2_DT_RAW16, MEDIA_BUS_FMT_SGBRG16_1X16 },
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index a0073122798f..5b214bf7f93a 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -40,6 +40,8 @@ static const struct xvip_video_format xvip_video_formats[] = {
1, V4L2_PIX_FMT_SGBRG8 },
{ XVIP_VF_MONO_SENSOR, 8, "bggr", MEDIA_BUS_FMT_SBGGR8_1X8,
1, V4L2_PIX_FMT_SBGGR8 },
+ { XVIP_VF_MONO_SENSOR, 12, "mono", MEDIA_BUS_FMT_Y12_1X12,
+ 2, V4L2_PIX_FMT_Y12 },
};
/**
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index f34f8b077e03..0a16c218a50a 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -471,7 +471,7 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
{
struct device_node *ports;
struct device_node *port;
- int ret;
+ int ret = 0;
ports = of_get_child_by_name(xdev->dev->of_node, "ports");
if (ports == NULL) {
@@ -481,13 +481,14 @@ static int xvip_graph_dma_init(struct xvip_composite_device *xdev)
for_each_child_of_node(ports, port) {
ret = xvip_graph_dma_init_one(xdev, port);
- if (ret < 0) {
+ if (ret) {
of_node_put(port);
- return ret;
+ break;
}
}
- return 0;
+ of_node_put(ports);
+ return ret;
}
static void xvip_graph_cleanup(struct xvip_composite_device *xdev)