mirror of
https://git.openwrt.org/openwrt/openwrt.git
synced 2024-11-25 06:26:15 +00:00
51bbc8114b
1. Update target/linux/generic/config-6.6 for new ksym 2. Refresh patches Changelog: https://cdn.kernel.org/pub/linux/kernel/v6.x/ChangeLog-6.6.57 Added: generic/backport-6.6/777-netfilter-xtables-fix-typo-causing-some-targets-to-not-load-on-IPv6.patch[1] Manually rebased: generic/hack-6.6/645-netfilter-connmark-introduce-set-dscpmark.patch Removed upstreamed: gemini/patches-6.6/0001-net-ethernet-cortina-Drop-TSO-support.patch[2] gemini/patches-6.6/0004-net-ethernet-cortina-Restore-TSO-support.patch[3] All other patches automatically rebased. 1. https://lore.kernel.org/all/20241019-xtables-typos-v2-1-6b8b1735dc8e@0upti.me/ 2. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.6.57&id=452c0740d72c6a77a41f6ddc318a48f18c3d2346 3. https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=v6.6.57&id=611f74b0e7fb93ee2366d9d7edca546806b220e9 Build system: x86/64 Build-tested: x86/64/AMD Cezanne, flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3 Run-tested: x86/64/AMD Cezanne, flogic/xiaomi_redmi-router-ax6000-ubootmod, ramips/tplink_archer-a6-v3 Signed-off-by: John Audia <therealgraysky@proton.me> Link: https://github.com/openwrt/openwrt/pull/16726 Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
1029 lines
30 KiB
Diff
1029 lines
30 KiB
Diff
From 6546211b121bf4a931c9bcffb879dfed1911b8b1 Mon Sep 17 00:00:00 2001
|
|
From: Maxime Ripard <maxime@cerno.tech>
|
|
Date: Fri, 17 Feb 2023 13:07:36 +0100
|
|
Subject: [PATCH 0585/1085] drm/vc4: Introduce generation number enum
|
|
|
|
With the introduction of the BCM2712 support, we will get yet another
|
|
generation of display engine to support.
|
|
|
|
The binary check of whether it's VC5 or not thus doesn't work anymore,
|
|
especially since some parts of the driver will have changed with BCM2711,
|
|
and some others with BCM2712.
|
|
|
|
Let's introduce an enum to store the generation the driver is running
|
|
on, which should provide more flexibility.
|
|
|
|
Signed-off-by: Maxime Ripard <maxime@cerno.tech>
|
|
---
|
|
drivers/gpu/drm/vc4/tests/vc4_mock.c | 12 +++---
|
|
drivers/gpu/drm/vc4/vc4_bo.c | 28 ++++++------
|
|
drivers/gpu/drm/vc4/vc4_crtc.c | 14 +++---
|
|
drivers/gpu/drm/vc4/vc4_drv.c | 22 ++++++----
|
|
drivers/gpu/drm/vc4/vc4_drv.h | 7 ++-
|
|
drivers/gpu/drm/vc4/vc4_gem.c | 24 +++++------
|
|
drivers/gpu/drm/vc4/vc4_hdmi.c | 2 +-
|
|
drivers/gpu/drm/vc4/vc4_hvs.c | 50 ++++++++++++----------
|
|
drivers/gpu/drm/vc4/vc4_irq.c | 10 ++---
|
|
drivers/gpu/drm/vc4/vc4_kms.c | 14 +++---
|
|
drivers/gpu/drm/vc4/vc4_perfmon.c | 20 ++++-----
|
|
drivers/gpu/drm/vc4/vc4_plane.c | 12 +++---
|
|
drivers/gpu/drm/vc4/vc4_render_cl.c | 2 +-
|
|
drivers/gpu/drm/vc4/vc4_v3d.c | 10 ++---
|
|
drivers/gpu/drm/vc4/vc4_validate.c | 8 ++--
|
|
drivers/gpu/drm/vc4/vc4_validate_shaders.c | 2 +-
|
|
16 files changed, 126 insertions(+), 111 deletions(-)
|
|
|
|
--- a/drivers/gpu/drm/vc4/tests/vc4_mock.c
|
|
+++ b/drivers/gpu/drm/vc4/tests/vc4_mock.c
|
|
@@ -160,11 +160,11 @@ static void kunit_action_drm_dev_unregis
|
|
drm_dev_unregister(drm);
|
|
}
|
|
|
|
-static struct vc4_dev *__mock_device(struct kunit *test, bool is_vc5)
|
|
+static struct vc4_dev *__mock_device(struct kunit *test, enum vc4_gen gen)
|
|
{
|
|
struct drm_device *drm;
|
|
- const struct drm_driver *drv = is_vc5 ? &vc5_drm_driver : &vc4_drm_driver;
|
|
- const struct vc4_mock_desc *desc = is_vc5 ? &vc5_mock : &vc4_mock;
|
|
+ const struct drm_driver *drv = (gen == VC4_GEN_5) ? &vc5_drm_driver : &vc4_drm_driver;
|
|
+ const struct vc4_mock_desc *desc = (gen == VC4_GEN_5) ? &vc5_mock : &vc4_mock;
|
|
struct vc4_dev *vc4;
|
|
struct device *dev;
|
|
int ret;
|
|
@@ -178,7 +178,7 @@ static struct vc4_dev *__mock_device(str
|
|
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4);
|
|
|
|
vc4->dev = dev;
|
|
- vc4->is_vc5 = is_vc5;
|
|
+ vc4->gen = gen;
|
|
|
|
vc4->hvs = __vc4_hvs_alloc(vc4, NULL);
|
|
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vc4->hvs);
|
|
@@ -203,10 +203,10 @@ static struct vc4_dev *__mock_device(str
|
|
|
|
struct vc4_dev *vc4_mock_device(struct kunit *test)
|
|
{
|
|
- return __mock_device(test, false);
|
|
+ return __mock_device(test, VC4_GEN_4);
|
|
}
|
|
|
|
struct vc4_dev *vc5_mock_device(struct kunit *test)
|
|
{
|
|
- return __mock_device(test, true);
|
|
+ return __mock_device(test, VC4_GEN_5);
|
|
}
|
|
--- a/drivers/gpu/drm/vc4/vc4_bo.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
|
|
@@ -251,7 +251,7 @@ void vc4_bo_add_to_purgeable_pool(struct
|
|
{
|
|
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
mutex_lock(&vc4->purgeable.lock);
|
|
@@ -265,7 +265,7 @@ static void vc4_bo_remove_from_purgeable
|
|
{
|
|
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
/* list_del_init() is used here because the caller might release
|
|
@@ -396,7 +396,7 @@ struct drm_gem_object *vc4_create_object
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
struct vc4_bo *bo;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
|
|
@@ -427,7 +427,7 @@ struct vc4_bo *vc4_bo_create(struct drm_
|
|
struct drm_gem_dma_object *dma_obj;
|
|
struct vc4_bo *bo;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
if (size == 0)
|
|
@@ -496,7 +496,7 @@ int vc4_bo_dumb_create(struct drm_file *
|
|
struct vc4_bo *bo = NULL;
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
ret = vc4_dumb_fixup_args(args);
|
|
@@ -622,7 +622,7 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo)
|
|
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
/* Fast path: if the BO is already retained by someone, no need to
|
|
@@ -661,7 +661,7 @@ void vc4_bo_dec_usecnt(struct vc4_bo *bo
|
|
{
|
|
struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
/* Fast path: if the BO is still retained by someone, no need to test
|
|
@@ -783,7 +783,7 @@ int vc4_create_bo_ioctl(struct drm_devic
|
|
struct vc4_bo *bo = NULL;
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
ret = vc4_grab_bin_bo(vc4, vc4file);
|
|
@@ -813,7 +813,7 @@ int vc4_mmap_bo_ioctl(struct drm_device
|
|
struct drm_vc4_mmap_bo *args = data;
|
|
struct drm_gem_object *gem_obj;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
gem_obj = drm_gem_object_lookup(file_priv, args->handle);
|
|
@@ -839,7 +839,7 @@ vc4_create_shader_bo_ioctl(struct drm_de
|
|
struct vc4_bo *bo = NULL;
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (args->size == 0)
|
|
@@ -918,7 +918,7 @@ int vc4_set_tiling_ioctl(struct drm_devi
|
|
struct vc4_bo *bo;
|
|
bool t_format;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (args->flags != 0)
|
|
@@ -964,7 +964,7 @@ int vc4_get_tiling_ioctl(struct drm_devi
|
|
struct drm_gem_object *gem_obj;
|
|
struct vc4_bo *bo;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (args->flags != 0 || args->modifier != 0)
|
|
@@ -1007,7 +1007,7 @@ int vc4_bo_cache_init(struct drm_device
|
|
int ret;
|
|
int i;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
/* Create the initial set of BO labels that the kernel will
|
|
@@ -1071,7 +1071,7 @@ int vc4_label_bo_ioctl(struct drm_device
|
|
struct drm_gem_object *gem_obj;
|
|
int ret = 0, label;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (!args->len)
|
|
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
|
|
@@ -264,7 +264,7 @@ static u32 vc4_get_fifo_full_level(struc
|
|
* Removing 1 from the FIFO full level however
|
|
* seems to completely remove that issue.
|
|
*/
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1;
|
|
|
|
return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX;
|
|
@@ -446,7 +446,7 @@ static void vc4_crtc_config_pv(struct dr
|
|
if (is_dsi)
|
|
CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep);
|
|
|
|
- if (vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_5)
|
|
CRTC_WRITE(PV_MUX_CFG,
|
|
VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP,
|
|
PV_MUX_CFG_RGB_PIXEL_MUX_MODE));
|
|
@@ -937,7 +937,7 @@ static int vc4_async_set_fence_cb(struct
|
|
struct dma_fence *fence;
|
|
int ret;
|
|
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
|
|
|
|
return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno,
|
|
@@ -1024,7 +1024,7 @@ static int vc4_async_page_flip(struct dr
|
|
struct vc4_bo *bo = to_vc4_bo(&dma_bo->base);
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
/*
|
|
@@ -1067,7 +1067,7 @@ int vc4_page_flip(struct drm_crtc *crtc,
|
|
struct drm_device *dev = crtc->dev;
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
|
|
- if (vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_5)
|
|
return vc5_async_page_flip(crtc, fb, event, flags);
|
|
else
|
|
return vc4_async_page_flip(crtc, fb, event, flags);
|
|
@@ -1356,13 +1356,13 @@ int __vc4_crtc_init(struct drm_device *d
|
|
|
|
drm_crtc_helper_add(crtc, crtc_helper_funcs);
|
|
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r));
|
|
drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size);
|
|
}
|
|
|
|
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
/* We support CTM, but only for one CRTC at a time. It's therefore
|
|
* implemented as private driver state in vc4_kms, not here.
|
|
*/
|
|
--- a/drivers/gpu/drm/vc4/vc4_drv.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
|
|
@@ -98,7 +98,7 @@ static int vc4_get_param_ioctl(struct dr
|
|
if (args->pad != 0)
|
|
return -EINVAL;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (!vc4->v3d)
|
|
@@ -147,7 +147,7 @@ static int vc4_open(struct drm_device *d
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
struct vc4_file *vc4file;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL);
|
|
@@ -165,7 +165,7 @@ static void vc4_close(struct drm_device
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
struct vc4_file *vc4file = file->driver_priv;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
if (vc4file->bin_bo_used)
|
|
@@ -304,13 +304,17 @@ static int vc4_drm_bind(struct device *d
|
|
struct vc4_dev *vc4;
|
|
struct device_node *node;
|
|
struct drm_crtc *crtc;
|
|
- bool is_vc5;
|
|
+ enum vc4_gen gen;
|
|
int ret = 0;
|
|
|
|
dev->coherent_dma_mask = DMA_BIT_MASK(32);
|
|
|
|
- is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5");
|
|
- if (is_vc5)
|
|
+ if (of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"))
|
|
+ gen = VC4_GEN_5;
|
|
+ else
|
|
+ gen = VC4_GEN_4;
|
|
+
|
|
+ if (gen == VC4_GEN_5)
|
|
driver = &vc5_drm_driver;
|
|
else
|
|
driver = &vc4_drm_driver;
|
|
@@ -328,13 +332,13 @@ static int vc4_drm_bind(struct device *d
|
|
vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base);
|
|
if (IS_ERR(vc4))
|
|
return PTR_ERR(vc4);
|
|
- vc4->is_vc5 = is_vc5;
|
|
+ vc4->gen = gen;
|
|
vc4->dev = dev;
|
|
|
|
drm = &vc4->base;
|
|
platform_set_drvdata(pdev, drm);
|
|
|
|
- if (!is_vc5) {
|
|
+ if (gen == VC4_GEN_4) {
|
|
ret = drmm_mutex_init(drm, &vc4->bin_bo_lock);
|
|
if (ret)
|
|
return ret;
|
|
@@ -348,7 +352,7 @@ static int vc4_drm_bind(struct device *d
|
|
if (ret)
|
|
return ret;
|
|
|
|
- if (!is_vc5) {
|
|
+ if (gen == VC4_GEN_4) {
|
|
ret = vc4_gem_init(drm);
|
|
if (ret)
|
|
return ret;
|
|
--- a/drivers/gpu/drm/vc4/vc4_drv.h
|
|
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
|
|
@@ -80,11 +80,16 @@ struct vc4_perfmon {
|
|
u64 counters[];
|
|
};
|
|
|
|
+enum vc4_gen {
|
|
+ VC4_GEN_4,
|
|
+ VC4_GEN_5,
|
|
+};
|
|
+
|
|
struct vc4_dev {
|
|
struct drm_device base;
|
|
struct device *dev;
|
|
|
|
- bool is_vc5;
|
|
+ enum vc4_gen gen;
|
|
|
|
unsigned int irq;
|
|
|
|
--- a/drivers/gpu/drm/vc4/vc4_gem.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
|
|
@@ -76,7 +76,7 @@ vc4_get_hang_state_ioctl(struct drm_devi
|
|
u32 i;
|
|
int ret = 0;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (!vc4->v3d) {
|
|
@@ -389,7 +389,7 @@ vc4_wait_for_seqno(struct drm_device *de
|
|
unsigned long timeout_expire;
|
|
DEFINE_WAIT(wait);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (vc4->finished_seqno >= seqno)
|
|
@@ -474,7 +474,7 @@ vc4_submit_next_bin_job(struct drm_devic
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
struct vc4_exec_info *exec;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
again:
|
|
@@ -522,7 +522,7 @@ vc4_submit_next_render_job(struct drm_de
|
|
if (!exec)
|
|
return;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
/* A previous RCL may have written to one of our textures, and
|
|
@@ -543,7 +543,7 @@ vc4_move_job_to_render(struct drm_device
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
bool was_empty = list_empty(&vc4->render_job_list);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
list_move_tail(&exec->head, &vc4->render_job_list);
|
|
@@ -970,7 +970,7 @@ vc4_job_handle_completed(struct vc4_dev
|
|
unsigned long irqflags;
|
|
struct vc4_seqno_cb *cb, *cb_temp;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
spin_lock_irqsave(&vc4->job_lock, irqflags);
|
|
@@ -1009,7 +1009,7 @@ int vc4_queue_seqno_cb(struct drm_device
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
unsigned long irqflags;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
cb->func = func;
|
|
@@ -1065,7 +1065,7 @@ vc4_wait_seqno_ioctl(struct drm_device *
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
struct drm_vc4_wait_seqno *args = data;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno,
|
|
@@ -1082,7 +1082,7 @@ vc4_wait_bo_ioctl(struct drm_device *dev
|
|
struct drm_gem_object *gem_obj;
|
|
struct vc4_bo *bo;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (args->pad != 0)
|
|
@@ -1131,7 +1131,7 @@ vc4_submit_cl_ioctl(struct drm_device *d
|
|
args->shader_rec_size,
|
|
args->bo_handle_count);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (!vc4->v3d) {
|
|
@@ -1268,7 +1268,7 @@ int vc4_gem_init(struct drm_device *dev)
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
vc4->dma_fence_context = dma_fence_context_alloc(1);
|
|
@@ -1327,7 +1327,7 @@ int vc4_gem_madvise_ioctl(struct drm_dev
|
|
struct vc4_bo *bo;
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
switch (args->madv) {
|
|
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
|
|
@@ -2586,7 +2586,7 @@ static int vc4_hdmi_audio_prepare(struct
|
|
VC4_HDMI_AUDIO_PACKET_CEA_MASK);
|
|
|
|
/* Set the MAI threshold */
|
|
- if (vc4->is_vc5)
|
|
+ if (vc4->gen >= VC4_GEN_5)
|
|
HDMI_WRITE(HDMI_MAI_THR,
|
|
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICHIGH) |
|
|
VC4_SET_FIELD(0x10, VC4_HD_MAI_THR_PANICLOW) |
|
|
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
|
|
@@ -416,7 +416,7 @@ static void vc4_hvs_irq_enable_eof(const
|
|
unsigned int channel)
|
|
{
|
|
struct vc4_dev *vc4 = hvs->vc4;
|
|
- u32 irq_mask = vc4->is_vc5 ?
|
|
+ u32 irq_mask = vc4->gen == VC4_GEN_5 ?
|
|
SCALER5_DISPCTRL_DSPEIEOF(channel) :
|
|
SCALER_DISPCTRL_DSPEIEOF(channel);
|
|
|
|
@@ -428,7 +428,7 @@ static void vc4_hvs_irq_clear_eof(const
|
|
unsigned int channel)
|
|
{
|
|
struct vc4_dev *vc4 = hvs->vc4;
|
|
- u32 irq_mask = vc4->is_vc5 ?
|
|
+ u32 irq_mask = vc4->gen == VC4_GEN_5 ?
|
|
SCALER5_DISPCTRL_DSPEIEOF(channel) :
|
|
SCALER_DISPCTRL_DSPEIEOF(channel);
|
|
|
|
@@ -620,7 +620,7 @@ int vc4_hvs_get_fifo_from_output(struct
|
|
u32 reg;
|
|
int ret;
|
|
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
return output;
|
|
|
|
/*
|
|
@@ -701,7 +701,7 @@ static int vc4_hvs_init_channel(struct v
|
|
dispctrl = SCALER_DISPCTRLX_ENABLE;
|
|
dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(chan));
|
|
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
dispctrl |= VC4_SET_FIELD(mode->hdisplay,
|
|
SCALER_DISPCTRLX_WIDTH) |
|
|
VC4_SET_FIELD(mode->vdisplay,
|
|
@@ -732,7 +732,7 @@ static int vc4_hvs_init_channel(struct v
|
|
/* Reload the LUT, since the SRAMs would have been disabled if
|
|
* all CRTCs had SCALER_DISPBKGND_GAMMA unset at once.
|
|
*/
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
vc4_hvs_lut_load(hvs, vc4_crtc);
|
|
else
|
|
vc5_hvs_lut_load(hvs, vc4_crtc);
|
|
@@ -782,7 +782,7 @@ static int vc4_hvs_gamma_check(struct dr
|
|
struct drm_device *dev = crtc->dev;
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
return 0;
|
|
|
|
if (!crtc_state->color_mgmt_changed)
|
|
@@ -1036,7 +1036,7 @@ void vc4_hvs_atomic_flush(struct drm_crt
|
|
u32 dispbkgndx = HVS_READ(SCALER_DISPBKGNDX(channel));
|
|
|
|
if (crtc->state->gamma_lut) {
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
vc4_hvs_update_gamma_lut(hvs, vc4_crtc);
|
|
dispbkgndx |= SCALER_DISPBKGND_GAMMA;
|
|
} else {
|
|
@@ -1053,7 +1053,7 @@ void vc4_hvs_atomic_flush(struct drm_crt
|
|
* should already be disabling/enabling the pipeline
|
|
* when gamma changes.
|
|
*/
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
dispbkgndx &= ~SCALER_DISPBKGND_GAMMA;
|
|
}
|
|
HVS_WRITE(SCALER_DISPBKGNDX(channel), dispbkgndx);
|
|
@@ -1069,7 +1069,8 @@ void vc4_hvs_atomic_flush(struct drm_crt
|
|
|
|
void vc4_hvs_mask_underrun(struct vc4_hvs *hvs, int channel)
|
|
{
|
|
- struct drm_device *drm = &hvs->vc4->base;
|
|
+ struct vc4_dev *vc4 = hvs->vc4;
|
|
+ struct drm_device *drm = &vc4->base;
|
|
u32 dispctrl;
|
|
int idx;
|
|
|
|
@@ -1077,8 +1078,9 @@ void vc4_hvs_mask_underrun(struct vc4_hv
|
|
return;
|
|
|
|
dispctrl = HVS_READ(SCALER_DISPCTRL);
|
|
- dispctrl &= ~(hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
|
|
- SCALER_DISPCTRL_DSPEISLUR(channel));
|
|
+ dispctrl &= ~((vc4->gen == VC4_GEN_5) ?
|
|
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
|
|
+ SCALER_DISPCTRL_DSPEISLUR(channel));
|
|
|
|
HVS_WRITE(SCALER_DISPCTRL, dispctrl);
|
|
|
|
@@ -1087,7 +1089,8 @@ void vc4_hvs_mask_underrun(struct vc4_hv
|
|
|
|
void vc4_hvs_unmask_underrun(struct vc4_hvs *hvs, int channel)
|
|
{
|
|
- struct drm_device *drm = &hvs->vc4->base;
|
|
+ struct vc4_dev *vc4 = hvs->vc4;
|
|
+ struct drm_device *drm = &vc4->base;
|
|
u32 dispctrl;
|
|
int idx;
|
|
|
|
@@ -1095,8 +1098,9 @@ void vc4_hvs_unmask_underrun(struct vc4_
|
|
return;
|
|
|
|
dispctrl = HVS_READ(SCALER_DISPCTRL);
|
|
- dispctrl |= (hvs->vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
|
|
- SCALER_DISPCTRL_DSPEISLUR(channel));
|
|
+ dispctrl |= ((vc4->gen == VC4_GEN_5) ?
|
|
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
|
|
+ SCALER_DISPCTRL_DSPEISLUR(channel));
|
|
|
|
HVS_WRITE(SCALER_DISPSTAT,
|
|
SCALER_DISPSTAT_EUFLOW(channel));
|
|
@@ -1139,8 +1143,10 @@ static irqreturn_t vc4_hvs_irq_handler(i
|
|
control = HVS_READ(SCALER_DISPCTRL);
|
|
|
|
for (channel = 0; channel < SCALER_CHANNELS_COUNT; channel++) {
|
|
- dspeislur = vc4->is_vc5 ? SCALER5_DISPCTRL_DSPEISLUR(channel) :
|
|
- SCALER_DISPCTRL_DSPEISLUR(channel);
|
|
+ dspeislur = (vc4->gen == VC4_GEN_5) ?
|
|
+ SCALER5_DISPCTRL_DSPEISLUR(channel) :
|
|
+ SCALER_DISPCTRL_DSPEISLUR(channel);
|
|
+
|
|
/* Interrupt masking is not always honored, so check it here. */
|
|
if (status & SCALER_DISPSTAT_EUFLOW(channel) &&
|
|
control & dspeislur) {
|
|
@@ -1176,7 +1182,7 @@ int vc4_hvs_debugfs_init(struct drm_mino
|
|
if (!vc4->hvs)
|
|
return -ENODEV;
|
|
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
debugfs_create_bool("hvs_load_tracker", S_IRUGO | S_IWUSR,
|
|
minor->debugfs_root,
|
|
&vc4->load_tracker_enabled);
|
|
@@ -1225,7 +1231,7 @@ struct vc4_hvs *__vc4_hvs_alloc(struct v
|
|
* between planes when they don't overlap on the screen, but
|
|
* for now we just allocate globally.
|
|
*/
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
/* 48k words of 2x12-bit pixels */
|
|
drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024);
|
|
else
|
|
@@ -1259,7 +1265,7 @@ static int vc4_hvs_bind(struct device *d
|
|
hvs->regset.regs = hvs_regs;
|
|
hvs->regset.nregs = ARRAY_SIZE(hvs_regs);
|
|
|
|
- if (vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_5) {
|
|
struct rpi_firmware *firmware;
|
|
struct device_node *node;
|
|
unsigned int max_rate;
|
|
@@ -1297,7 +1303,7 @@ static int vc4_hvs_bind(struct device *d
|
|
}
|
|
}
|
|
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
hvs->dlist = hvs->regs + SCALER_DLIST_START;
|
|
else
|
|
hvs->dlist = hvs->regs + SCALER5_DLIST_START;
|
|
@@ -1338,7 +1344,7 @@ static int vc4_hvs_bind(struct device *d
|
|
SCALER_DISPCTRL_DISPEIRQ(1) |
|
|
SCALER_DISPCTRL_DISPEIRQ(2);
|
|
|
|
- if (!vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_4)
|
|
dispctrl &= ~(SCALER_DISPCTRL_DMAEIRQ |
|
|
SCALER_DISPCTRL_SLVWREIRQ |
|
|
SCALER_DISPCTRL_SLVRDEIRQ |
|
|
@@ -1393,7 +1399,7 @@ static int vc4_hvs_bind(struct device *d
|
|
|
|
/* Recompute Composite Output Buffer (COB) allocations for the displays
|
|
*/
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
/* The COB is 20736 pixels, or just over 10 lines at 2048 wide.
|
|
* The bottom 2048 pixels are full 32bpp RGBA (intended for the
|
|
* TXP composing RGBA to memory), whilst the remainder are only
|
|
--- a/drivers/gpu/drm/vc4/vc4_irq.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
|
|
@@ -263,7 +263,7 @@ vc4_irq_enable(struct drm_device *dev)
|
|
{
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
if (!vc4->v3d)
|
|
@@ -280,7 +280,7 @@ vc4_irq_disable(struct drm_device *dev)
|
|
{
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
if (!vc4->v3d)
|
|
@@ -303,7 +303,7 @@ int vc4_irq_install(struct drm_device *d
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (irq == IRQ_NOTCONNECTED)
|
|
@@ -324,7 +324,7 @@ void vc4_irq_uninstall(struct drm_device
|
|
{
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
vc4_irq_disable(dev);
|
|
@@ -337,7 +337,7 @@ void vc4_irq_reset(struct drm_device *de
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
unsigned long irqflags;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
/* Acknowledge any stale IRQs. */
|
|
--- a/drivers/gpu/drm/vc4/vc4_kms.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
|
|
@@ -372,7 +372,7 @@ static void vc4_atomic_commit_tail(struc
|
|
old_hvs_state->fifo_state[channel].pending_commit = NULL;
|
|
}
|
|
|
|
- if (vc4->is_vc5 && !vc4->firmware_kms) {
|
|
+ if (vc4->gen == VC4_GEN_5 && !vc4->firmware_kms) {
|
|
unsigned long state_rate = max(old_hvs_state->core_clock_rate,
|
|
new_hvs_state->core_clock_rate);
|
|
unsigned long core_rate = clamp_t(unsigned long, state_rate,
|
|
@@ -392,7 +392,7 @@ static void vc4_atomic_commit_tail(struc
|
|
vc4_ctm_commit(vc4, state);
|
|
|
|
if (!vc4->firmware_kms) {
|
|
- if (vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_5)
|
|
vc5_hvs_pv_muxing_commit(vc4, state);
|
|
else
|
|
vc4_hvs_pv_muxing_commit(vc4, state);
|
|
@@ -411,7 +411,7 @@ static void vc4_atomic_commit_tail(struc
|
|
|
|
drm_atomic_helper_cleanup_planes(dev, state);
|
|
|
|
- if (vc4->is_vc5 && !vc4->firmware_kms) {
|
|
+ if (vc4->gen == VC4_GEN_5 && !vc4->firmware_kms) {
|
|
unsigned long core_rate = min_t(unsigned long,
|
|
hvs->max_core_rate,
|
|
new_hvs_state->core_clock_rate);
|
|
@@ -476,7 +476,7 @@ static struct drm_framebuffer *vc4_fb_cr
|
|
struct vc4_dev *vc4 = to_vc4_dev(dev);
|
|
struct drm_mode_fb_cmd2 mode_cmd_local;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return ERR_PTR(-ENODEV);
|
|
|
|
/* If the user didn't specify a modifier, use the
|
|
@@ -1059,7 +1059,7 @@ int vc4_kms_load(struct drm_device *dev)
|
|
* the BCM2711, but the load tracker computations are used for
|
|
* the core clock rate calculation.
|
|
*/
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
/* Start with the load tracker enabled. Can be
|
|
* disabled through the debugfs load_tracker file.
|
|
*/
|
|
@@ -1075,7 +1075,7 @@ int vc4_kms_load(struct drm_device *dev)
|
|
return ret;
|
|
}
|
|
|
|
- if (vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_5) {
|
|
dev->mode_config.max_width = 7680;
|
|
dev->mode_config.max_height = 7680;
|
|
} else {
|
|
@@ -1083,7 +1083,7 @@ int vc4_kms_load(struct drm_device *dev)
|
|
dev->mode_config.max_height = 2048;
|
|
}
|
|
|
|
- dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs;
|
|
+ dev->mode_config.funcs = (vc4->gen > VC4_GEN_4) ? &vc5_mode_funcs : &vc4_mode_funcs;
|
|
dev->mode_config.helper_private = &vc4_mode_config_helpers;
|
|
dev->mode_config.preferred_depth = 24;
|
|
dev->mode_config.async_page_flip = true;
|
|
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
|
|
@@ -23,7 +23,7 @@ void vc4_perfmon_get(struct vc4_perfmon
|
|
return;
|
|
|
|
vc4 = perfmon->dev;
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
refcount_inc(&perfmon->refcnt);
|
|
@@ -37,7 +37,7 @@ void vc4_perfmon_put(struct vc4_perfmon
|
|
return;
|
|
|
|
vc4 = perfmon->dev;
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
if (refcount_dec_and_test(&perfmon->refcnt))
|
|
@@ -49,7 +49,7 @@ void vc4_perfmon_start(struct vc4_dev *v
|
|
unsigned int i;
|
|
u32 mask;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon))
|
|
@@ -69,7 +69,7 @@ void vc4_perfmon_stop(struct vc4_dev *vc
|
|
{
|
|
unsigned int i;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
if (WARN_ON_ONCE(!vc4->active_perfmon ||
|
|
@@ -90,7 +90,7 @@ struct vc4_perfmon *vc4_perfmon_find(str
|
|
struct vc4_dev *vc4 = vc4file->dev;
|
|
struct vc4_perfmon *perfmon;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return NULL;
|
|
|
|
mutex_lock(&vc4file->perfmon.lock);
|
|
@@ -105,7 +105,7 @@ void vc4_perfmon_open_file(struct vc4_fi
|
|
{
|
|
struct vc4_dev *vc4 = vc4file->dev;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
mutex_init(&vc4file->perfmon.lock);
|
|
@@ -131,7 +131,7 @@ void vc4_perfmon_close_file(struct vc4_f
|
|
{
|
|
struct vc4_dev *vc4 = vc4file->dev;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
mutex_lock(&vc4file->perfmon.lock);
|
|
@@ -151,7 +151,7 @@ int vc4_perfmon_create_ioctl(struct drm_
|
|
unsigned int i;
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (!vc4->v3d) {
|
|
@@ -205,7 +205,7 @@ int vc4_perfmon_destroy_ioctl(struct drm
|
|
struct drm_vc4_perfmon_destroy *req = data;
|
|
struct vc4_perfmon *perfmon;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (!vc4->v3d) {
|
|
@@ -233,7 +233,7 @@ int vc4_perfmon_get_values_ioctl(struct
|
|
struct vc4_perfmon *perfmon;
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (!vc4->v3d) {
|
|
--- a/drivers/gpu/drm/vc4/vc4_plane.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
|
|
@@ -633,10 +633,10 @@ static u32 vc4_lbm_size(struct drm_plane
|
|
}
|
|
|
|
/* Align it to 64 or 128 (hvs5) bytes */
|
|
- lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64);
|
|
+ lbm = roundup(lbm, vc4->gen == VC4_GEN_5 ? 128 : 64);
|
|
|
|
/* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */
|
|
- lbm /= vc4->is_vc5 ? 4 : 2;
|
|
+ lbm /= vc4->gen == VC4_GEN_5 ? 4 : 2;
|
|
|
|
return lbm;
|
|
}
|
|
@@ -760,7 +760,7 @@ static int vc4_plane_allocate_lbm(struct
|
|
ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm,
|
|
&vc4_state->lbm,
|
|
lbm_size,
|
|
- vc4->is_vc5 ? 64 : 32,
|
|
+ vc4->gen == VC4_GEN_5 ? 64 : 32,
|
|
0, 0);
|
|
spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags);
|
|
|
|
@@ -1141,7 +1141,7 @@ static int vc4_plane_mode_set(struct drm
|
|
mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE &&
|
|
fb->format->has_alpha;
|
|
|
|
- if (!vc4->is_vc5) {
|
|
+ if (vc4->gen == VC4_GEN_4) {
|
|
/* Control word */
|
|
vc4_dlist_write(vc4_state,
|
|
SCALER_CTL0_VALID |
|
|
@@ -1713,7 +1713,7 @@ struct drm_plane *vc4_plane_init(struct
|
|
};
|
|
|
|
for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) {
|
|
- if (!hvs_formats[i].hvs5_only || vc4->is_vc5) {
|
|
+ if (!hvs_formats[i].hvs5_only || vc4->gen == VC4_GEN_5) {
|
|
formats[num_formats] = hvs_formats[i].drm;
|
|
num_formats++;
|
|
}
|
|
@@ -1728,7 +1728,7 @@ struct drm_plane *vc4_plane_init(struct
|
|
return ERR_CAST(vc4_plane);
|
|
plane = &vc4_plane->base;
|
|
|
|
- if (vc4->is_vc5)
|
|
+ if (vc4->gen == VC4_GEN_5)
|
|
drm_plane_helper_add(plane, &vc5_plane_helper_funcs);
|
|
else
|
|
drm_plane_helper_add(plane, &vc4_plane_helper_funcs);
|
|
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
|
|
@@ -599,7 +599,7 @@ int vc4_get_rcl(struct drm_device *dev,
|
|
bool has_bin = args->bin_cl_size != 0;
|
|
int ret;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
if (args->min_x_tile > args->max_x_tile ||
|
|
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
|
|
@@ -127,7 +127,7 @@ static int vc4_v3d_debugfs_ident(struct
|
|
int
|
|
vc4_v3d_pm_get(struct vc4_dev *vc4)
|
|
{
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
mutex_lock(&vc4->power_lock);
|
|
@@ -148,7 +148,7 @@ vc4_v3d_pm_get(struct vc4_dev *vc4)
|
|
void
|
|
vc4_v3d_pm_put(struct vc4_dev *vc4)
|
|
{
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
mutex_lock(&vc4->power_lock);
|
|
@@ -178,7 +178,7 @@ int vc4_v3d_get_bin_slot(struct vc4_dev
|
|
uint64_t seqno = 0;
|
|
struct vc4_exec_info *exec;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
try_again:
|
|
@@ -325,7 +325,7 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *v
|
|
{
|
|
int ret = 0;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
mutex_lock(&vc4->bin_bo_lock);
|
|
@@ -360,7 +360,7 @@ static void bin_bo_release(struct kref *
|
|
|
|
void vc4_v3d_bin_bo_put(struct vc4_dev *vc4)
|
|
{
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return;
|
|
|
|
mutex_lock(&vc4->bin_bo_lock);
|
|
--- a/drivers/gpu/drm/vc4/vc4_validate.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
|
|
@@ -109,7 +109,7 @@ vc4_use_bo(struct vc4_exec_info *exec, u
|
|
struct drm_gem_dma_object *obj;
|
|
struct vc4_bo *bo;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return NULL;
|
|
|
|
if (hindex >= exec->bo_count) {
|
|
@@ -169,7 +169,7 @@ vc4_check_tex_size(struct vc4_exec_info
|
|
uint32_t utile_w = utile_width(cpp);
|
|
uint32_t utile_h = utile_height(cpp);
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return false;
|
|
|
|
/* The shaded vertex format stores signed 12.4 fixed point
|
|
@@ -495,7 +495,7 @@ vc4_validate_bin_cl(struct drm_device *d
|
|
uint32_t dst_offset = 0;
|
|
uint32_t src_offset = 0;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
while (src_offset < len) {
|
|
@@ -942,7 +942,7 @@ vc4_validate_shader_recs(struct drm_devi
|
|
uint32_t i;
|
|
int ret = 0;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return -ENODEV;
|
|
|
|
for (i = 0; i < exec->shader_state_count; i++) {
|
|
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
|
|
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
|
|
@@ -786,7 +786,7 @@ vc4_validate_shader(struct drm_gem_dma_o
|
|
struct vc4_validated_shader_info *validated_shader = NULL;
|
|
struct vc4_shader_validation_state validation_state;
|
|
|
|
- if (WARN_ON_ONCE(vc4->is_vc5))
|
|
+ if (WARN_ON_ONCE(vc4->gen == VC4_GEN_5))
|
|
return NULL;
|
|
|
|
memset(&validation_state, 0, sizeof(validation_state));
|