Re: [PATCH v2 09/11] drm/mediatek: Add secure flow support to mediatek-drm

From: Jason-JH Lin (林睿祥)
Date: Sun Nov 05 2023 - 08:05:17 EST


On Tue, 2023-10-31 at 06:01 +0000, CK Hu (胡俊光) wrote:
> Hi, Jason:
>
> On Mon, 2023-10-23 at 12:45 +0800, Jason-JH.Lin wrote:
> > To add secure flow support to mediatek-drm, each crtc has to
> > create a secure cmdq mailbox channel. Cmdq packets carrying the
> > display HW configuration are then sent to the secure cmdq mailbox
> > channel and configured in the secure world.
> >
> > Each crtc has to use the secure cmdq interface to configure some
> > secure settings for the display HW before sending cmdq packets to
> > the secure cmdq mailbox channel.
> >
> > If any fb in the current drm_atomic_state is secure, the crtc
> > switches to the secure flow to configure the display HW.
> > If no fb in the current drm_atomic_state is secure, the crtc
> > switches to the normal flow.
> >
> > Signed-off-by: Jason-JH.Lin <jason-jh.lin@xxxxxxxxxxxx>
> > ---
> > drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 272 ++++++++++++++++++++++-
> > drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 1 +
> > drivers/gpu/drm/mediatek/mtk_drm_plane.c | 7 +
> > 3 files changed, 269 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> > index b6fa4ad2f94d..6c2cf339b923 100644
> > --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> > +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
> > @@ -56,6 +56,11 @@ struct mtk_drm_crtc {
> > u32 cmdq_event;
> > u32 cmdq_vblank_cnt;
> > wait_queue_head_t cb_blocking_queue;
> > +
> > + struct cmdq_client sec_cmdq_client;
> > + struct cmdq_pkt sec_cmdq_handle;
> > + bool sec_cmdq_working;
> > + wait_queue_head_t sec_cb_blocking_queue;
> > #endif
> >
> > struct device *mmsys_dev;
> > @@ -67,6 +72,7 @@ struct mtk_drm_crtc {
> > /* lock for display hardware access */
> > struct mutex hw_lock;
> > bool config_updating;
> > + bool sec_on;
> > };
> >
> > struct mtk_crtc_state {
> > @@ -109,6 +115,154 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
> > }
> > }
> >
> > +void mtk_crtc_disable_secure_state(struct drm_crtc *crtc)
> > +{
> > +#if IS_REACHABLE(CONFIG_MTK_CMDQ)
> > + enum cmdq_sec_scenario sec_scn = CMDQ_MAX_SEC_COUNT;
> > + int i;
> > + struct mtk_ddp_comp *ddp_first_comp;
> > + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
> > + u64 sec_engine = 0; /* for hw engine write output secure fb */
> > + u64 sec_port = 0; /* for larb port read input secure fb */
> > +
> > + mutex_lock(&mtk_crtc->hw_lock);
> > +
> > + if (!mtk_crtc->sec_cmdq_client.chan) {
> > + pr_err("crtc-%d secure mbox channel is NULL\n", drm_crtc_index(crtc));
> > + goto err;
> > + }
> > +
> > + if (!mtk_crtc->sec_on) {
> > + pr_debug("crtc-%d is already disabled!\n", drm_crtc_index(crtc));
> > + goto err;
> > + }
> > +
> > + mbox_flush(mtk_crtc->sec_cmdq_client.chan, 0);
> > + mtk_crtc->sec_cmdq_handle.cmd_buf_size = 0;
> > +
> > + if (mtk_crtc->sec_cmdq_handle.sec_data) {
> > + struct cmdq_sec_data *sec_data;
> > +
> > + sec_data = mtk_crtc->sec_cmdq_handle.sec_data;
> > + sec_data->addrMetadataCount = 0;
> > + sec_data->addrMetadatas = (uintptr_t)NULL;
> > + }
> > +
> > + /*
> > + * Secure path only support DL mode, so we just wait
> > + * the first path frame done here
> > + */
> > + cmdq_pkt_wfe(&mtk_crtc->sec_cmdq_handle, mtk_crtc->cmdq_event, false);
> > +
> > + ddp_first_comp = mtk_crtc->ddp_comp[0];
> > + for (i = 0; i < mtk_crtc->layer_nr; i++) {
> > + struct drm_plane *plane = &mtk_crtc->planes[i];
> > +
> > + sec_port |= mtk_ddp_comp_layer_get_sec_port(ddp_first_comp, i);
>
> sec_port is useless, so remove it.
>
> > +
> > + /* make sure secure layer off before switching secure state */
> > + if (!mtk_plane_fb_is_secure(plane->state->fb)) {
> > + struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state);
> > +
> > + plane_state->pending.enable = false;
> > + mtk_ddp_comp_layer_config(ddp_first_comp, i, plane_state,
> > + &mtk_crtc->sec_cmdq_handle);
> > + }
> > + }
> > +
> > + /* Disable secure path */
> > + if (drm_crtc_index(crtc) == 0)
> > + sec_scn = CMDQ_SEC_PRIMARY_DISP_DISABLE;
> > + else if (drm_crtc_index(crtc) == 1)
> > + sec_scn = CMDQ_SEC_SUB_DISP_DISABLE;
> > +
> > + cmdq_sec_pkt_set_data(&mtk_crtc->sec_cmdq_handle, sec_engine, sec_engine, sec_scn);
> > +
> > + cmdq_pkt_finalize(&mtk_crtc->sec_cmdq_handle);
> > + dma_sync_single_for_device(mtk_crtc->sec_cmdq_client.chan->mbox->dev,
> > + mtk_crtc->sec_cmdq_handle.pa_base,
> > + mtk_crtc->sec_cmdq_handle.cmd_buf_size,
> > + DMA_TO_DEVICE);
> > +
> > + mtk_crtc->sec_cmdq_working = true;
> > + mbox_send_message(mtk_crtc->sec_cmdq_client.chan, &mtk_crtc->sec_cmdq_handle);
> > + mbox_client_txdone(mtk_crtc->sec_cmdq_client.chan, 0);
> > +
> > + // Wait for sec state to be disabled by cmdq
> > + wait_event_timeout(mtk_crtc->sec_cb_blocking_queue,
> > + !mtk_crtc->sec_cmdq_working,
> > + msecs_to_jiffies(500));
> > +
> > + mtk_crtc->sec_on = false;
> > + pr_debug("crtc-%d disable secure plane!\n", drm_crtc_index(crtc));
> > +
> > +err:
> > + mutex_unlock(&mtk_crtc->hw_lock);
> > +#endif
> > +}
> > +
> > +#if IS_REACHABLE(CONFIG_MTK_CMDQ)
> > +static void mtk_crtc_enable_secure_state(struct drm_crtc *crtc)
> > +{
> > + enum cmdq_sec_scenario sec_scn = CMDQ_MAX_SEC_COUNT;
> > + int i;
> > + struct mtk_ddp_comp *ddp_first_comp;
> > + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
> > + u64 sec_engine = 0; /* for hw engine write output secure fb */
> > + u64 sec_port = 0; /* for larb port read input secure fb */
> > +
> > + cmdq_pkt_wfe(&mtk_crtc->sec_cmdq_handle, mtk_crtc->cmdq_event, false);
> > +
> > + ddp_first_comp = mtk_crtc->ddp_comp[0];
> > + for (i = 0; i < mtk_crtc->layer_nr; i++)
> > + if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
> > + sec_port |= mtk_ddp_comp_layer_get_sec_port(ddp_first_comp, i);
> > +
> > + if (drm_crtc_index(crtc) == 0)
> > + sec_scn = CMDQ_SEC_PRIMARY_DISP;
> > + else if (drm_crtc_index(crtc) == 1)
> > + sec_scn = CMDQ_SEC_SUB_DISP;
> > +
> > + cmdq_sec_pkt_set_data(&mtk_crtc->sec_cmdq_handle, sec_engine, sec_port, sec_scn);
>
> In the cmdq driver, sec_engine means an engine which needs DAPC. You
> set 0 to sec_engine; does it mean that no engine is protected by DAPC?
> If OVL is not protected by DAPC, I think we could use a normal cmdq
> thread to write the OVL registers instead of a secure cmdq thread.
>

We enable DAPC protection for engines that are able to write data to
the DRAM address set in their registers, such as WDMA and WROT, to
prevent their registers from being pointed at a normal DRAM address.

We enable larb port protection for engines that read data from DRAM,
such as OVL, RDMA and MDP_RDMA, to prevent secure DRAM from being read
through a non-secure larb port. So we don't need to enable DAPC for
these engines.

Whether it is DAPC protection or larb port protection, both need
sec_engine to tell the TEE which engines have to be protected.
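
For example, here is a minimal sketch of how those flags end up in the
packet. Only cmdq_sec_pkt_set_data() and the scenario enum come from
this series; the engine/port bit definitions below are hypothetical
placeholders, the real values live in the cmdq-sec header shared with
the TEE.

/* Hypothetical flag bits, for illustration only. */
#define SEC_ENG_DISP_WDMA0	BIT(0)	/* writes to DRAM -> needs DAPC */
#define SEC_PORT_DISP_RDMA0	BIT(1)	/* reads from DRAM -> larb port */

static void example_compose_sec_flags(struct cmdq_pkt *pkt)
{
	u64 sec_engine = SEC_ENG_DISP_WDMA0;	/* DAPC-protected engines */
	u64 sec_port = SEC_PORT_DISP_RDMA0;	/* larb ports reading secure fb */

	cmdq_sec_pkt_set_data(pkt, sec_engine, sec_port, CMDQ_SEC_PRIMARY_DISP);
}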

But OVL is a special HW engine: it can only set its DISP_REG_OVL_SECURE
register in [PATCH v2 07/11] to enable its larb port protection, so OVL
does not need to set sec_engine. However, we will move that part into
the TEE secure world, which means the OVL sec_engine bit will be set
here in the next version.
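
For reference, the kernel-side write is roughly like the sketch below.
This is only an illustration: mtk_ddp_write_mask() and struct
mtk_disp_ovl exist in the driver, but the per-layer bit layout of
DISP_REG_OVL_SECURE is an assumption here, see [PATCH v2 07/11] for the
real code.

/* Sketch: enable larb port protection for one OVL layer by setting its
 * (assumed) per-layer bit in DISP_REG_OVL_SECURE. */
static void example_ovl_set_layer_secure(struct mtk_disp_ovl *ovl,
					 unsigned int idx, bool secure,
					 struct cmdq_pkt *cmdq_pkt)
{
	mtk_ddp_write_mask(cmdq_pkt, secure ? BIT(idx) : 0,
			   &ovl->cmdq_reg, ovl->regs,
			   DISP_REG_OVL_SECURE, BIT(idx));
}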

> It's weird that the normal world can decide which engine is protected
> by DAPC. If a hacker sets 0 for the output engine, would the output
> engine then not be protected?
>
If a hacker sets 0 for the output engine, the TEE will detect that the
output engine's sec_engine was not set from the normal world by
verifying the instructions in which the output engine is programmed
with the secure handle.

We still need to set sec_engine so that the secure world can check that
all the sec_engine flags match the scenario and pass the instruction
verification there.
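
In other words, the normal-world flags are only a declaration that the
secure world cross-checks against the command buffer. A rough,
hypothetical sketch of that check (the names and structures here are
invented for illustration, this is not the real TEE code):

/* Hypothetical per-instruction metadata derived by the secure world. */
struct sec_inst {
	u64 engine_bit;		/* engine owning the programmed register */
	bool uses_sec_handle;	/* instruction writes a secure buffer handle */
};

/* Reject the packet if an engine touches a secure handle without being
 * declared in the sec_engine/sec_port flags from the normal world. */
static int tee_verify_sec_engines(const struct sec_inst *insts, int n,
				  u64 declared)
{
	int i;

	for (i = 0; i < n; i++)
		if (insts[i].uses_sec_handle &&
		    !(declared & insts[i].engine_bit))
			return -EPERM;

	return 0;
}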

Regards,
Jason-JH.Lin

> Regards,
> CK
>
> > +
> > + pr_debug("crtc-%d enable secure plane!\n", drm_crtc_index(crtc));
> > +}
> > +#endif
> >