Re: [PATCH v7 07/10] tpm, tpm_tis: Implement usage counter for locality

From: Jarkko Sakkinen
Date: Thu Jun 30 2022 - 19:31:51 EST


On Fri, Jul 01, 2022 at 02:29:47AM +0300, Jarkko Sakkinen wrote:
> On Thu, Jun 30, 2022 at 01:26:50AM +0200, Lino Sanfilippo wrote:
> > From: Lino Sanfilippo <l.sanfilippo@xxxxxxxxxx>
> >
> > Implement a usage counter for the (default) locality used by the TPM TIS
> > driver:
> > Request the locality from the TPM only if it has not been claimed yet,
> > otherwise just increment the counter. On release, decrement the counter
> > and only release the locality once the counter reaches zero. Ensure
> > thread-safety by protecting the counter with a mutex.
> >
> > This allows a thread and the interrupt handler to request and release the
> > locality at the same time without the danger of interfering with each
> > other.
> >
> > While at it, rename the amended functions to use the proper tpm_tis_
> > prefix.
> >
> > Signed-off-by: Lino Sanfilippo <l.sanfilippo@xxxxxxxxxx>
> > Tested-by: Michael Niewöhner <linux@xxxxxxxxxxxxxx>
> > ---
> > drivers/char/tpm/tpm_tis_core.c | 75 ++++++++++++++++++++++-----------
> > drivers/char/tpm/tpm_tis_core.h | 2 +
> > 2 files changed, 53 insertions(+), 24 deletions(-)
> >
> > diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
> > index bd4eeb0b2192..e50a2c78de9f 100644
> > --- a/drivers/char/tpm/tpm_tis_core.c
> > +++ b/drivers/char/tpm/tpm_tis_core.c
> > @@ -165,16 +165,27 @@ static bool check_locality(struct tpm_chip *chip, int l)
> > return false;
> > }
> >
> > -static int release_locality(struct tpm_chip *chip, int l)
> > +static int tpm_tis_release_locality_locked(struct tpm_tis_data *priv, int l)
> > +{
> > + tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
> > +
> > + return 0;
> > +}
> > +
> > +static int tpm_tis_release_locality(struct tpm_chip *chip, int l)
> > {
> > struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> >
> > - tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY);
> > + mutex_lock(&priv->locality_count_mutex);
> > + priv->locality_count--;
> > + if (priv->locality_count == 0)
> > + tpm_tis_release_locality_locked(priv, l);
> > + mutex_unlock(&priv->locality_count_mutex);
> >
> > return 0;
> > }
> >
> > -static int request_locality(struct tpm_chip *chip, int l)
> > +static int tpm_tis_request_locality_locked(struct tpm_chip *chip, int l)
> > {
> > struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> > unsigned long stop, timeout;
> > @@ -215,6 +226,20 @@ static int request_locality(struct tpm_chip *chip, int l)
> > return -1;
> > }
> >
> > +static int tpm_tis_request_locality(struct tpm_chip *chip, int l)
> > +{
> > + struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> > + int ret = 0;
> > +
> > + mutex_lock(&priv->locality_count_mutex);
> > + if (priv->locality_count == 0)
> > + ret = tpm_tis_request_locality_locked(chip, l);
> > + if (!ret)
> > + priv->locality_count++;
> > + mutex_unlock(&priv->locality_count_mutex);
> > + return ret;
> > +}
> > +
> > static u8 tpm_tis_status(struct tpm_chip *chip)
> > {
> > struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
> > @@ -668,7 +693,7 @@ static int probe_itpm(struct tpm_chip *chip)
> > if (vendor != TPM_VID_INTEL)
> > return 0;
> >
> > - if (request_locality(chip, 0) != 0)
> > + if (tpm_tis_request_locality(chip, 0) != 0)
> > return -EBUSY;
> >
> > rc = tpm_tis_send_data(chip, cmd_getticks, len);
> > @@ -689,7 +714,7 @@ static int probe_itpm(struct tpm_chip *chip)
> >
> > out:
> > tpm_tis_ready(chip);
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> >
> > return rc;
> > }
> > @@ -751,7 +776,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
> > cap_t cap;
> > int ret;
> >
> > - ret = request_locality(chip, 0);
> > + ret = tpm_tis_request_locality(chip, 0);
> > if (ret < 0)
> > return ret;
> >
> > @@ -760,7 +785,7 @@ static int tpm_tis_gen_interrupt(struct tpm_chip *chip)
> > else
> > ret = tpm1_getcap(chip, TPM_CAP_PROP_TIS_TIMEOUT, &cap, desc, 0);
> >
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> >
> > return ret;
> > }
> > @@ -785,33 +810,33 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
> > }
> > priv->irq = irq;
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0)
> > return rc;
> >
> > rc = tpm_tis_read8(priv, TPM_INT_VECTOR(priv->locality),
> > &original_int_vec);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > rc = tpm_tis_write8(priv, TPM_INT_VECTOR(priv->locality), irq);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > rc = tpm_tis_read32(priv, TPM_INT_STATUS(priv->locality), &int_status);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > /* Clear all existing */
> > rc = tpm_tis_write32(priv, TPM_INT_STATUS(priv->locality), int_status);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > @@ -819,11 +844,11 @@ static int tpm_tis_probe_irq_single(struct tpm_chip *chip, u32 intmask,
> > rc = tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality),
> > intmask | TPM_GLOBAL_INT_ENABLE);
> > if (rc < 0) {
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > return rc;
> > }
> >
> > - release_locality(chip, priv->locality);
> > + tpm_tis_release_locality(chip, priv->locality);
> > clear_bit(TPM_TIS_IRQ_TESTED, &priv->flags);
> >
> > /* Generate an interrupt by having the core call through to
> > @@ -959,8 +984,8 @@ static const struct tpm_class_ops tpm_tis = {
> > .req_complete_mask = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
> > .req_complete_val = TPM_STS_DATA_AVAIL | TPM_STS_VALID,
> > .req_canceled = tpm_tis_req_canceled,
> > - .request_locality = request_locality,
> > - .relinquish_locality = release_locality,
> > + .request_locality = tpm_tis_request_locality,
> > + .relinquish_locality = tpm_tis_release_locality,
> > .clk_enable = tpm_tis_clkrun_enable,
> > };
> >
> > @@ -994,6 +1019,8 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> > priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
> > priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
> > priv->phy_ops = phy_ops;
> > + priv->locality_count = 0;
> > + mutex_init(&priv->locality_count_mutex);
> >
> > dev_set_drvdata(&chip->dev, priv);
> >
> > @@ -1071,14 +1098,14 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> >
> > intmask &= ~TPM_GLOBAL_INT_ENABLE;
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0) {
> > rc = -ENODEV;
> > goto out_err;
> > }
> >
> > tpm_tis_write32(priv, TPM_INT_ENABLE(priv->locality), intmask);
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> >
> > rc = tpm_chip_start(chip);
> > if (rc)
> > @@ -1112,13 +1139,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> > * proper timeouts for the driver.
> > */
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0)
> > goto out_err;
> >
> > rc = tpm_get_timeouts(chip);
> >
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> >
> > if (rc) {
> > dev_err(dev, "Could not get TPM timeouts and durations\n");
> > @@ -1138,11 +1165,11 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
> > dev_err(&chip->dev, FW_BUG
> > "TPM interrupt not working, polling instead\n");
> >
> > - rc = request_locality(chip, 0);
> > + rc = tpm_tis_request_locality(chip, 0);
> > if (rc < 0)
> > goto out_err;
> > disable_interrupts(chip);
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> > }
> > }
> >
> > @@ -1209,13 +1236,13 @@ int tpm_tis_resume(struct device *dev)
> > * an error code but for unknown reason it isn't handled.
> > */
> > if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
> > - ret = request_locality(chip, 0);
> > + ret = tpm_tis_request_locality(chip, 0);
> > if (ret < 0)
> > return ret;
> >
> > tpm1_do_selftest(chip);
> >
> > - release_locality(chip, 0);
> > + tpm_tis_release_locality(chip, 0);
> > }
> >
> > return 0;
> > diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
> > index e005eb99480e..7c6c14707e31 100644
> > --- a/drivers/char/tpm/tpm_tis_core.h
> > +++ b/drivers/char/tpm/tpm_tis_core.h
> > @@ -91,6 +91,8 @@ enum tpm_tis_flags {
> >
> > struct tpm_tis_data {
> > u16 manufacturer_id;
> > + struct mutex locality_count_mutex;
> > + unsigned int locality_count;
> > int locality;
> > int irq;
> > unsigned int int_mask;
> > --
> > 2.25.1
> >
>
> I'm kind of thinking: should tpm_tis_data have a lock for its
> contents?
>
> I kind of doubt that we would ever need more than one lock for it,
> and it would give some more assurance that there are no races. Especially
> when re-enabling interrupts this feels important, to be "extra safe".
>
> I looked at this commit and did not see anything that would prevent
> using a spin lock instead of a mutex. With a spin lock, priv can also
> be accessed in interrupt context.
>
> So instead, prepend this patch with a patch that adds:
>
> spinlock_t lock;
>
> And something like:
>
> static inline struct tpm_tis_data *tpm_tis_priv_get(struct tpm_chip *chip)
> {
>         struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
>
>         spin_lock(&priv->lock);
>         return priv;
> }
>
> static inline void tpm_tis_priv_put(struct tpm_tis_data *priv)
> {
>         spin_unlock(&priv->lock);
> }
>
> And change the sites where priv is used so that the instance is acquired
> with these helpers.

I.e.

struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);

becomes:

struct tpm_tis_data *priv = tpm_tis_priv_get(chip);

In some sites the acquisition most likely must be done later, e.g.
because of the locking order with the chip's lock (perhaps).
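
For illustration, a rough sketch (not from the posted series) of how e.g. the
release path of this patch could look on top of those helpers, with the
counter protected by priv->lock instead of a dedicated mutex. It assumes the
underlying register write does not sleep while the spin lock is held:

static int tpm_tis_release_locality(struct tpm_chip *chip, int l)
{
        /* priv->lock serializes the counter update and the register write. */
        struct tpm_tis_data *priv = tpm_tis_priv_get(chip);

        priv->locality_count--;
        if (priv->locality_count == 0)
                tpm_tis_release_locality_locked(priv, l);

        tpm_tis_priv_put(priv);

        return 0;
}

The request path would follow the same pattern, but its wait loop in
tpm_tis_request_locality_locked() would need a closer look, since sleeping
is not allowed while holding a spin lock.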

BR, Jarkko