From 362ca36c866be050dce9b8a546e1ead60abd1409 Mon Sep 17 00:00:00 2001 From: Alex Jones Date: Sat, 6 Dec 2025 22:41:30 +0000 Subject: [PATCH 1/2] [ot] hw/opentitan: ot_otp_engine: Replace digest BH with a timer From testing, it seems that the use of Bottom Halves on hosts under higher processing loads (where QEMU is more often pre-empted) can produce inconsistent / slow results. Timers with a short timeout provide the same decoupling functionality as the BH but with more consistency and expedience. This could be especially problematic for SW which expects digest writes to be processed within a certain time. If handling of the BH was deferred long enough, then enough guest code could be processed for it to look like e.g. 10 ms had passed, exceeding common SW timeouts. This could be seen happening occasionally under high processing loads. Signed-off-by: Alex Jones --- hw/opentitan/ot_otp_engine.c | 15 ++++++++++----- include/hw/opentitan/ot_otp_engine.h | 2 +- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/hw/opentitan/ot_otp_engine.c b/hw/opentitan/ot_otp_engine.c index 9ffcccfb83568..10fee61a92468 100644 --- a/hw/opentitan/ot_otp_engine.c +++ b/hw/opentitan/ot_otp_engine.c @@ -57,8 +57,11 @@ #define OT_OTP_HW_CLOCK QEMU_CLOCK_VIRTUAL_RT /* the following delays are arbitrary for now */ -#define DAI_DIGEST_DELAY_NS 50000u /* 50us */ -#define LCI_PROG_SCHED_NS 1000u /* 1us*/ +#define DAI_DIGEST_DELAY_NS 50000 /* 50us */ +#define LCI_PROG_SCHED_NS 1000 /* 1us */ + +/* Use a timer with a small delay instead of a BH for reliability */ +#define DIGEST_DECOUPLE_DELAY_NS 100 /* 100 ns */ /* The size of keys used for OTP scrambling */ #define OTP_SCRAMBLING_KEY_WIDTH 128u @@ -1622,7 +1625,8 @@ static void ot_otp_engine_dai_complete(void *opaque) break; case OT_OTP_DAI_DIG_WAIT: g_assert(s->dai->partition >= 0); - qemu_bh_schedule(s->dai->digest_bh); + int64_t now = qemu_clock_get_ns(OT_VIRTUAL_CLOCK); + timer_mod(s->dai->digest_write, now + 
DIGEST_DECOUPLE_DELAY_NS); break; case OT_OTP_DAI_ERROR: break; @@ -2746,11 +2750,11 @@ static void ot_otp_engine_reset_enter(Object *obj, ResetType type) c->parent_phases.enter(obj, type); } - qemu_bh_cancel(s->dai->digest_bh); qemu_bh_cancel(s->lc_broadcast.bh); qemu_bh_cancel(s->pwr_otp_bh); timer_del(s->dai->delay); + timer_del(s->dai->digest_write); timer_del(s->lci->prog_delay); qemu_bh_cancel(s->keygen->entropy_bh); s->keygen->edn_sched = false; @@ -2900,7 +2904,8 @@ static void ot_otp_engine_init(Object *obj) s->dai->delay = timer_new_ns(OT_VIRTUAL_CLOCK, &ot_otp_engine_dai_complete, s); - s->dai->digest_bh = qemu_bh_new(&ot_otp_engine_dai_write_digest, s); + s->dai->digest_write = + timer_new_ns(OT_VIRTUAL_CLOCK, &ot_otp_engine_dai_write_digest, s); s->lci->prog_delay = timer_new_ns(OT_OTP_HW_CLOCK, &ot_otp_engine_lci_write_word, s); s->pwr_otp_bh = qemu_bh_new(&ot_otp_engine_pwr_otp_bh, s); diff --git a/include/hw/opentitan/ot_otp_engine.h b/include/hw/opentitan/ot_otp_engine.h index 07ee708ed7a8f..6bfde4c8d300a 100644 --- a/include/hw/opentitan/ot_otp_engine.h +++ b/include/hw/opentitan/ot_otp_engine.h @@ -165,7 +165,7 @@ typedef struct { typedef struct OtOTPDAIController { QEMUTimer *delay; /* simulate delayed access completion */ - QEMUBH *digest_bh; /* write computed digest to OTP cell */ + QEMUTimer *digest_write; /* write computed digest to OTP cell */ OtOTPDAIState state; int partition; /* current partition being worked on or -1 */ } OtOTPDAIController; From a4f79c65be06da5c27616cd9c06380c21194db6a Mon Sep 17 00:00:00 2001 From: Alex Jones Date: Sun, 7 Dec 2025 03:22:49 +0000 Subject: [PATCH 2/2] [ot] hw/opentitan: ot_otp_engine: Replace entropy BH with a timer From testing, it seems that the use of Bottom Halves on hosts under higher processing loads (where QEMU is more often pre-empted) can produce inconsistent / slow results. Timers with a short timeout provide the same decoupling functionality as the BH but with more consistency and expedience.
Signed-off-by: Alex Jones --- hw/opentitan/ot_otp_engine.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/hw/opentitan/ot_otp_engine.c b/hw/opentitan/ot_otp_engine.c index 10fee61a92468..7ae0657af0ca4 100644 --- a/hw/opentitan/ot_otp_engine.c +++ b/hw/opentitan/ot_otp_engine.c @@ -61,7 +61,8 @@ #define LCI_PROG_SCHED_NS 1000 /* 1us */ /* Use a timer with a small delay instead of a BH for reliability */ -#define DIGEST_DECOUPLE_DELAY_NS 100 /* 100 ns */ +#define DIGEST_DECOUPLE_DELAY_NS 100 /* 100 ns */ +#define ENTROPY_RESCHEDULE_DELAY_NS 100 /* 100 ns */ /* The size of keys used for OTP scrambling */ #define OTP_SCRAMBLING_KEY_WIDTH 128u @@ -206,7 +207,7 @@ struct OtOTPScrmblKeyInit_ { }; struct OtOTPKeyGen_ { - QEMUBH *entropy_bh; + QEMUTimer *request_entropy; OtPresentState *present; OtPrngState *prng; OtFifo32 entropy_buf; @@ -1648,7 +1649,7 @@ static const OtOTPHWCfg *ot_otp_engine_get_hw_cfg(const OtOTPIf *dev) return (const OtOTPHWCfg *)s->hw_cfg; } -static void ot_otp_engine_request_entropy_bh(void *opaque) +static void ot_otp_engine_request_entropy(void *opaque) { OtOTPEngineState *s = opaque; @@ -1683,7 +1684,9 @@ ot_otp_engine_keygen_push_entropy(void *opaque, uint32_t bits, bool fips) resched); if (resched && !s->keygen->edn_sched) { - qemu_bh_schedule(s->keygen->entropy_bh); + int64_t now = qemu_clock_get_ns(OT_OTP_HW_CLOCK); + timer_mod(s->keygen->request_entropy, + now + ENTROPY_RESCHEDULE_DELAY_NS); } } @@ -1817,7 +1820,9 @@ static void ot_otp_engine_generate_scrambling_key( if (needed_entropy) { /* some entropy bits have been used, refill the buffer */ - qemu_bh_schedule(s->keygen->entropy_bh); + int64_t now = qemu_clock_get_ns(OT_OTP_HW_CLOCK); + timer_mod(s->keygen->request_entropy, + now + ENTROPY_RESCHEDULE_DELAY_NS); } } @@ -2756,7 +2761,7 @@ static void ot_otp_engine_reset_enter(Object *obj, ResetType type) timer_del(s->dai->delay); timer_del(s->dai->digest_write); timer_del(s->lci->prog_delay); - 
qemu_bh_cancel(s->keygen->entropy_bh); + timer_del(s->keygen->request_entropy); s->keygen->edn_sched = false; memset(s->hw_cfg, 0, sizeof(*s->hw_cfg)); @@ -2815,7 +2820,8 @@ static void ot_otp_engine_reset_exit(Object *obj, ResetType type) ot_edn_connect_endpoint(s->edn, s->edn_ep, &ot_otp_engine_keygen_push_entropy, s); - qemu_bh_schedule(s->keygen->entropy_bh); + int64_t now = qemu_clock_get_ns(OT_OTP_HW_CLOCK); + timer_mod(s->keygen->request_entropy, now + ENTROPY_RESCHEDULE_DELAY_NS); } static void ot_otp_engine_realize(DeviceState *dev, Error **errp) @@ -2908,9 +2914,10 @@ static void ot_otp_engine_init(Object *obj) timer_new_ns(OT_VIRTUAL_CLOCK, &ot_otp_engine_dai_write_digest, s); s->lci->prog_delay = timer_new_ns(OT_OTP_HW_CLOCK, &ot_otp_engine_lci_write_word, s); + s->keygen->request_entropy = + timer_new_ns(OT_OTP_HW_CLOCK, &ot_otp_engine_request_entropy, s); s->pwr_otp_bh = qemu_bh_new(&ot_otp_engine_pwr_otp_bh, s); s->lc_broadcast.bh = qemu_bh_new(&ot_otp_engine_lc_broadcast_bh, s); - s->keygen->entropy_bh = qemu_bh_new(&ot_otp_engine_request_entropy_bh, s); for (unsigned part_ix = 0; part_ix < s->part_count; part_ix++) { if (!s->part_descs[part_ix].buffered) {