From ae0c6f651d485e308b2d80b8b73ca0cc83971b58 Mon Sep 17 00:00:00 2001 From: Jan Henrik Weinstock Date: Fri, 1 Nov 2024 18:43:09 +0100 Subject: [PATCH] rviommu: test tablewalks, context fetches and counters --- src/vcml/models/riscv/iommu.cpp | 34 ++++--- test/models/riscv_iommu.cpp | 164 +++++++++++++++++++++++++++++++- 2 files changed, 182 insertions(+), 16 deletions(-) diff --git a/src/vcml/models/riscv/iommu.cpp b/src/vcml/models/riscv/iommu.cpp index 0c88f138..e8b3e2a7 100644 --- a/src/vcml/models/riscv/iommu.cpp +++ b/src/vcml/models/riscv/iommu.cpp @@ -269,8 +269,8 @@ enum fctl_bits : u32 { FCTL_GXL = bit(2), }; -using DDTP_IOMMU_MODE = field<0, 4, u64>; -using DDTP_IOMMU_PPN = field; +using DDTP_MODE = field<0, 4, u64>; +using DDTP_PPN = field; enum ddtp_bits : u64 { DDTP_MODE_OFF = 0, @@ -383,7 +383,7 @@ bool iommu::dma_write(u64 addr, void* data, size_t size, bool* excl, return true; if (excl && m_dma_xptr && addr == m_dma_addr && - mwr::atomic_cas(m_dma_xptr, &m_dma_xval, &data, size)) { + mwr::atomic_cas(m_dma_xptr, &m_dma_xval, data, size)) { m_dma_addr = ~0ull; m_dma_xptr = nullptr; m_dma_xval = 0; @@ -530,8 +530,8 @@ int iommu::fetch_context(u32 devid, u32 procid, bool dbg, bool dmi, size_t depth = 0; size_t ddidx[3] = { 0, 0, 0 }; - u64 mode = ddtp.get_field(); - u64 addr = ddtp.get_field() << PAGE_BITS; + u64 mode = ddtp.get_field(); + u64 addr = ddtp.get_field() << PAGE_BITS; switch (mode) { case DDTP_MODE_OFF: @@ -583,7 +583,7 @@ int iommu::fetch_context(u32 devid, u32 procid, bool dbg, bool dmi, u64 rawctx[8]; memset(rawctx, 0, sizeof(rawctx)); - size_t ctxsz = msi_flat ? 8 : 4; + size_t ctxsz = (msi_flat ? 
8 : 4) * sizeof(u64); if (!dma_read(addr + ddidx[0] * ctxsz, rawctx, ctxsz, false, dbg)) return IOMMU_FAULT_DDT_LOAD_FAULT; @@ -695,7 +695,7 @@ int iommu::fetch_iotlb(context& ctx, u64 virt, bool wnr, bool dbg, bool dmi, } iotlb iotlb_s; - int fault = tablewalk(ctx, virt, wnr, false, false, dbg, iotlb_s); + int fault = tablewalk(ctx, virt, false, wnr, false, dbg, iotlb_s); if (fault == TWALK_FAULT_G_STAGE) return IOMMU_PAGE_FAULT_R; if (fault != TWALK_FAULT_NONE) @@ -703,13 +703,15 @@ int iommu::fetch_iotlb(context& ctx, u64 virt, bool wnr, bool dbg, bool dmi, iotlb iotlb_g; u64 guest_phys = iotlb_s.ppn << PAGE_BITS; - if (tablewalk(ctx, guest_phys, wnr, true, false, dbg, iotlb_g)) + if (tablewalk(ctx, guest_phys, true, wnr, false, dbg, iotlb_g)) return iommu_page_fault(wnr); entry.vpn = iotlb_s.vpn; entry.ppn = iotlb_g.ppn; entry.r = iotlb_s.r && iotlb_g.r; entry.w = iotlb_s.w && iotlb_g.w; + entry.gscid = gscid; + entry.pscid = pscid; entry.pbmt = iotlb_s.pbmt | iotlb_g.pbmt; if (!dbg) @@ -877,8 +879,8 @@ int iommu::tablewalk(context& ctx, u64 virt, bool g, bool wnr, bool ind, entry.vpn = virt >> PAGE_BITS; entry.ppn = get_field(pte); - entry.r = pte & PTE_R; - entry.w = pte & PTE_W; + entry.r = !!(pte & PTE_R); + entry.w = !!(pte & PTE_W); entry.pbmt = 0; if (svpbmt) @@ -936,9 +938,6 @@ bool iommu::translate(const tlm_generic_payload& tx, const tlm_sbi& info, return false; } - if (!info.is_debug && !dmi) - increment_counter(ctx, IOMMU_EVENT_UX_REQ); - err = fetch_iotlb(ctx, virt, tx.is_write(), info.is_debug, dmi, entry); if (err) { if (!info.is_debug && !dmi) { @@ -957,6 +956,13 @@ bool iommu::translate(const tlm_generic_payload& tx, const tlm_sbi& info, return false; } + if (!info.is_debug && !dmi) { + if (ddtp.get_field() == DDTP_MODE_BARE) + increment_counter(ctx, IOMMU_EVENT_UX_REQ); + else + increment_counter(ctx, IOMMU_EVENT_TX_REQ); + } + return true; } @@ -1078,7 +1084,7 @@ void iommu::write_fctl(u32 val) { } void iommu::write_ddtp(u64 val) { - u64 mask 
= DDTP_IOMMU_MODE::MASK | DDTP_IOMMU_PPN::MASK; + u64 mask = DDTP_MODE::MASK | DDTP_PPN::MASK; if (((ddtp ^ val) & mask) == 0) return; // no change diff --git a/test/models/riscv_iommu.cpp b/test/models/riscv_iommu.cpp index 68546857..2ba7a998 100644 --- a/test/models/riscv_iommu.cpp +++ b/test/models/riscv_iommu.cpp @@ -17,8 +17,26 @@ const u64 IOMMU_ADDR = 0x40000000; const u64 IOMMU_SIZE = 1 * KiB; const u64 IOMMU_CAPS = IOMMU_ADDR + 0; -// const u64 IOMMU_FCTL = IOMMU_ADDR + 8; -// const u64 IOMMU_DDTP = IOMMU_ADDR + 16; +const u64 IOMMU_FCTL = IOMMU_ADDR + 8; +const u64 IOMMU_DDTP = IOMMU_ADDR + 16; +const u64 IOMMU_CNTINH = IOMMU_ADDR + 92; +const u64 IOMMU_HPMCYCLES = IOMMU_ADDR + 96; + +constexpr u64 iommu_iohpmctr(int i) { + return IOMMU_ADDR + 104 + i * 8; +} + +constexpr u64 iommu_iohpmevt(int i) { + return IOMMU_ADDR + 352 + i * 8; +} + +const u64 DDTP0_OFFSET = 16 * KiB; +const u64 DDTP0_ADDR = MEM_ADDR + DDTP0_OFFSET; +const u64 DDTP1_OFFSET = 32 * KiB; +const u64 DDTP1_ADDR = MEM_ADDR + DDTP1_OFFSET; + +const u64 PGTP_OFFSET = 64 * KiB; +const u64 PGTP_ADDR = MEM_ADDR + PGTP_OFFSET; class iommu_test : public test_base { @@ -67,18 +85,160 @@ class iommu_test : public test_base rst.bind(bus.rst); rst.bind(iommu.rst); + dma.allow_dmi = false; + EXPECT_STREQ(iommu.kind(), "vcml::riscv::iommu"); } + bool enable_counters(bool en) { + for (int i = 1; i < 9; i++) { + if (failed(out.writew(iommu_iohpmevt(i), en ? 
i : 0u))) + return false; + if (failed(out.writew(iommu_iohpmctr(i), 0u))) + return false; + } + return true; + } + void test_capabilities() { u64 caps; ASSERT_OK(out.readw(IOMMU_CAPS, caps)); EXPECT_EQ(caps, 0x000001ece7ef8f10); } + void test_feature_control() { + u32 fctl; + ASSERT_OK(out.readw(IOMMU_FCTL, fctl)); + EXPECT_EQ(fctl, 0); + ASSERT_OK(out.writew(IOMMU_FCTL, 7u)); + ASSERT_OK(out.readw(IOMMU_FCTL, fctl)); + EXPECT_EQ(fctl, 6u); // only WSI and GXL writable + ASSERT_OK(out.writew(IOMMU_FCTL, 0u)); + } + + void test_iohpmcycles() { + u64 t1, t2, t3, t4; + ASSERT_OK(out.readw(IOMMU_HPMCYCLES, t1)); + wait(iommu.clk.cycles(100)); + ASSERT_OK(out.readw(IOMMU_HPMCYCLES, t2)); + EXPECT_EQ(t2 - t1, 100); + ASSERT_OK(out.writew(IOMMU_CNTINH, 1u)); + wait(iommu.clk.cycles(100)); + ASSERT_OK(out.readw(IOMMU_HPMCYCLES, t3)); + EXPECT_LT(t3 - t2, 10); + ASSERT_OK(out.writew(IOMMU_CNTINH, 0u)); + wait(iommu.clk.cycles(250)); + ASSERT_OK(out.readw(IOMMU_HPMCYCLES, t4)); + EXPECT_EQ(t4 - t3, 250); + } + + void test_iommu_off() { + out.writew(IOMMU_DDTP, 0u); + ASSERT_AE(dma.writew(MEM_ADDR, 0xffffffff)); + } + + void test_iommu_bare() { + out.writew(IOMMU_DDTP, 1u); + ASSERT_OK(dma.writew(MEM_ADDR, 0xffffffff)); + u32 data; + out.readw(MEM_ADDR, data); + EXPECT_EQ(data, 0xffffffff); + } + + void test_iommu_lvl1_dev1_bare() { + u64* ddtp = (u64*)(mem.data() + DDTP0_OFFSET); + ddtp[8] = 0x0000000000000011; // dev[1].tc = V | DTF + ddtp[9] = 0x0000000000000000; // dev[1].gatp = bare + ddtp[10] = 0x0000000000003000; // dev[1].ta.pscid = 3 + ddtp[11] = 0x0000000000000000; // dev[1].satp = bare + ddtp[12] = 0x0000000000000000; // dev[1].msiptp + ddtp[13] = 0x0000000000000000; // dev[1].msi_addr_mask + ddtp[14] = 0x0000000000000000; // dev[1].msi_addr_pattern + ddtp[15] = 0x0000000000000000; // dev[1].reserved + ASSERT_OK(out.writew(IOMMU_DDTP, DDTP0_ADDR | 2)); + + tlm_sbi info = sbi_cpuid(1); + ASSERT_OK(dma.writew(MEM_ADDR, 0xabababab, info)); + u32 data; + 
out.readw(MEM_ADDR, data); + EXPECT_EQ(data, 0xabababab); + } + + void test_iommu_lvl2_dev2_sv39() { + u64* pgtp = (u64*)(mem.data() + PGTP_OFFSET); + pgtp[0] = (MEM_ADDR >> 2) | 0x0f; // 0x0 -> MEM_ADDR | RWX | V + + u64* ddtp0 = (u64*)(mem.data() + DDTP0_OFFSET); + ddtp0[0] = DDTP1_ADDR >> 2 | 1; + + u64* ddtp1 = (u64*)(mem.data() + DDTP1_OFFSET); + u64 gatp = PGTP_ADDR >> 12 | 8ull << 60; + ddtp1[16] = 0x0000000000000091; // dev[2].tc = V | DTF | GADE + ddtp1[17] = gatp; // dev[2].gatp = sv39 + ddtp1[18] = 0x0000000000003000; // dev[2].ta.pscid = 3 + ddtp1[19] = 0x0000000000000000; // dev[2].satp = bare + ddtp1[20] = 0x0000000000000000; // dev[2].msiptp + ddtp1[21] = 0x0000000000000000; // dev[2].msi_addr_mask + ddtp1[22] = 0x0000000000000000; // dev[2].msi_addr_pattern + ddtp1[23] = 0x0000000000000000; // dev[2].reserved + ASSERT_OK(out.writew(IOMMU_DDTP, DDTP0_ADDR | 3)); + + EXPECT_TRUE(enable_counters(true)); + + tlm_sbi info = sbi_cpuid(2); + ASSERT_OK(dma.writew(0, 0xefefefef, info)); + u32 data; + out.readw(MEM_ADDR, data); + EXPECT_EQ(data, 0xefefefef); + ASSERT_OK(dma.writew(4, 0x12121212, info)); + out.readw(MEM_ADDR + 4, data); + EXPECT_EQ(data, 0x12121212); + + // check for DA update + EXPECT_EQ(pgtp[0], (MEM_ADDR >> 2) | 0xcf); + + u64 ux_reqs = 0; + u64 tx_reqs = 0; + u64 tlb_misses = 0; + u64 ddt_walks = 0; + u64 pdt_walks = 0; + u64 table_walks_s = 0; + u64 table_walks_g = 0; + + ASSERT_OK(out.readw(iommu_iohpmctr(1), ux_reqs)); + ASSERT_OK(out.readw(iommu_iohpmctr(2), tx_reqs)); + ASSERT_OK(out.readw(iommu_iohpmctr(4), tlb_misses)); + ASSERT_OK(out.readw(iommu_iohpmctr(5), ddt_walks)); + ASSERT_OK(out.readw(iommu_iohpmctr(6), pdt_walks)); + ASSERT_OK(out.readw(iommu_iohpmctr(7), table_walks_s)); + ASSERT_OK(out.readw(iommu_iohpmctr(8), table_walks_g)); + + EXPECT_EQ(ux_reqs, 0); + EXPECT_EQ(tx_reqs, 2); + EXPECT_EQ(tlb_misses, 1); + EXPECT_EQ(ddt_walks, 2); + EXPECT_EQ(pdt_walks, 0); + EXPECT_EQ(table_walks_s, 0); + EXPECT_EQ(table_walks_g, 1); + 
+ EXPECT_TRUE(enable_counters(false)); + } + virtual void run_test() override { test_capabilities(); wait(SC_ZERO_TIME); + test_feature_control(); + wait(SC_ZERO_TIME); + test_iohpmcycles(); + wait(SC_ZERO_TIME); + test_iommu_off(); + wait(SC_ZERO_TIME); + test_iommu_bare(); + wait(SC_ZERO_TIME); + test_iommu_lvl1_dev1_bare(); + wait(SC_ZERO_TIME); + test_iommu_lvl2_dev2_sv39(); + wait(SC_ZERO_TIME); } };