diff --git a/src/fw/drivers/dma.h b/src/fw/drivers/dma.h
index 53a4dfc0..d4b0c04f 100644
--- a/src/fw/drivers/dma.h
+++ b/src/fw/drivers/dma.h
@@ -41,7 +41,7 @@ void dma_request_start_direct(DMARequest *this, void *dst, const void *src, uint
 
 //! Starts a circular DMA transfer which calls the callback for when the transfer is both complete
 //! and half complete. The length should be specified in bytes.
-//! @note The destination address must not be in a cachable region of memory (i.e. SRAM on the F7).
+//! @note The destination address must not be in a cacheable region of memory (i.e. SRAM on the F7).
 //! See the comment within dma.c for more info.
 void dma_request_start_circular(DMARequest *this, void *dst, const void *src, uint32_t length,
                                 DMACircularRequestHandler handler, void *context);
diff --git a/src/fw/drivers/mpu.c b/src/fw/drivers/mpu.c
index dc0795b6..2a9bf0b7 100644
--- a/src/fw/drivers/mpu.c
+++ b/src/fw/drivers/mpu.c
@@ -209,11 +209,11 @@ void mpu_set_task_configurable_regions(MemoryRegion_t *memory_regions,
 }
 
-bool mpu_memory_is_cachable(const void *addr) {
+bool mpu_memory_is_cacheable(const void *addr) {
   if (!dcache_is_enabled()) {
     return false;
   }
-  // TODO PBL-37601: We're assuming only SRAM is cachable for now for simplicity sake. We should
+  // TODO PBL-37601: We're assuming only SRAM is cacheable for now for simplicity's sake. We should
   // account for MPU configuration and also the fact that memory-mapped QSPI access goes through the
   // cache.
   return ((uint32_t)addr >= SRAM_BASE) && ((uint32_t)addr < SRAM_END);
diff --git a/src/fw/drivers/mpu.h b/src/fw/drivers/mpu.h
index ca7da0b0..e960efec 100644
--- a/src/fw/drivers/mpu.h
+++ b/src/fw/drivers/mpu.h
@@ -59,6 +59,6 @@ void mpu_get_register_settings(const MpuRegion* region, uint32_t *base_address_r
 void mpu_set_task_configurable_regions(MemoryRegion_t *memory_regions,
                                        const MpuRegion **region_ptrs);
 
-bool mpu_memory_is_cachable(const void *addr);
+bool mpu_memory_is_cacheable(const void *addr);
 
 void mpu_init_region_from_region(MpuRegion *copy, const MpuRegion *from, bool allow_user_access);
diff --git a/src/fw/drivers/stm32f2/dma.c b/src/fw/drivers/stm32f2/dma.c
index 9e3e23cc..2bbe40e2 100644
--- a/src/fw/drivers/stm32f2/dma.c
+++ b/src/fw/drivers/stm32f2/dma.c
@@ -264,7 +264,7 @@ void dma_request_init(DMARequest *this) {
 ////////////////////////////////////////////////////////////////////////////////
 
 static void prv_validate_memory(DMARequest *this, void *dst, const void *src, uint32_t length) {
-  if (mpu_memory_is_cachable(src)) {
+  if (mpu_memory_is_cacheable(src)) {
     // Flush the source buffer from cache so that SRAM has the correct data.
     uintptr_t aligned_src = (uintptr_t)src;
     size_t aligned_length = length;
@@ -273,11 +273,11 @@ static void prv_validate_memory(DMARequest *this, void *dst, const void *src, ui
   }
 
   const uint32_t alignment_mask = prv_get_data_size_bytes(this) - 1;
-  if (mpu_memory_is_cachable(dst)) {
+  if (mpu_memory_is_cacheable(dst)) {
     // If a cache line within the dst gets evicted while we do the transfer, it'll corrupt SRAM, so
     // just invalidate it now.
     dcache_invalidate(dst, length);
-    // since the dst address is cachable, it needs to be aligned to a cache line and
+    // since the dst address is cacheable, it needs to be aligned to a cache line and
     // the length must be an even multiple of cache lines
     const uint32_t dst_alignment_mask = dcache_alignment_mask_minimum(alignment_mask);
     PBL_ASSERTN(((length & dst_alignment_mask) == 0) &&
@@ -377,11 +377,11 @@ void dma_request_start_circular(DMARequest *this, void *dst, const void *src, ui
   PBL_ASSERTN(!this->stream->state->current_request);
   this->stream->state->current_request = this;
 
-  // TODO: We don't currently support DMA'ing into a cachable region of memory (i.e. SRAM) for
+  // TODO: We don't currently support DMA'ing into a cacheable region of memory (i.e. SRAM) for
   // circular transfers. The reason is that it gets complicated because the consumer might be
   // reading from the buffer at any time (as UART does), as opposed to direct transfers where the
   // consumer is always reading only after the transfer has completed.
-  PBL_ASSERTN(!mpu_memory_is_cachable(dst));
+  PBL_ASSERTN(!mpu_memory_is_cacheable(dst));
 
   prv_request_start(this, dst, src, length, DMARequestTransferType_Circular);
 }
@@ -449,7 +449,7 @@ void dma_stream_irq_handler(DMAStream *stream) {
   switch (this->state->transfer_type) {
     case DMARequestTransferType_Direct:
       if (has_tc) {
-        if (mpu_memory_is_cachable(this->state->transfer_dst)) {
+        if (mpu_memory_is_cacheable(this->state->transfer_dst)) {
           dcache_invalidate(this->state->transfer_dst, this->state->transfer_length);
         }
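
Note on the rename: mpu_memory_is_cacheable() is what gates all of the cache
maintenance around DMA on cache-enabled parts (e.g. the F7's Cortex-M7). The
following is a minimal sketch of the pre-transfer rules the prv_validate_memory()
hunks above enforce. It is illustrative only: prv_prepare_direct_transfer() is a
hypothetical name, dcache_flush() is an assumed helper for the source-clean step
(the first hunk elides the actual call), and the transfer data size is passed in
directly instead of being read from the DMARequest.

#include <stdint.h>
// mpu_memory_is_cacheable(), dcache_invalidate(), dcache_alignment_mask_minimum()
// and PBL_ASSERTN() are the driver helpers shown in the hunks above.

static void prv_prepare_direct_transfer(void *dst, const void *src,
                                        uint32_t length,
                                        uint32_t data_size_bytes) {
  if (mpu_memory_is_cacheable(src)) {
    // Clean (write back) dirty source lines so the DMA engine, which reads
    // SRAM directly, sees the data the CPU most recently wrote.
    dcache_flush((void *)src, length);  // assumed helper; see note above
  }

  if (mpu_memory_is_cacheable(dst)) {
    // Invalidate the destination up front: if a stale line covering dst were
    // evicted mid-transfer, its write-back would corrupt the DMA'd bytes.
    dcache_invalidate(dst, length);

    // dst and length must be cache-line granular, otherwise invalidation
    // would also discard unrelated data sharing the first or last line.
    const uint32_t mask = dcache_alignment_mask_minimum(data_size_bytes - 1);
    PBL_ASSERTN((((uintptr_t)dst & mask) == 0) && ((length & mask) == 0));
  }
}

On completion, dma_stream_irq_handler() invalidates the destination once more
before the consumer reads it. Circular transfers skip all of this and simply
assert !mpu_memory_is_cacheable(dst): a consumer such as UART may read the
buffer while the transfer is still running, so there is no single safe point at
which to invalidate.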