commit 71513f263b (parent 5489a4c0d3)

spelling: cacheable

Signed-off-by: Josh Soref <2119212+jsoref@users.noreply.github.com>

4 changed files with 10 additions and 10 deletions
@@ -41,7 +41,7 @@ void dma_request_start_direct(DMARequest *this, void *dst, const void *src, uint
 
 //! Starts a circular DMA transfer which calls the callback for when the transfer is both complete
 //! and half complete. The length should be specified in bytes.
-//! @note The destination address must not be in a cachable region of memory (i.e. SRAM on the F7).
+//! @note The destination address must not be in a cacheable region of memory (i.e. SRAM on the F7).
 //! See the comment within dma.c for more info.
 void dma_request_start_circular(DMARequest *this, void *dst, const void *src, uint32_t length,
                                 DMACircularRequestHandler handler, void *context);
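As a usage illustration of the API documented above, here is a minimal, hedged sketch of starting a circular peripheral-to-memory receive into a buffer kept out of cacheable SRAM. The DMA_BSS placement attribute, the handler signature, and the s_rx_buffer/prv_rx_handler names are assumptions for illustration, not part of this commit:

// Sketch only: a circular receive. Per the @note above, the destination must not
// be cacheable, so the buffer is assumed to be placed in a non-cacheable region
// via a linker attribute (DMA_BSS is hypothetical).
static uint8_t s_rx_buffer[256] DMA_BSS;

// Assumed handler shape; the real DMACircularRequestHandler typedef lives in dma.h.
static void prv_rx_handler(DMARequest *request, void *context, bool is_complete) {
  // Invoked on both half-complete and complete: drain the half of s_rx_buffer
  // that the DMA engine just finished filling.
}

static void prv_start_rx(DMARequest *request, const volatile uint32_t *periph_data_reg) {
  dma_request_start_circular(request, s_rx_buffer, (const void *)periph_data_reg,
                             sizeof(s_rx_buffer), prv_rx_handler, NULL);
}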
@@ -209,11 +209,11 @@ void mpu_set_task_configurable_regions(MemoryRegion_t *memory_regions,
 
 }
 
-bool mpu_memory_is_cachable(const void *addr) {
+bool mpu_memory_is_cacheable(const void *addr) {
   if (!dcache_is_enabled()) {
     return false;
   }
-  // TODO PBL-37601: We're assuming only SRAM is cachable for now for simplicity sake. We should
+  // TODO PBL-37601: We're assuming only SRAM is cacheable for now for simplicity sake. We should
   // account for MPU configuration and also the fact that memory-mapped QSPI access goes through the
   // cache.
   return ((uint32_t)addr >= SRAM_BASE) && ((uint32_t)addr < SRAM_END);
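To make the check above concrete, a self-contained sketch with a placeholder STM32F7-style memory map. The EXAMPLE_* bounds and the assumption that the D-cache is enabled are illustrative; the firmware uses the vendor-provided SRAM_BASE/SRAM_END macros and its own dcache_is_enabled():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// Placeholder bounds for illustration only.
#define EXAMPLE_SRAM_BASE 0x20000000u
#define EXAMPLE_SRAM_END  0x20080000u

// Stand-in for mpu_memory_is_cacheable() with the D-cache assumed enabled:
// only addresses inside SRAM are treated as cacheable.
static bool example_is_cacheable(uint32_t addr) {
  return (addr >= EXAMPLE_SRAM_BASE) && (addr < EXAMPLE_SRAM_END);
}

int main(void) {
  printf("%d\n", example_is_cacheable(0x20010000u));  // 1: inside SRAM
  printf("%d\n", example_is_cacheable(0x08000000u));  // 0: internal flash
  printf("%d\n", example_is_cacheable(0x40011000u));  // 0: peripheral register
  return 0;
}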
@@ -59,6 +59,6 @@ void mpu_get_register_settings(const MpuRegion* region, uint32_t *base_address_r
 void mpu_set_task_configurable_regions(MemoryRegion_t *memory_regions,
                                        const MpuRegion **region_ptrs);
 
-bool mpu_memory_is_cachable(const void *addr);
+bool mpu_memory_is_cacheable(const void *addr);
 
 void mpu_init_region_from_region(MpuRegion *copy, const MpuRegion *from, bool allow_user_access);
@@ -264,7 +264,7 @@ void dma_request_init(DMARequest *this) {
 ////////////////////////////////////////////////////////////////////////////////
 
 static void prv_validate_memory(DMARequest *this, void *dst, const void *src, uint32_t length) {
-  if (mpu_memory_is_cachable(src)) {
+  if (mpu_memory_is_cacheable(src)) {
     // Flush the source buffer from cache so that SRAM has the correct data.
     uintptr_t aligned_src = (uintptr_t)src;
     size_t aligned_length = length;
@@ -273,11 +273,11 @@ static void prv_validate_memory(DMARequest *this, void *dst, const void *src, ui
   }
 
   const uint32_t alignment_mask = prv_get_data_size_bytes(this) - 1;
-  if (mpu_memory_is_cachable(dst)) {
+  if (mpu_memory_is_cacheable(dst)) {
     // If a cache line within the dst gets evicted while we do the transfer, it'll corrupt SRAM, so
     // just invalidate it now.
     dcache_invalidate(dst, length);
-    // since the dst address is cachable, it needs to be aligned to a cache line and
+    // since the dst address is cacheable, it needs to be aligned to a cache line and
     // the length must be an even multiple of cache lines
     const uint32_t dst_alignment_mask = dcache_alignment_mask_minimum(alignment_mask);
     PBL_ASSERTN(((length & dst_alignment_mask) == 0) &&
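The assert in this hunk enforces cache-line granularity on the destination: both the address and the length must cover whole cache lines, otherwise invalidating could clobber unrelated data sharing a line. A self-contained sketch of the same arithmetic, assuming the 32-byte D-cache line of the Cortex-M7 (the real code derives the mask via dcache_alignment_mask_minimum(), which also folds in the DMA data-size alignment):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CACHE_LINE_SIZE 32u  // Cortex-M7 D-cache line size

// Returns 1 if a DMA destination in cacheable memory is safe to invalidate:
// it must start on a cache-line boundary and cover only whole lines.
static int example_dst_is_cache_safe(uintptr_t addr, uint32_t length) {
  const uint32_t mask = EXAMPLE_CACHE_LINE_SIZE - 1u;
  return ((addr & mask) == 0u) && ((length & mask) == 0u);
}

int main(void) {
  printf("%d\n", example_dst_is_cache_safe(0x20000000u, 64u));  // 1: aligned start, whole lines
  printf("%d\n", example_dst_is_cache_safe(0x20000010u, 64u));  // 0: starts mid-line
  printf("%d\n", example_dst_is_cache_safe(0x20000000u, 48u));  // 0: partial trailing line
  return 0;
}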
@@ -377,11 +377,11 @@ void dma_request_start_circular(DMARequest *this, void *dst, const void *src, ui
   PBL_ASSERTN(!this->stream->state->current_request);
   this->stream->state->current_request = this;
 
-  // TODO: We don't currently support DMA'ing into a cachable region of memory (i.e. SRAM) for
+  // TODO: We don't currently support DMA'ing into a cacheable region of memory (i.e. SRAM) for
   // circular transfers. The reason is that it gets complicated because the consumer might be
   // reading from the buffer at any time (as UART does), as opposed to direct transfers where the
   // consumer is always reading only after the transfer has completed.
-  PBL_ASSERTN(!mpu_memory_is_cachable(dst));
+  PBL_ASSERTN(!mpu_memory_is_cacheable(dst));
   prv_request_start(this, dst, src, length, DMARequestTransferType_Circular);
 }
 
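For contrast with the direct-transfer path, a hedged sketch of why the TODO above is non-trivial: a consumer of a circular transfer into cacheable SRAM would have to invalidate each half of the buffer immediately before every read, rather than once at completion. The handler signature, the context convention, and CIRCULAR_BUFFER_SIZE are assumptions, and this is precisely the pattern the firmware does not implement; it asserts the destination is non-cacheable instead:

// Illustrative only: what supporting a cacheable circular destination would
// roughly require on the consumer side.
static void prv_circular_handler(DMARequest *request, void *context, bool is_complete) {
  uint8_t *buffer = context;                        // assumed: the circular destination buffer
  const uint32_t half = CIRCULAR_BUFFER_SIZE / 2u;  // CIRCULAR_BUFFER_SIZE is hypothetical
  uint8_t *region = is_complete ? (buffer + half) : buffer;
  // Every read of a half-buffer would need a fresh invalidate, and nothing stops a
  // cache line from being refetched between this point and the actual read.
  dcache_invalidate(region, half);
  // ... consume 'half' bytes from 'region' ...
}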
@@ -449,7 +449,7 @@ void dma_stream_irq_handler(DMAStream *stream) {
   switch (this->state->transfer_type) {
     case DMARequestTransferType_Direct:
       if (has_tc) {
-        if (mpu_memory_is_cachable(this->state->transfer_dst)) {
+        if (mpu_memory_is_cacheable(this->state->transfer_dst)) {
          dcache_invalidate(this->state->transfer_dst, this->state->transfer_length);
        }
 