Mirror of https://github.com/wiiu-env/libfat.git (synced 2024-11-25 19:36:52 +01:00)
- cleaned up cache code. reverted access policy to what it was before.
- _FAT_fat_linkFreeClusterCleared must go through the cache as well.
parent 22f3dea64f
commit 806fc418e0
source/cache.c (303 lines changed)
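The change centers on one internal helper: every read and write path now locates (and, on a miss, loads) its cache page through _FAT_cache_getPage, and the access policy goes back to bumping each page's own last_access count instead of stamping it with the global accessCounter via accessTime(). The following is a condensed sketch of that policy, simplified from the cache.c hunk below; it is not the committed code and assumes <stdbool.h>, <limits.h> and the internal types and disc helpers from source/cache.h and source/disc_io.h.

// Hit: bump the page's last_access counter and return it.
// Miss: take the first free page, or failing that the page with the smallest
// last_access; write it back if dirty, then refill it from disc.
static CACHE_ENTRY* getPage_sketch (CACHE *cache, sec_t sector) {
    unsigned int i, oldUsed = 0, oldAccess = UINT_MAX;
    bool foundFree = false;
    CACHE_ENTRY *entries = cache->cacheEntries;

    for (i = 0; i < cache->numberOfPages; i++) {
        if (sector >= entries[i].sector && sector < entries[i].sector + entries[i].count) {
            entries[i].last_access++;                 // hit
            return &entries[i];
        }
        if (!foundFree && (entries[i].sector == CACHE_FREE || entries[i].last_access < oldAccess)) {
            if (entries[i].sector == CACHE_FREE) foundFree = true;
            oldUsed = i;                              // remember eviction candidate
            oldAccess = entries[i].last_access;
        }
    }

    if (!foundFree && entries[oldUsed].dirty) {       // evicting a dirty page: write it back first
        if (!_FAT_disc_writeSectors (cache->disc, entries[oldUsed].sector,
                                     entries[oldUsed].count, entries[oldUsed].cache)) return NULL;
        entries[oldUsed].dirty = false;
    }
    if (!_FAT_disc_readSectors (cache->disc, sector, cache->sectorsPerPage,
                                entries[oldUsed].cache)) return NULL;

    entries[oldUsed].sector = sector;
    entries[oldUsed].count  = cache->sectorsPerPage;
    entries[oldUsed].last_access++;
    return &entries[oldUsed];
}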
@@ -101,151 +101,94 @@ void _FAT_cache_destructor (CACHE* cache) {
 	_FAT_mem_free (cache);
 }
 
+/*
 static u32 accessCounter = 0;
 
 static u32 accessTime(){
 	accessCounter++;
 	return accessCounter;
 }
 
-/*
-Retrieve a sector's page from the cache. If it is not found in the cache,
-load it into the cache and return the page it was loaded to.
-Return CACHE_FREE on error.
 */
-static unsigned int _FAT_cache_getSector (CACHE* cache, sec_t sector, void* buffer) {
+
+static CACHE_ENTRY* _FAT_cache_getPage(CACHE *cache,sec_t sector)
+{
 	unsigned int i;
 	CACHE_ENTRY* cacheEntries = cache->cacheEntries;
 	unsigned int numberOfPages = cache->numberOfPages;
 	unsigned int sectorsPerPage = cache->sectorsPerPage;
 
+	bool foundFree = false;
 	unsigned int oldUsed = 0;
-	unsigned int oldAccess = cacheEntries[0].last_access;
+	unsigned int oldAccess = UINT_MAX;
 
-	for (i = 0; i < numberOfPages ; i++) {
-		if ( sector>=cacheEntries[i].sector && sector < cacheEntries[i].sector+cacheEntries[i].count) {
-			cacheEntries[i].last_access = accessTime();
-			memcpy(buffer, cacheEntries[i].cache + ((sector - cacheEntries[i].sector)*BYTES_PER_READ), BYTES_PER_READ);
-			return true;
+	for(i=0;i<numberOfPages;i++) {
+		if(sector>=cacheEntries[i].sector && sector<(cacheEntries[i].sector + cacheEntries[i].count)) {
+			cacheEntries[i].last_access++;
+			return &(cacheEntries[i]);
 		}
-		// While searching for the desired sector, also search for the least recently used page
-		if ( (cacheEntries[i].sector == CACHE_FREE) || (cacheEntries[i].last_access < oldAccess) ) {
+
+		if(foundFree==false && (cacheEntries[i].sector==CACHE_FREE || cacheEntries[i].last_access<oldAccess)) {
+			if(cacheEntries[i].sector==CACHE_FREE) foundFree = true;
 			oldUsed = i;
 			oldAccess = cacheEntries[i].last_access;
 		}
 	}
 
-	// If it didn't, replace the least used cache page with the desired sector
-	if ((cacheEntries[oldUsed].sector != CACHE_FREE) && (cacheEntries[oldUsed].dirty == true)) {
-		// Write the page back to disc if it has been written to
-		if (!_FAT_disc_writeSectors (cache->disc, cacheEntries[oldUsed].sector, cacheEntries[oldUsed].count, cacheEntries[oldUsed].cache)) {
-			return false;
-		}
+	if(foundFree==false && cacheEntries[oldUsed].dirty==true) {
+		if(!_FAT_disc_writeSectors(cache->disc,cacheEntries[oldUsed].sector,cacheEntries[oldUsed].count,cacheEntries[oldUsed].cache)) return NULL;
 		cacheEntries[oldUsed].dirty = false;
 	}
 
-	// Load the new sector into the cache
-	if (!_FAT_disc_readSectors (cache->disc, sector, sectorsPerPage, cacheEntries[oldUsed].cache)) {
-		return false;
-	}
+	if(!_FAT_disc_readSectors(cache->disc,sector,sectorsPerPage,cacheEntries[oldUsed].cache)) return NULL;
+
 	cacheEntries[oldUsed].sector = sector;
 	cacheEntries[oldUsed].count = sectorsPerPage;
-	// Increment the usage count, don't reset it
-	// This creates a paging policy of least recently used PAGE, not sector
-	cacheEntries[oldUsed].last_access = accessTime();
-	memcpy(buffer, cacheEntries[oldUsed].cache, BYTES_PER_READ);
-	return true;
+	cacheEntries[oldUsed].last_access++;
+
+	return &(cacheEntries[oldUsed]);
 }
 
-bool _FAT_cache_getSectors (CACHE* cache, sec_t sector, sec_t numSectors, void* buffer) {
-	unsigned int i;
-	CACHE_ENTRY* cacheEntries = cache->cacheEntries;
-	unsigned int numberOfPages = cache->numberOfPages;
+bool _FAT_cache_readSectors(CACHE *cache,sec_t sector,sec_t numSectors,void *buffer)
+{
 	sec_t sec;
 	sec_t secs_to_read;
+	CACHE_ENTRY *entry;
+	uint8_t *dest = buffer;
 
-	unsigned int oldUsed = 0;
-	unsigned int oldAccess = cacheEntries[0].last_access;
-
-	while(numSectors>0)
-	{
-		i=0;
-		while (i < numberOfPages ) {
-			if ( sector>=cacheEntries[i].sector && sector < cacheEntries[i].sector+cacheEntries[i].count) {
-				sec=sector-cacheEntries[i].sector;
-				secs_to_read=cacheEntries[i].count-sec;
-				if(secs_to_read>numSectors)secs_to_read=numSectors;
-				memcpy(buffer,cacheEntries[i].cache + (sec*BYTES_PER_READ), secs_to_read*BYTES_PER_READ);
-				cacheEntries[i].last_access = accessTime();
-				numSectors=numSectors-secs_to_read;
-				if(numSectors==0) return true;
-				buffer+=secs_to_read*BYTES_PER_READ;
-				sector+=secs_to_read;
-				i=-1; // recheck all pages again
-				oldUsed = 0;
-				oldAccess = cacheEntries[0].last_access;
-
-			}
-			else // While searching for the desired sector, also search for the least recently used page
-			if ( (cacheEntries[i].sector == CACHE_FREE) || (cacheEntries[i].last_access < oldAccess) ) {
-				oldUsed = i;
-				oldAccess = cacheEntries[i].last_access;
-			}
-			i++;
-		}
-		// If it didn't, replace the least recently used cache page with the desired sector
-		if ((cacheEntries[oldUsed].sector != CACHE_FREE) && (cacheEntries[oldUsed].dirty == true)) {
-			// Write the page back to disc if it has been written to
-			if (!_FAT_disc_writeSectors (cache->disc, cacheEntries[oldUsed].sector, cacheEntries[oldUsed].count, cacheEntries[oldUsed].cache)) {
-				return false;
-			}
-			cacheEntries[oldUsed].dirty = false;
-		}
-
-		cacheEntries[oldUsed].sector = sector;
-		cacheEntries[oldUsed].count = cache->sectorsPerPage;
-		if (!_FAT_disc_readSectors (cache->disc, sector, cacheEntries[oldUsed].count, cacheEntries[oldUsed].cache)) {
-			return false;
-		}
-
-		// Increment the usage count, don't reset it
-		// This creates a paging policy of least used PAGE, not sector
-		cacheEntries[oldUsed].last_access = accessTime();
-
-		sec=0;
-		secs_to_read=cacheEntries[oldUsed].count-sec;
-		if(secs_to_read>numSectors)secs_to_read=numSectors;
-		memcpy(buffer,cacheEntries[oldUsed].cache + (sec*BYTES_PER_READ), secs_to_read*BYTES_PER_READ);
-		numSectors=numSectors-secs_to_read;
-		if(numSectors==0) return true;
-		buffer+=secs_to_read*BYTES_PER_READ;
-
-		sector+=secs_to_read;
-		oldUsed = 0;
-		oldAccess = cacheEntries[0].last_access;
+	while(numSectors>0) {
+		entry = _FAT_cache_getPage(cache,sector);
+		if(entry==NULL) return false;
+
+		sec = sector - entry->sector;
+		secs_to_read = entry->count - sec;
+		if(secs_to_read>numSectors) secs_to_read = numSectors;
+
+		memcpy(dest,entry->cache + (sec*BYTES_PER_READ),(secs_to_read*BYTES_PER_READ));
+
+		dest += (secs_to_read*BYTES_PER_READ);
+		sector += secs_to_read;
+		numSectors -= secs_to_read;
 	}
-	return false;
+
+	return true;
 }
 
 /*
 Reads some data from a cache page, determined by the sector number
 */
-bool _FAT_cache_readPartialSector (CACHE* cache, void* buffer, sec_t sector, unsigned int offset, size_t size) {
-	void* sec;
-
-	if (offset + size > BYTES_PER_READ) {
-		return false;
-	}
-
-	sec = (void*) _FAT_mem_align ( BYTES_PER_READ );
-	if(sec == NULL) return false;
-	if(! _FAT_cache_getSector(cache, sector, sec) ) {
-		_FAT_mem_free(sec);
-		return false;
-	}
-	memcpy(buffer, sec + offset, size);
-	_FAT_mem_free(sec);
+bool _FAT_cache_readPartialSector (CACHE* cache, void* buffer, sec_t sector, unsigned int offset, size_t size)
+{
+	sec_t sec;
+	CACHE_ENTRY *entry;
+
+	if (offset + size > BYTES_PER_READ) return false;
+
+	entry = _FAT_cache_getPage(cache,sector);
+	if(entry==NULL) return false;
+
+	sec = sector - entry->sector;
+	memcpy(buffer,entry->cache + ((sec*BYTES_PER_READ) + offset),size);
+
 	return true;
 }
 
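With the helper in place, _FAT_cache_readSectors is just a loop of page lookups and memcpys, and partial reads come straight out of the cached page. A caller-side sketch, assuming an already-constructed CACHE* and the declarations from source/cache.h; the function name, sector number and buffers here are hypothetical, only the _FAT_cache_* calls come from the diff above.

#include <stdint.h>
#include <stdbool.h>

// Hypothetical caller: read eight whole sectors through the cache, then a
// 4-byte field at offset 12 inside the first of them.
static bool read_example (CACHE *cache, sec_t startSector) {
    uint8_t  sectors[8 * BYTES_PER_READ];
    uint32_t field;

    if (!_FAT_cache_readSectors (cache, startSector, 8, sectors))
        return false;                  // disc read or dirty write-back failed

    return _FAT_cache_readPartialSector (cache, &field, startSector, 12, sizeof(field));
}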
@@ -265,35 +208,21 @@ bool _FAT_cache_readLittleEndianValue (CACHE* cache, uint32_t *value, sec_t sect
 /*
 Writes some data to a cache page, making sure it is loaded into memory first.
 */
-bool _FAT_cache_writePartialSector (CACHE* cache, const void* buffer, sec_t sector, unsigned int offset, size_t size) {
-	unsigned int i;
-	void* sec;
-	CACHE_ENTRY* cacheEntries = cache->cacheEntries;
-	unsigned int numberOfPages = cache->numberOfPages;
+bool _FAT_cache_writePartialSector (CACHE* cache, const void* buffer, sec_t sector, unsigned int offset, size_t size)
+{
+	sec_t sec;
+	CACHE_ENTRY *entry;
 
-	if (offset + size > BYTES_PER_READ) {
-		return false;
-	}
+	if (offset + size > BYTES_PER_READ) return false;
 
-	//To be sure sector is in cache
-	sec = (void*) _FAT_mem_align ( BYTES_PER_READ );
-	if(sec == NULL) return false;
-	if(! _FAT_cache_getSector(cache, sector, sec) ) {
-		_FAT_mem_free(sec);
-		return false;
-	}
-	_FAT_mem_free(sec);
+	entry = _FAT_cache_getPage(cache,sector);
+	if(entry==NULL) return false;
 
-	//Find where sector is and write
-	for (i = 0; i < numberOfPages ; i++) {
-		if ( sector>=cacheEntries[i].sector && sector < cacheEntries[i].sector+cacheEntries[i].count) {
-			cacheEntries[i].last_access = accessTime();
-			memcpy (cacheEntries[i].cache + ((sector-cacheEntries[i].sector)*BYTES_PER_READ) + offset, buffer, size);
-			cache->cacheEntries[i].dirty = true;
-			return true;
-		}
-	}
-	return false;
+	sec = sector - entry->sector;
+	memcpy(entry->cache + ((sec*BYTES_PER_READ) + offset),buffer,size);
+
+	entry->dirty = true;
+	return true;
 }
 
 bool _FAT_cache_writeLittleEndianValue (CACHE* cache, const uint32_t value, sec_t sector, unsigned int offset, int size) {
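_FAT_cache_writePartialSector now reduces to a page lookup, a memcpy into the cached page and a dirty flag; the offset + size > BYTES_PER_READ guard still confines a partial write to a single sector. An illustrative caller, with hypothetical names and source/cache.h assumed; BYTES_PER_READ is the usual 512-byte sector size.

// Patch the last two bytes of a sector through the cache.
// offset (510) + size (2) equals BYTES_PER_READ, so the bounds check passes.
static bool patch_example (CACHE *cache, sec_t sector) {
    const uint8_t magic[2] = { 0x55, 0xAA };
    return _FAT_cache_writePartialSector (cache, magic, sector, 510, sizeof(magic));
}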
@@ -312,105 +241,49 @@ bool _FAT_cache_writeLittleEndianValue (CACHE* cache, const uint32_t value, sec_
 /*
 Writes some data to a cache page, zeroing out the page first
 */
-bool _FAT_cache_eraseWritePartialSector (CACHE* cache, const void* buffer, sec_t sector, unsigned int offset, size_t size) {
-	unsigned int i;
-	void* sec;
-	CACHE_ENTRY* cacheEntries = cache->cacheEntries;
-	unsigned int numberOfPages = cache->numberOfPages;
+bool _FAT_cache_eraseWritePartialSector (CACHE* cache, const void* buffer, sec_t sector, unsigned int offset, size_t size)
+{
+	sec_t sec;
+	CACHE_ENTRY *entry;
 
-	if (offset + size > BYTES_PER_READ) {
-		return false;
-	}
+	if (offset + size > BYTES_PER_READ) return false;
 
-	//To be sure sector is in cache
-	sec = (void*) _FAT_mem_align ( BYTES_PER_READ );
-	if(sec == NULL) return false;
-	if(! _FAT_cache_getSector(cache, sector, sec) ) {
-		_FAT_mem_free(sec);
-		return false;
-	}
-	_FAT_mem_free(sec);
+	entry = _FAT_cache_getPage(cache,sector);
+	if(entry==NULL) return false;
 
-	//Find where sector is and write
-	for (i = 0; i < numberOfPages ; i++) {
-		if ( sector>=cacheEntries[i].sector && sector < cacheEntries[i].sector+cacheEntries[i].count) {
-			cacheEntries[i].last_access = accessTime();
-			memset (cacheEntries[i].cache + ((sector-cacheEntries[i].sector)*BYTES_PER_READ), 0, BYTES_PER_READ);
-			memcpy (cacheEntries[i].cache + ((sector-cacheEntries[i].sector)*BYTES_PER_READ) + offset, buffer, size);
-			cache->cacheEntries[i].dirty = true;
-			return true;
-		}
-	}
-	return false;
+	sec = sector - entry->sector;
+	memset(entry->cache + (sec*BYTES_PER_READ),0,BYTES_PER_READ);
+	memcpy(entry->cache + ((sec*BYTES_PER_READ) + offset),buffer,size);
+
+	entry->dirty = true;
+	return true;
 }
 
-bool _FAT_cache_writeSectors (CACHE* cache, sec_t sector, sec_t numSectors, const void* buffer) {
-	unsigned int i;
-	CACHE_ENTRY* cacheEntries = cache->cacheEntries;
-	unsigned int numberOfPages = cache->numberOfPages;
+bool _FAT_cache_writeSectors (CACHE* cache, sec_t sector, sec_t numSectors, const void* buffer)
+{
 	sec_t sec;
 	sec_t secs_to_write;
+	CACHE_ENTRY* entry;
+	const uint8_t *src = buffer;
 
-	unsigned int oldUsed = 0;
-	unsigned int oldAccess = cacheEntries[0].last_access;
-
 	while(numSectors>0)
 	{
-		i=0;
-		while (i < numberOfPages ) {
-			if ( (sector>=cacheEntries[i].sector && sector < cacheEntries[i].sector+cacheEntries[i].count) ||
-				(sector == cacheEntries[i].sector+cacheEntries[i].count && cacheEntries[i].count < cache->sectorsPerPage)) {
-				sec=sector-cacheEntries[i].sector;
-				secs_to_write=cache->sectorsPerPage-sec;
-				if(secs_to_write>numSectors)secs_to_write=numSectors;
-				memcpy(cacheEntries[i].cache + (sec*BYTES_PER_READ), buffer, secs_to_write*BYTES_PER_READ);
-				cacheEntries[i].last_access = accessTime();
-				cacheEntries[i].dirty = true;
-				cacheEntries[i].count = sec + secs_to_write;
-				numSectors=numSectors-secs_to_write;
-				if(numSectors==0) return true;
-				buffer+=secs_to_write*BYTES_PER_READ;
-				sector+=secs_to_write;
-				i=-1; // recheck all pages again
-				oldUsed = 0;
-				oldAccess = cacheEntries[0].last_access;
-
-			}
-			else // While searching for the desired sector, also search for the least recently used page
-			if ( (cacheEntries[i].sector == CACHE_FREE) || (cacheEntries[i].last_access < oldAccess) ) {
-				oldUsed = i;
-				oldAccess = cacheEntries[i].last_access;
-			}
-			i++;
-		}
-		// If it didn't, replace the least recently used cache page with the desired sector
-		if ((cacheEntries[oldUsed].sector != CACHE_FREE) && (cacheEntries[oldUsed].dirty == true)) {
-			// Write the page back to disc if it has been written to
-			if (!_FAT_disc_writeSectors (cache->disc, cacheEntries[oldUsed].sector, cacheEntries[oldUsed].count, cacheEntries[oldUsed].cache)) {
-				return false;
-			}
-			cacheEntries[oldUsed].dirty = false;
-		}
-
-		secs_to_write=numSectors;
-		if(secs_to_write>cache->sectorsPerPage)secs_to_write=cache->sectorsPerPage;
-		cacheEntries[oldUsed].sector = sector;
-		cacheEntries[oldUsed].count = secs_to_write;
-
-		memcpy(cacheEntries[oldUsed].cache, buffer, secs_to_write*BYTES_PER_READ);
-		buffer+=secs_to_write*BYTES_PER_READ;
-		sector+=secs_to_write;
-		numSectors=numSectors-secs_to_write;
-
-		// Increment the usage count, don't reset it
-		// This creates a paging policy of least used PAGE, not sector
-		cacheEntries[oldUsed].last_access = accessTime();
-		cacheEntries[oldUsed].dirty = true;
-		if(numSectors==0) return true;
-		oldUsed = 0;
-		oldAccess = cacheEntries[0].last_access;
+		entry = _FAT_cache_getPage(cache,sector);
+		if(entry==NULL) return false;
+
+		sec = sector - entry->sector;
+		secs_to_write = entry->count - sec;
+		if(secs_to_write>numSectors) secs_to_write = numSectors;
+
+		memcpy(entry->cache + (sec*BYTES_PER_READ),src,(secs_to_write*BYTES_PER_READ));
+
+		src += (secs_to_write*BYTES_PER_READ);
+		sector += secs_to_write;
+		numSectors -= secs_to_write;
+
+		entry->dirty = true;
 	}
-	return false;
+	return true;
 }
 
 /*
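Note that _FAT_cache_writeSectors and the partial-write helpers only copy into the cached page and mark it dirty; the data reaches the disc when _FAT_cache_getPage later evicts that page, or when the whole cache is flushed. A sketch of forcing the write-back: _FAT_cache_flush is taken from source/cache.h and does not appear in this diff, so treat that call (and the names below) as an assumption.

// Write one sector through the cache, then force it out to the disc.
static bool write_then_flush (CACHE *cache, sec_t sector, const uint8_t *data) {
    if (!_FAT_cache_writeSectors (cache, sector, 1, data))
        return false;                  // page is now resident and marked dirty
    return _FAT_cache_flush (cache);   // write every dirty page back to the disc
}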
source/cache.h

@@ -93,7 +93,7 @@ bool _FAT_cache_eraseWritePartialSector (CACHE* cache, const void* buffer, sec_t
 /*
 Read several sectors from the cache
 */
-bool _FAT_cache_getSectors (CACHE* cache, sec_t sector, sec_t numSectors, void* buffer);
+bool _FAT_cache_readSectors (CACHE* cache, sec_t sector, sec_t numSectors, void* buffer);
 
 /*
 Read a full sector from the cache
source/fatfile.c

@@ -411,7 +411,7 @@ ssize_t _FAT_read_r (struct _reent *r, int fd, char *ptr, size_t len) {
 		}
 
 		if ((tempVar > 0) && flagNoError) {
-			if (! _FAT_cache_getSectors (cache, _FAT_fat_clusterToSector (partition, position.cluster) + position.sector,
+			if (! _FAT_cache_readSectors (cache, _FAT_fat_clusterToSector (partition, position.cluster) + position.sector,
 				tempVar, ptr))
 			{
 				flagNoError = false;
@@ -454,7 +454,7 @@ ssize_t _FAT_read_r (struct _reent *r, int fd, char *ptr, size_t len) {
 #endif
 				(chunkSize + partition->bytesPerCluster <= remain));
 
-			if (!_FAT_cache_getSectors (cache, _FAT_fat_clusterToSector (partition, position.cluster),
+			if (!_FAT_cache_readSectors (cache, _FAT_fat_clusterToSector (partition, position.cluster),
 				chunkSize / BYTES_PER_READ, ptr))
 			{
 				flagNoError = false;
@@ -480,7 +480,7 @@ ssize_t _FAT_read_r (struct _reent *r, int fd, char *ptr, size_t len) {
 		// Read remaining sectors
 		tempVar = remain / BYTES_PER_READ; // Number of sectors left
 		if ((tempVar > 0) && flagNoError) {
-			if (!_FAT_cache_getSectors (cache, _FAT_fat_clusterToSector (partition, position.cluster),
+			if (!_FAT_cache_readSectors (cache, _FAT_fat_clusterToSector (partition, position.cluster),
 				tempVar, ptr))
 			{
 				flagNoError = false;
source/file_allocation_table.c

@@ -278,7 +278,7 @@ uint32_t _FAT_fat_linkFreeClusterCleared (PARTITION* partition, uint32_t cluster
 	// Clear all the sectors within the cluster
 	memset (emptySector, 0, BYTES_PER_READ);
 	for (i = 0; i < partition->sectorsPerCluster; i++) {
-		_FAT_disc_writeSectors (partition->disc,
+		_FAT_cache_writeSectors (partition->disc,
 			_FAT_fat_clusterToSector (partition, newCluster) + i,
 			1, emptySector);
 	}
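This last hunk is the second point of the commit message: clearing a freshly linked cluster has to go through the cache, because a direct _FAT_disc_writeSectors call can later be undone when a stale dirty page covering the same sectors is evicted and written back over the zeroed data. A sketch of the loop written against the cache API follows; since _FAT_cache_writeSectors takes a CACHE* (see its definition in the cache.c hunk above), the sketch passes the partition's cache object, here assumed to be a partition->cache member, rather than the partition->disc argument shown in the hunk.

// Clear every sector of the newly linked cluster via the cache so the zeroed
// data cannot be clobbered by a later write-back of a stale dirty page.
memset (emptySector, 0, BYTES_PER_READ);
for (i = 0; i < partition->sectorsPerCluster; i++) {
    _FAT_cache_writeSectors (partition->cache,               // assumed CACHE* member of PARTITION
        _FAT_fat_clusterToSector (partition, newCluster) + i,
        1, emptySector);
}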