Mirror of https://github.com/wiiu-env/libfat.git (synced 2024-11-22 18:09:17 +01:00)
Read/write contiguous clusters in one operation to improve speed.
This commit is contained in:
parent c0c2a463f6
commit 94c4eac0cb
common.h
@@ -67,8 +67,10 @@
 #elif defined (NDS)
 #define DEFAULT_CACHE_PAGES 8
 #define USE_RTC_TIME
+#define LIMIT_SECTORS 128
 #elif defined (GBA)
 #define DEFAULT_CACHE_PAGES 2
+#define LIMIT_SECTORS 128
 #endif
 
 #endif // _COMMON_H
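LIMIT_SECTORS caps how many sectors the reworked read/write loops in fatfile.c pass to the disc interface in a single call on NDS and GBA. A minimal sketch of what the cap means in bytes, assuming libfat's usual BYTES_PER_READ sector size of 512 from common.h; the helper below is illustrative and not part of this commit:

    #include <stddef.h>

    #define BYTES_PER_READ 512   /* libfat sector size (assumed, as in common.h) */
    #define LIMIT_SECTORS  128   /* per-transfer cap added above for NDS/GBA */

    /* Illustrative helper: largest chunk, in bytes, that a single
       _FAT_disc_readSectors/_FAT_disc_writeSectors call may cover. */
    static inline size_t max_chunk_bytes (size_t remain) {
        size_t limit = (size_t) LIMIT_SECTORS * BYTES_PER_READ;   /* 128 * 512 = 64 KiB */
        return (remain < limit) ? remain : limit;
    }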
disc.h
@@ -51,8 +51,9 @@ static inline bool _FAT_disc_isInserted (const DISC_INTERFACE* disc) {
 
 /*
 Read numSectors sectors from a disc, starting at sector.
-numSectors is between 1 and 256
-sector is from 0 to 2^28
+numSectors is between 1 and LIMIT_SECTORS if LIMIT_SECTORS is defined,
+else it is at least 1
+sector is 0 or greater
 buffer is a pointer to the memory to fill
 */
 static inline bool _FAT_disc_readSectors (const DISC_INTERFACE* disc, sec_t sector, sec_t numSectors, void* buffer) {
@@ -61,8 +62,9 @@ static inline bool _FAT_disc_readSectors (const DISC_INTERFACE* disc, sec_t sector, sec_t numSectors, void* buffer) {
 
 /*
 Write numSectors sectors to a disc, starting at sector.
-numSectors is between 1 and 256
-sector is from 0 to 2^28
+numSectors is between 1 and LIMIT_SECTORS if LIMIT_SECTORS is defined,
+else it is at least 1
+sector is 0 or greater
 buffer is a pointer to the memory to read from
 */
 static inline bool _FAT_disc_writeSectors (const DISC_INTERFACE* disc, sec_t sector, sec_t numSectors, const void* buffer) {
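Under the revised contract, a caller that needs more than LIMIT_SECTORS sectors has to split the transfer itself, which is what the reworked loops in fatfile.c below do. A hedged sketch of such a caller, assuming libfat's internal disc.h and common.h plus <stdint.h> and <stdbool.h>; the helper name is illustrative, not part of this commit:

    /* Illustrative only: read `count` sectors in LIMIT_SECTORS-sized pieces. */
    static bool read_sectors_chunked (const DISC_INTERFACE* disc, sec_t sector,
                                      sec_t count, uint8_t* buffer) {
        while (count > 0) {
    #ifdef LIMIT_SECTORS
            sec_t chunk = (count > LIMIT_SECTORS) ? LIMIT_SECTORS : count;
    #else
            sec_t chunk = count;
    #endif
            if (!_FAT_disc_readSectors (disc, sector, chunk, buffer)) {
                return false;
            }
            sector += chunk;
            count  -= chunk;
            buffer += chunk * BYTES_PER_READ;
        }
        return true;
    }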
fatfile.c
@@ -329,6 +329,7 @@ int _FAT_close_r (struct _reent *r, int fd) {
     return ret;
 }
 
+
 ssize_t _FAT_read_r (struct _reent *r, int fd, char *ptr, size_t len) {
     FILE_STRUCT* file = (FILE_STRUCT*) fd;
     PARTITION* partition;
@@ -426,29 +427,42 @@ ssize_t _FAT_read_r (struct _reent *r, int fd, char *ptr, size_t len) {
         }
     }
 
-    // Read in whole clusters
+    // Read in whole clusters, contiguous blocks at a time
     while ((remain >= partition->bytesPerCluster) && flagNoError) {
-        if ( !_FAT_disc_readSectors (
-            partition->disc, _FAT_fat_clusterToSector (partition, position.cluster),
-            partition->sectorsPerCluster, ptr))
+        uint32_t chunkEnd;
+        uint32_t nextChunkStart = position.cluster;
+        size_t chunkSize = 0;
+
+        do {
+            chunkEnd = nextChunkStart;
+            nextChunkStart = _FAT_fat_nextCluster (partition, chunkEnd);
+            chunkSize += partition->bytesPerCluster;
+        } while ((nextChunkStart == chunkEnd + 1) &&
+#ifdef LIMIT_SECTORS
+            (chunkSize + partition->bytesPerCluster <= LIMIT_SECTORS * BYTES_PER_READ) &&
+#endif
+            (chunkSize + partition->bytesPerCluster <= remain));
+
+        if (!_FAT_disc_readSectors (partition->disc, _FAT_fat_clusterToSector (partition, position.cluster),
+            chunkSize / BYTES_PER_READ, ptr))
         {
             flagNoError = false;
             r->_errno = EIO;
             break;
         }
-        ptr += partition->bytesPerCluster;
-        remain -= partition->bytesPerCluster;
+        ptr += chunkSize;
+        remain -= chunkSize;
 
         // Advance to next cluster
-        tempNextCluster = _FAT_fat_nextCluster(partition, position.cluster);
-        if ((remain == 0) && (tempNextCluster == CLUSTER_EOF)) {
+        if ((remain == 0) && (nextChunkStart == CLUSTER_EOF)) {
             position.sector = partition->sectorsPerCluster;
-        } else if (!_FAT_fat_isValidCluster(partition, tempNextCluster)) {
+            position.cluster = chunkEnd;
+        } else if (!_FAT_fat_isValidCluster(partition, nextChunkStart)) {
             r->_errno = EIO;
             flagNoError = false;
         } else {
             position.sector = 0;
-            position.cluster = tempNextCluster;
+            position.cluster = nextChunkStart;
         }
     }
 
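The speed-up comes from the do/while scan above: starting from position.cluster it follows the FAT and keeps growing the chunk only while the next cluster number is exactly the previous one plus one, so a run of consecutive clusters (which occupy consecutive sectors in the data area) is fetched with one _FAT_disc_readSectors call of chunkSize / BYTES_PER_READ sectors. A standalone sketch of that scan, assuming a nextCluster callback with the same meaning as _FAT_fat_nextCluster; everything not in the diff is illustrative:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative: byte length of the contiguous cluster run starting at `first`,
       capped at maxBytes (the remain / LIMIT_SECTORS bound used above). */
    static size_t contiguousRunBytes (uint32_t first, size_t bytesPerCluster,
                                      size_t maxBytes,
                                      uint32_t (*nextCluster) (uint32_t cluster)) {
        uint32_t chunkEnd = first;
        uint32_t nextChunkStart = first;
        size_t chunkSize = 0;

        do {
            chunkEnd = nextChunkStart;
            nextChunkStart = nextCluster (chunkEnd);
            chunkSize += bytesPerCluster;
        } while ((nextChunkStart == chunkEnd + 1) &&
                 (chunkSize + bytesPerCluster <= maxBytes));

        return chunkSize;
    }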
@@ -735,30 +749,45 @@ ssize_t _FAT_write_r (struct _reent *r, int fd, const char *ptr, size_t len) {
 
     // Write whole clusters
     while ((remain >= partition->bytesPerCluster) && flagNoError) {
+        uint32_t chunkEnd;
+        uint32_t nextChunkStart = position.cluster;
+        size_t chunkSize = 0;
+
+        do {
+            chunkEnd = nextChunkStart;
+            nextChunkStart = _FAT_fat_nextCluster (partition, chunkEnd);
+            if ((nextChunkStart == CLUSTER_EOF) || (nextChunkStart == CLUSTER_FREE)) {
+                // Ran out of clusters so get a new one
+                nextChunkStart = _FAT_fat_linkFreeCluster(partition, chunkEnd);
+            }
+            if (!_FAT_fat_isValidCluster(partition, nextChunkStart)) {
+                // Couldn't get a cluster, so abort
+                r->_errno = ENOSPC;
+                flagNoError = false;
+            } else {
+                chunkSize += partition->bytesPerCluster;
+            }
+        } while (flagNoError && (nextChunkStart == chunkEnd + 1) &&
+#ifdef LIMIT_SECTORS
+            (chunkSize + partition->bytesPerCluster <= LIMIT_SECTORS * BYTES_PER_READ) &&
+#endif
+            (chunkSize + partition->bytesPerCluster <= remain));
+
         if ( !_FAT_disc_writeSectors (partition->disc, _FAT_fat_clusterToSector(partition, position.cluster),
-            partition->sectorsPerCluster, ptr))
+            chunkSize / BYTES_PER_READ, ptr))
         {
             flagNoError = false;
             r->_errno = EIO;
             break;
         }
-        ptr += partition->bytesPerCluster;
-        remain -= partition->bytesPerCluster;
-        if (remain > 0) {
-            tempNextCluster = _FAT_fat_nextCluster(partition, position.cluster);
-            if ((tempNextCluster == CLUSTER_EOF) || (tempNextCluster == CLUSTER_FREE)) {
-                // Ran out of clusters so get a new one
-                tempNextCluster = _FAT_fat_linkFreeCluster(partition, position.cluster);
-            }
-            if (!_FAT_fat_isValidCluster(partition, tempNextCluster)) {
-                // Couldn't get a cluster, so abort
-                r->_errno = ENOSPC;
-                flagNoError = false;
-            } else {
-                position.cluster = tempNextCluster;
-            }
+        ptr += chunkSize;
+        remain -= chunkSize;
+
+        if (_FAT_fat_isValidCluster(partition, nextChunkStart)) {
+            position.cluster = nextChunkStart;
         } else {
             // Allocate a new cluster when next writing the file
+            position.cluster = chunkEnd;
             position.sector = partition->sectorsPerCluster;
         }
     }
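The write path follows the same pattern but extends the cluster chain while it scans: when _FAT_fat_nextCluster returns CLUSTER_EOF or CLUSTER_FREE it links a fresh cluster with _FAT_fat_linkFreeCluster, and the freshly linked cluster only joins the current chunk if it happens to be numerically adjacent, otherwise the loop stops and picks it up on the next pass; if no cluster can be obtained it records ENOSPC and stops extending the run. As a rough worked example (assuming 512-byte sectors and 8 sectors per cluster), a run of 16 contiguous clusters becomes a single 128-sector transfer of 64 KiB, exactly the LIMIT_SECTORS ceiling, instead of 16 separate 8-sector writes.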