author     chrislu <chris.lu@gmail.com>  2021-12-23 18:23:18 -0800
committer  chrislu <chris.lu@gmail.com>  2021-12-23 18:23:18 -0800
commit     255a1c7dcd009524c34cb8c3d6fce59c6d9a03cb (patch)
tree       6d196832f8c1eb35832fc26498baf02f32963743
parent     f77ca41769b8466cec64c39767e0fcc038f51b31 (diff)
refactor type names
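
The change replaces plain int chunk indices with defined types, so the compiler rejects code that confuses a chunk's position in the logical file with its position in the on-disk temp file. A minimal, self-contained sketch of the pattern (the type names mirror the diff; the toy program around them is hypothetical):

package main

import "fmt"

// LogicChunkIndex is a chunk's position in the logical file;
// ActualChunkIndex is its position in the backing temp file.
// Because they are distinct defined types, an explicit conversion
// is required to use one where the other is expected.
type LogicChunkIndex int
type ActualChunkIndex int

func main() {
	logicToActual := map[LogicChunkIndex]ActualChunkIndex{
		LogicChunkIndex(3): ActualChunkIndex(0), // logical chunk 3 happens to be stored first
	}
	var logic LogicChunkIndex = 3
	fmt.Println(logicToActual[logic]) // prints 0

	// var mixed ActualChunkIndex = logic // would not compile: mismatched defined types
}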
-rw-r--r--  weed/filesys/dirty_pages_temp_file.go             2
-rw-r--r--  weed/filesys/page_writer/chunked_file_writer.go  19
2 files changed, 12 insertions, 9 deletions
diff --git a/weed/filesys/dirty_pages_temp_file.go b/weed/filesys/dirty_pages_temp_file.go
index 9690d6768..a207eeb38 100644
--- a/weed/filesys/dirty_pages_temp_file.go
+++ b/weed/filesys/dirty_pages_temp_file.go
@@ -63,7 +63,7 @@ func (pages *TempFileDirtyPages) GetStorageOptions() (collection, replication st
func (pages *TempFileDirtyPages) saveChunkedFileToStorage() {
- pages.chunkedFile.ProcessEachInterval(func(file *os.File, logicChunkIndex int, interval *page_writer.ChunkWrittenInterval) {
+ pages.chunkedFile.ProcessEachInterval(func(file *os.File, logicChunkIndex page_writer.LogicChunkIndex, interval *page_writer.ChunkWrittenInterval) {
reader := page_writer.NewFileIntervalReader(pages.chunkedFile, logicChunkIndex, interval)
pages.saveChunkedFileIntevalToStorage(reader, int64(logicChunkIndex)*pages.chunkedFile.ChunkSize, interval.Size())
})
diff --git a/weed/filesys/page_writer/chunked_file_writer.go b/weed/filesys/page_writer/chunked_file_writer.go
index 9e98c6194..14c034900 100644
--- a/weed/filesys/page_writer/chunked_file_writer.go
+++ b/weed/filesys/page_writer/chunked_file_writer.go
@@ -7,11 +7,14 @@ import (
"sync"
)
+type LogicChunkIndex int
+type ActualChunkIndex int
+
// ChunkedFileWriter assumes the write requests will come in within chunks
type ChunkedFileWriter struct {
dir string
file *os.File
- logicToActualChunkIndex map[int]int
+ logicToActualChunkIndex map[LogicChunkIndex]ActualChunkIndex
chunkUsages []*ChunkWrittenIntervalList
ChunkSize int64
sync.Mutex
@@ -23,7 +26,7 @@ func NewChunkedFileWriter(dir string, chunkSize int64) *ChunkedFileWriter {
return &ChunkedFileWriter{
dir: dir,
file: nil,
- logicToActualChunkIndex: make(map[int]int),
+ logicToActualChunkIndex: make(map[LogicChunkIndex]ActualChunkIndex),
ChunkSize: chunkSize,
}
}
@@ -78,20 +81,20 @@ func (cw *ChunkedFileWriter) ReadDataAt(p []byte, off int64) (maxStop int64) {
}
func (cw *ChunkedFileWriter) toActualWriteOffset(logicOffset int64) (actualOffset int64, chunkUsage *ChunkWrittenIntervalList) {
- logicChunkIndex := int(logicOffset / cw.ChunkSize)
+ logicChunkIndex := LogicChunkIndex(logicOffset / cw.ChunkSize)
offsetRemainder := logicOffset % cw.ChunkSize
existingActualChunkIndex, found := cw.logicToActualChunkIndex[logicChunkIndex]
if found {
return int64(existingActualChunkIndex)*cw.ChunkSize + offsetRemainder, cw.chunkUsages[existingActualChunkIndex]
}
- cw.logicToActualChunkIndex[logicChunkIndex] = len(cw.chunkUsages)
+ cw.logicToActualChunkIndex[logicChunkIndex] = ActualChunkIndex(len(cw.chunkUsages))
chunkUsage = newChunkWrittenIntervalList()
cw.chunkUsages = append(cw.chunkUsages, chunkUsage)
return int64(len(cw.chunkUsages)-1)*cw.ChunkSize + offsetRemainder, chunkUsage
}
-func (cw *ChunkedFileWriter) toActualReadOffset(logicOffset int64) (actualChunkIndex int, chunkUsage *ChunkWrittenIntervalList) {
- logicChunkIndex := int(logicOffset / cw.ChunkSize)
+func (cw *ChunkedFileWriter) toActualReadOffset(logicOffset int64) (actualChunkIndex ActualChunkIndex, chunkUsage *ChunkWrittenIntervalList) {
+ logicChunkIndex := LogicChunkIndex(logicOffset / cw.ChunkSize)
existingActualChunkIndex, found := cw.logicToActualChunkIndex[logicChunkIndex]
if found {
return existingActualChunkIndex, cw.chunkUsages[existingActualChunkIndex]
@@ -99,7 +102,7 @@ func (cw *ChunkedFileWriter) toActualReadOffset(logicOffset int64) (actualChunkI
return 0, nil
}
-func (cw *ChunkedFileWriter) ProcessEachInterval(process func(file *os.File, logicChunkIndex int, interval *ChunkWrittenInterval)) {
+func (cw *ChunkedFileWriter) ProcessEachInterval(process func(file *os.File, logicChunkIndex LogicChunkIndex, interval *ChunkWrittenInterval)) {
for logicChunkIndex, actualChunkIndex := range cw.logicToActualChunkIndex {
chunkUsage := cw.chunkUsages[actualChunkIndex]
for t := chunkUsage.head.next; t != chunkUsage.tail; t = t.next {
@@ -123,7 +126,7 @@ type FileIntervalReader struct {
var _ = io.Reader(&FileIntervalReader{})
-func NewFileIntervalReader(cw *ChunkedFileWriter, logicChunkIndex int, interval *ChunkWrittenInterval) *FileIntervalReader {
+func NewFileIntervalReader(cw *ChunkedFileWriter, logicChunkIndex LogicChunkIndex, interval *ChunkWrittenInterval) *FileIntervalReader {
actualChunkIndex, found := cw.logicToActualChunkIndex[logicChunkIndex]
if !found {
// this should never happen
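
For reference, the offset arithmetic these hunks touch maps a logical file offset to an offset in the backing temp file: the logical chunk index is logicOffset / ChunkSize, the remainder within the chunk is unchanged, and the actual chunk index is whichever slot was allocated the first time that logical chunk was written. A standalone sketch of just that bookkeeping (not the real ChunkedFileWriter, which also tracks written intervals and the file handle):

package main

import "fmt"

type LogicChunkIndex int
type ActualChunkIndex int

// toyWriter models only the index bookkeeping of chunked_file_writer.go.
type toyWriter struct {
	ChunkSize               int64
	logicToActualChunkIndex map[LogicChunkIndex]ActualChunkIndex
}

// toActualOffset returns where a logical offset lands in the backing file,
// allocating the next actual chunk slot the first time a logical chunk is seen.
func (w *toyWriter) toActualOffset(logicOffset int64) int64 {
	logicChunkIndex := LogicChunkIndex(logicOffset / w.ChunkSize)
	offsetRemainder := logicOffset % w.ChunkSize
	actual, found := w.logicToActualChunkIndex[logicChunkIndex]
	if !found {
		actual = ActualChunkIndex(len(w.logicToActualChunkIndex))
		w.logicToActualChunkIndex[logicChunkIndex] = actual
	}
	return int64(actual)*w.ChunkSize + offsetRemainder
}

func main() {
	w := &toyWriter{ChunkSize: 1024, logicToActualChunkIndex: map[LogicChunkIndex]ActualChunkIndex{}}
	fmt.Println(w.toActualOffset(5000)) // logical chunk 4, first write  -> actual chunk 0, offset 904
	fmt.Println(w.toActualOffset(100))  // logical chunk 0, second write -> actual chunk 1, offset 1124
}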