aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorchrislu <chris.lu@gmail.com>2022-01-22 08:27:40 -0800
committerchrislu <chris.lu@gmail.com>2022-01-22 08:27:40 -0800
commit643bbbeb49fe2ba12e6fcc4ebb92cce70a2b3a9c (patch)
tree2e1f63f70e2826c1667d21435cd628c646ea435f
parent18ed06b420780bdbbd8e5d4faf1fe51302895d44 (diff)
downloadseaweedfs-643bbbeb49fe2ba12e6fcc4ebb92cce70a2b3a9c.tar.xz
seaweedfs-643bbbeb49fe2ba12e6fcc4ebb92cce70a2b3a9c.zip
rename
-rw-r--r--weed/filesys/dirty_pages_mem_chunk.go24
1 file changed, 12 insertions, 12 deletions
diff --git a/weed/filesys/dirty_pages_mem_chunk.go b/weed/filesys/dirty_pages_mem_chunk.go
index f8f3c7984..71da91666 100644
--- a/weed/filesys/dirty_pages_mem_chunk.go
+++ b/weed/filesys/dirty_pages_mem_chunk.go
@@ -10,7 +10,7 @@ import (
"time"
)
-type MemoryChunkPages struct {
+type ChunkedDirtyPages struct {
fh *FileHandle
writeWaitGroup sync.WaitGroup
chunkAddLock sync.Mutex
@@ -22,12 +22,12 @@ type MemoryChunkPages struct {
}
var (
- _ = page_writer.DirtyPages(&MemoryChunkPages{})
+ _ = page_writer.DirtyPages(&ChunkedDirtyPages{})
)
-func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *MemoryChunkPages {
+func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *ChunkedDirtyPages {
- dirtyPages := &MemoryChunkPages{
+ dirtyPages := &ChunkedDirtyPages{
fh: fh,
}
@@ -39,7 +39,7 @@ func newMemoryChunkPages(fh *FileHandle, chunkSize int64) *MemoryChunkPages {
return dirtyPages
}
-func (pages *MemoryChunkPages) AddPage(offset int64, data []byte) {
+func (pages *ChunkedDirtyPages) AddPage(offset int64, data []byte) {
pages.hasWrites = true
glog.V(4).Infof("%v memory AddPage [%d, %d)", pages.fh.f.fullpath(), offset, offset+int64(len(data)))
@@ -48,7 +48,7 @@ func (pages *MemoryChunkPages) AddPage(offset int64, data []byte) {
return
}
-func (pages *MemoryChunkPages) FlushData() error {
+func (pages *ChunkedDirtyPages) FlushData() error {
if !pages.hasWrites {
return nil
}
@@ -59,18 +59,18 @@ func (pages *MemoryChunkPages) FlushData() error {
return nil
}
-func (pages *MemoryChunkPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
+func (pages *ChunkedDirtyPages) ReadDirtyDataAt(data []byte, startOffset int64) (maxStop int64) {
if !pages.hasWrites {
return
}
return pages.uploadPipeline.MaybeReadDataAt(data, startOffset)
}
-func (pages *MemoryChunkPages) GetStorageOptions() (collection, replication string) {
+func (pages *ChunkedDirtyPages) GetStorageOptions() (collection, replication string) {
return pages.collection, pages.replication
}
-func (pages *MemoryChunkPages) saveChunkedFileIntevalToStorage(reader io.Reader, offset int64, size int64, cleanupFn func()) {
+func (pages *ChunkedDirtyPages) saveChunkedFileIntevalToStorage(reader io.Reader, offset int64, size int64, cleanupFn func()) {
mtime := time.Now().UnixNano()
defer cleanupFn()
@@ -91,13 +91,13 @@ func (pages *MemoryChunkPages) saveChunkedFileIntevalToStorage(reader io.Reader,
}
-func (pages MemoryChunkPages) Destroy() {
+func (pages ChunkedDirtyPages) Destroy() {
pages.uploadPipeline.Shutdown()
}
-func (pages *MemoryChunkPages) LockForRead(startOffset, stopOffset int64) {
+func (pages *ChunkedDirtyPages) LockForRead(startOffset, stopOffset int64) {
pages.uploadPipeline.LockForRead(startOffset, stopOffset)
}
-func (pages *MemoryChunkPages) UnlockForRead(startOffset, stopOffset int64) {
+func (pages *ChunkedDirtyPages) UnlockForRead(startOffset, stopOffset int64) {
pages.uploadPipeline.UnlockForRead(startOffset, stopOffset)
}