-
Notifications
You must be signed in to change notification settings - Fork 15
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
dfc17dd
commit 691dc0d
Showing
6 changed files
with
367 additions
and
33 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,165 @@ | ||
// Copyright 2024 The Accumulate Authors | ||
// | ||
// Use of this source code is governed by an MIT-style | ||
// license that can be found in the LICENSE file or at | ||
// https://opensource.org/licenses/MIT. | ||
|
||
package block | ||
|
||
import ( | ||
"bytes" | ||
"errors" | ||
"fmt" | ||
"io" | ||
"os" | ||
"sort" | ||
"sync/atomic" | ||
|
||
"gitlab.com/accumulatenetwork/accumulate/pkg/database" | ||
"gitlab.com/accumulatenetwork/accumulate/pkg/types/record" | ||
"gitlab.com/accumulatenetwork/core/schema/pkg/binary" | ||
) | ||
|
||
// Each index entry is a 32-byte key hash followed by an encoded record
// location, padded to a fixed slot size. The file always holds a fixed
// number of slots.
const (
	indexFileEntrySize  = 64
	indexFileEntryCount = 1 << 10
	indexFileSize       = indexFileEntrySize * indexFileEntryCount
)
|
||
var errNoSpace = errors.New("no space left") | ||
|
||
// indexFile is a fixed-size on-disk index mapping 32-byte key hashes to
// record locations, kept sorted by hash.
type indexFile struct {
	// file is the backing file (project type); its data field exposes
	// the file contents and its mu guards access.
	file *file
	// count is the number of entries currently stored in the file.
	count atomic.Int64
}
|
||
func newIndexFile(name string) (*indexFile, error) { | ||
var err error | ||
f := new(indexFile) | ||
f.file, err = openFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL) | ||
if err != nil { | ||
return nil, err | ||
} | ||
defer closeIfError(&err, f) | ||
|
||
// Always allocate 1024 entries | ||
err = f.file.Grow(indexFileSize) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
||
return f, err | ||
} | ||
|
||
func openIndexFile(name string) (_ *indexFile, err error) { | ||
f := new(indexFile) | ||
f.file, err = openFile(name, os.O_RDWR) | ||
if err != nil { | ||
return nil, err | ||
} | ||
defer closeIfError(&err, f) | ||
|
||
if len(f.file.data) != indexFileSize { | ||
return nil, fmt.Errorf("invalid size: want %d, got %d", indexFileSize, len(f.file.data)) | ||
} | ||
|
||
// Find the empty region at the end of the file and use that to determine | ||
// the number of entries | ||
f.count.Store(int64(sort.Search(indexFileEntryCount, func(i int) bool { | ||
offset := int64(i) * indexFileEntrySize | ||
return [32]byte(f.file.data[offset:]) == [32]byte{} | ||
}))) | ||
|
||
return f, err | ||
} | ||
|
||
// Close closes the underlying file.
func (f *indexFile) Close() error {
	return f.file.Close()
}
|
||
func (f *indexFile) Insert(key *record.KeyHash, loc *recordLocation) error { | ||
if *key == [32]byte{} { | ||
panic("cannot insert the zero key") | ||
} | ||
|
||
f.file.mu.RLock() | ||
defer f.file.mu.RUnlock() | ||
|
||
count := f.count.Load() | ||
i := f.find(count, *key) | ||
offset := i * indexFileEntrySize | ||
if i < count && [32]byte(f.file.data[offset:offset+32]) == *key { | ||
return f.writeAt(key, loc, offset) | ||
} | ||
|
||
// Do we need extra space? | ||
if int64(len(f.file.data))/indexFileEntrySize <= count { | ||
return errNoSpace | ||
} | ||
|
||
if i < count { | ||
end := count * indexFileEntrySize | ||
copy(f.file.data[offset+64:], f.file.data[offset:end]) | ||
} | ||
f.count.Add(1) | ||
return f.writeAt(key, loc, offset) | ||
} | ||
|
||
func (f *indexFile) writeAt(key *record.KeyHash, loc *recordLocation, offset int64) error { | ||
wr := f.file.WriteRange(offset, offset+64) | ||
return f.write(key, loc, wr) | ||
} | ||
|
||
// write serializes an entry to wr: the 32-byte key hash, the
// binary-encoded location, and a trailing EmptyObject marker. The
// writer is expected to bound the entry to its fixed-size slot.
func (f *indexFile) write(key *record.KeyHash, loc *recordLocation, wr io.Writer) error {
	_, err := wr.Write(key[:])
	if err != nil {
		return err
	}

	enc := binary.NewEncoder(wr)
	err = loc.MarshalBinaryV2(enc)
	if err != nil {
		// Running out of slot space mid-record is a hard error.
		if errors.Is(err, io.EOF) {
			return io.ErrUnexpectedEOF
		}
		return err
	}

	// The trailing marker is optional padding: if the record exactly
	// fills the slot, hitting EOF here is not an error.
	_, err = wr.Write([]byte{binary.EmptyObject})
	if errors.Is(err, io.EOF) {
		return nil
	}
	return err
}
|
||
// Find returns the location recorded for key, or a NotFoundError when
// the key's hash is not present in the index.
func (f *indexFile) Find(key *record.Key) (*recordLocation, error) {
	hash := key.Hash()

	f.file.mu.RLock()
	count := f.count.Load()
	index := f.find(count, hash)
	f.file.mu.RUnlock()
	if index >= count {
		// The hash sorts after every stored entry.
		return nil, (*database.NotFoundError)(key)
	}

	// NOTE(review): data is read after the lock is released — confirm
	// this is safe with respect to concurrent Inserts shifting entries.
	offset := int64(index) * indexFileEntrySize
	if [32]byte(f.file.data[offset:offset+32]) != hash {
		return nil, (*database.NotFoundError)(key)
	}

	return f.readAt(offset)
}
|
||
func (f *indexFile) find(count int64, hash record.KeyHash) int64 { | ||
return int64(sort.Search(int(count), func(i int) bool { | ||
offset := i * indexFileEntrySize | ||
return bytes.Compare(hash[:], f.file.data[offset:offset+32]) <= 0 | ||
})) | ||
} | ||
|
||
func (f *indexFile) readAt(offset int64) (*recordLocation, error) { | ||
rd := f.file.ReadRange(offset+32, offset+64) | ||
dec := binary.NewDecoder(rd) | ||
loc := new(recordLocation) | ||
err := loc.UnmarshalBinaryV2(dec) | ||
return loc, err | ||
} |
Oops, something went wrong.