29 commits
d268f3e
Store TraceAssembler state locally in working dir (#92)
dungeon-master-666 Dec 20, 2024
d5f7c78
Support token data dict value as direct slice (#93)
dungeon-master-666 Dec 20, 2024
d648022
Add support of extra currencies (#95)
dungeon-master-666 Dec 21, 2024
b46f3f1
Rebased ton submodule on upstream testnet (#97)
dungeon-master-666 Dec 21, 2024
3ba6221
Clean up address_book and trace_edges
dungeon-master-666 Dec 21, 2024
05b8572
fix segfault
dungeon-master-666 Dec 24, 2024
67b6d66
improve trace assembler states handling
dungeon-master-666 Jan 2, 2025
0614122
Optimize reading of account states (#99)
dungeon-master-666 Jan 17, 2025
bc9aa65
Refactor PG insert manager, try insert with COPY (#100)
dungeon-master-666 Jan 17, 2025
cdab7ca
Fix RefInt256 nullness (#101)
dungeon-master-666 Jan 18, 2025
dd267b0
build fixes
dungeon-master-666 Jan 18, 2025
4afeda1
If TA state not found, start indexing from smaller seqno.
dungeon-master-666 Jan 29, 2025
3431d12
index extra currencies in ton-smc-scanner (#105)
dungeon-master-666 Jan 30, 2025
45f17aa
Fixed missing comma
kdimentionaltree Jan 30, 2025
c72f556
Create indexes and run migrations by default
kdimentionaltree Feb 2, 2025
1366ddf
Fill empty dns_entry in case it was deleted (#106)
dungeon-master-666 Feb 3, 2025
64bdf8f
Implement measuring statistics (#107)
dungeon-master-666 Feb 6, 2025
b4c7e80
fix statistics add by batch
dungeon-master-666 Feb 7, 2025
977890a
bf
dungeon-master-666 Feb 7, 2025
ca47d6d
Fix excessive quoting of code and data in latest_account_states table…
dungeon-master-666 Feb 13, 2025
5674c88
Add anycast parsing support (#104)
shuva10v Feb 18, 2025
4fa6a64
Fix macos build (#110)
dungeon-master-666 Feb 18, 2025
d567325
Add test for Jetton burn without custom payload parser
shuva10v Feb 26, 2025
045f2bb
Add jetton burn test
shuva10v Feb 26, 2025
1fe5c1f
Add github workflow script
shuva10v Feb 26, 2025
8795821
Fix cmake file
shuva10v Feb 26, 2025
407f2e3
Merge pull request #1 from shuva10v/feature/jettons_burn
shuva10v Feb 26, 2025
f16fd05
Add gh workflow
shuva10v Feb 26, 2025
3409b8e
Testing workflow
shuva10v Feb 26, 2025
41 changes: 41 additions & 0 deletions .github/workflows/tests.yml
@@ -0,0 +1,41 @@
name: Test pull request

on:
  pull_request:
    branches: [ "main" ]

permissions:
  contents: read

env:
  BUILD_TYPE: Release

jobs:
  build:
    runs-on: ubuntu-22.04

    steps:
    - name: Check out repository
      uses: actions/checkout@v3
      with:
        submodules: 'recursive'

    - name: Install system libraries
      run: |
        sudo apt-get update
        sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev liblz4-dev libjemalloc-dev
        wget https://apt.llvm.org/llvm.sh
        chmod +x llvm.sh
        sudo ./llvm.sh 16 all

    - name: Configure CMake
      run: cmake -B ${{github.workspace}}/build

    - name: Build
      # Build your program with the given configuration
      run: cmake --build ${{github.workspace}}/build --config ${{env.BUILD_TYPE}}

    - name: Test
      working-directory: ${{github.workspace}}/build
      run: ./tondb-scanner/test-tondb
8 changes: 3 additions & 5 deletions .gitignore
@@ -225,9 +225,7 @@ sandbox/
# vs code
.vscode/

.DS_Store

tondb-scanner/src/tokens.cpp
tondb-scanner/src/tokens.h

ton-index-worker/src/tokens.cpp
ton-index-worker/src/tokens.h
tondb-scanner/src/tokens-tlb.cpp
tondb-scanner/src/tokens-tlb.h
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -37,7 +37,7 @@ add_subdirectory(ton-index-clickhouse)
add_subdirectory(ton-integrity-checker)
add_subdirectory(ton-smc-scanner)
add_subdirectory(ton-trace-emulator)
add_subdirectory(sandbox-cpp)
add_subdirectory(celldb-migrate)

if (PGTON)
message("Building pgton")
12 changes: 12 additions & 0 deletions celldb-migrate/CMakeLists.txt
@@ -0,0 +1,12 @@
cmake_minimum_required(VERSION 3.16)

add_executable(celldb-migrate
    src/main.cpp
)
target_include_directories(celldb-migrate
    PUBLIC src
)
target_compile_features(celldb-migrate PRIVATE cxx_std_20)
target_link_libraries(celldb-migrate tondb-scanner)

install(TARGETS celldb-migrate RUNTIME DESTINATION bin)
231 changes: 231 additions & 0 deletions celldb-migrate/src/main.cpp
@@ -0,0 +1,231 @@
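// CellDB compress-depth migration tool added by this PR.
// Illustrative invocation (the path and depth value below are examples only,
// not taken from this PR; the flags themselves are defined in main() below):
//   celldb-migrate --db /var/ton-work/db --new-celldb-compress-depth 5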
#include "td/utils/port/signals.h"
#include "td/utils/OptionParser.h"
#include "td/utils/format.h"
#include "td/utils/logging.h"
#include "td/utils/check.h"
#include "td/utils/port/path.h"
#include "td/actor/actor.h"
#include "crypto/vm/cp0.h"
#include "tddb/td/db/RocksDb.h"
#include <iostream>
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "crypto/vm/db/DynamicBagOfCellsDb.h"
#include "crypto/vm/db/CellStorage.h"


static td::Status from_rocksdb(rocksdb::Status status) {
  if (status.ok()) {
    return td::Status::OK();
  }
  return td::Status::Error(status.ToString());
}
static td::Slice from_rocksdb(rocksdb::Slice slice) {
  return td::Slice(slice.data(), slice.size());
}
static rocksdb::Slice to_rocksdb(td::Slice slice) {
  return rocksdb::Slice(slice.data(), slice.size());
}

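// Migrates one contiguous slice of the CellDB key space: iterates raw RocksDB
// keys in [from_, to_), reloads each cell and re-stores it whenever its on-disk
// representation does not match what the new compress depth expects. All writes
// for the slice go into a single write batch committed at the end.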
class MigrateBatchActor: public td::actor::Actor {
  td::Bits256 from_;
  td::Bits256 to_;
  std::shared_ptr<td::RocksDb> db_;
  int new_compress_depth_;
  td::Promise<td::Unit> promise_;

  std::unique_ptr<vm::CellLoader> loader_;
  std::shared_ptr<vm::DynamicBagOfCellsDb> boc_;

  uint32_t migrated_{0};
public:
  MigrateBatchActor(td::Bits256 from, td::Bits256 to, std::shared_ptr<td::RocksDb> db, int new_compress_depth, td::Promise<td::Unit> promise)
      : from_(from), to_(to), db_(db), new_compress_depth_(new_compress_depth), promise_(std::move(promise)) {

    loader_ = std::make_unique<vm::CellLoader>(db_);
    boc_ = vm::DynamicBagOfCellsDb::create();
    boc_->set_celldb_compress_depth(new_compress_depth_);  // probably not necessary in this context
    boc_->set_loader(std::make_unique<vm::CellLoader>(db_));
  }

  void start_up() override {
    vm::CellStorer storer{*db_};

    std::unique_ptr<rocksdb::Iterator> it;
    it.reset(db_->raw_db()->NewIterator({}));
    db_->begin_write_batch().ensure();
    for (it->Seek(to_rocksdb(from_.as_slice())); it->Valid(); it->Next()) {
      auto key = from_rocksdb(it->key());
      if (key.size() != 32) {
        LOG(WARNING) << "CellDb: skipping key with size " << key.size();
        continue;
      }
      td::Bits256 hash = td::Bits256(td::ConstBitPtr{key.ubegin()});
      if (!(hash < to_)) {
        break;
      }

      auto value = from_rocksdb(it->value());
      migrate_cell(hash, value, storer);
    }
    db_->commit_write_batch().ensure();

    LOG(INFO) << "Migrating batch from " << from_.to_hex() << " to " << to_.to_hex() << " done. Migrated " << migrated_ << " cells";
    promise_.set_value(td::Unit());
    stop();
  }

  td::Status migrate_cell(const td::Bits256& hash, const td::Slice& value, vm::CellStorer& storer) {
    auto R = loader_->load(hash.as_slice(), value, true, boc_->as_ext_cell_creator());
    if (R.is_error()) {
      LOG(WARNING) << "CellDb: failed to load cell: " << R.move_as_error();
      return td::Status::OK();
    }
    if (R.ok().status == vm::CellLoader::LoadResult::NotFound) {
      LOG(WARNING) << "CellDb: cell not found";
      return td::Status::OK();
    }
    bool expected_stored_boc = R.ok().cell_->get_depth() == new_compress_depth_;
    if (expected_stored_boc != R.ok().stored_boc_) {
      ++migrated_;
      storer.set(R.ok().refcnt(), R.ok().cell_, expected_stored_boc).ensure();
      LOG(DEBUG) << "Migrating cell " << hash.to_hex();
    }
    return td::Status::OK();
  }
};

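// Drives the whole migration: walks the 256-bit key space from zero in
// fixed-size intervals (the hard-coded hex constant in deploy_batches) and
// keeps up to max_parallel_batches_ MigrateBatchActor instances in flight,
// scheduling the next interval whenever one finishes.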
class MigrateCellDBActor: public td::actor::Actor {
  std::string db_root_;
  int new_compress_depth_;
  int max_parallel_batches_;

  std::shared_ptr<td::RocksDb> db_;

  uint32_t migrated_{0};

  td::Bits256 current_;

  int cur_parallel_batches_{0};
public:
  MigrateCellDBActor(std::string db_root, int new_compress_depth, int max_parallel_batches)
      : db_root_(db_root), new_compress_depth_(new_compress_depth), max_parallel_batches_(max_parallel_batches) {
    td::RocksDbOptions read_db_options;
    read_db_options.use_direct_reads = true;
    auto db_r = td::RocksDb::open(db_root_ + "/celldb", std::move(read_db_options));
    if (db_r.is_error()) {
      LOG(FATAL) << "failed to open db: " << db_r.error();
      stop();
      return;
    }
    db_ = std::make_shared<td::RocksDb>(db_r.move_as_ok());

    current_ = td::Bits256::zero();
  }

  void start_up() override {
    uint64_t count;
    db_->raw_db()->GetIntProperty("rocksdb.estimate-num-keys", &count);
    LOG(INFO) << "Estimated total number of keys: " << count;

    deploy_batches();
  }

  void deploy_batches() {
    using namespace td::literals;
    const auto interval_bi = "0000100000000000000000000000000000000000000000000000000000000000"_rx256;
    CHECK(interval_bi.not_null());

    while (cur_parallel_batches_ < max_parallel_batches_) {
      auto current_bi = td::bits_to_refint(current_.bits(), 256, false);
      auto to_bi = current_bi + interval_bi;
      if (!to_bi->is_valid()) {
        LOG(INFO) << "New to_bi is invalid. Stopping.";
        return;
      }
      td::Bits256 to;
      std::string to_hex = to_bi->to_hex_string();
      if (to_hex.size() < 64) {
        to_hex = std::string(64 - to_hex.size(), '0') + to_hex;
      }
      if (to.from_hex(to_hex) >= 256) {
        LOG(INFO) << "New to_bi is too large. Stopping.";
        return;
      }

      auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), current = current_, to = to](td::Result<td::Unit> R) {
        if (R.is_error()) {
          LOG(ERROR) << "failed to migrate batch from " << current.to_hex() << " to " << to.to_hex() << ": " << R.error();
        } else {
          LOG(INFO) << "migrated batch from " << current.to_hex() << " to " << to.to_hex();
        }
        td::actor::send_closure(SelfId, &MigrateCellDBActor::on_batch_migrated);
      });
      auto db_clone = std::make_shared<td::RocksDb>(db_->clone());
      td::actor::create_actor<MigrateBatchActor>("migrate", current_, to, db_clone, new_compress_depth_, std::move(P)).release();
      current_ = to;

      cur_parallel_batches_++;
    }
  }

  void on_batch_migrated() {
    cur_parallel_batches_--;

    deploy_batches();

    if (cur_parallel_batches_ == 0) {
      LOG(INFO) << "Migrated all batches";
      stop();
    }
  }
};

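// Entry point: parses --db and --new-celldb-compress-depth, then runs a
// 32-thread actor scheduler with a single MigrateCellDBActor that keeps up to
// 32 batches in flight.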
int main(int argc, char* argv[]) {
  SET_VERBOSITY_LEVEL(verbosity_INFO);
  td::set_default_failure_signal_handler().ensure();

  td::OptionParser p;
  std::string db_root;
  int new_compress_depth = 0;
  p.set_description("Migrate CellDB to another compress db value");
  p.add_option('\0', "help", "prints_help", [&]() {
    char b[10240];
    td::StringBuilder sb(td::MutableSlice{b, 10000});
    sb << p;
    std::cout << sb.as_cslice().c_str();
    std::exit(2);
  });
  p.add_option('D', "db", "Path to TON DB folder", [&](td::Slice fname) {
    db_root = fname.str();
  });
  p.add_checked_option('\0', "new-celldb-compress-depth", "New value of celldb compress depth", [&](td::Slice fname) {
    int v;
    try {
      v = std::stoi(fname.str());
    } catch (...) {
      return td::Status::Error("bad value for --new-celldb-compress-depth: not a number");
    }
    new_compress_depth = v;
    return td::Status::OK();
  });
  auto S = p.run(argc, argv);
  if (S.is_error()) {
    LOG(ERROR) << "failed to parse options: " << S.move_as_error();
    std::_Exit(2);
  }
  if (db_root.empty()) {
    LOG(ERROR) << "db path is empty";
    std::_Exit(2);
  }
  if (new_compress_depth <= 0) {
    LOG(ERROR) << "new compress depth is invalid";
    std::_Exit(2);
  }

  td::actor::Scheduler scheduler({32});
  scheduler.run_in_context(
      [&] { td::actor::create_actor<MigrateCellDBActor>("migrate", db_root, new_compress_depth, 32).release(); });
  while (scheduler.run(1)) {
  }
  return 0;
}
2 changes: 1 addition & 1 deletion external/libpqxx
Submodule libpqxx updated 185 files
2 changes: 1 addition & 1 deletion external/ton
Submodule ton updated 560 files
20 changes: 0 additions & 20 deletions sandbox-cpp/CMakeLists.txt

This file was deleted.

9 changes: 0 additions & 9 deletions sandbox-cpp/src/DbReader.cpp

This file was deleted.

11 changes: 0 additions & 11 deletions sandbox-cpp/src/DbReader.h

This file was deleted.

35 changes: 0 additions & 35 deletions sandbox-cpp/src/Scheduler.cpp

This file was deleted.
