Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Resolve memory leaks and fix bugs. #79

Open
wants to merge 2 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
MQSim
build/

.vscode
traces
10 changes: 9 additions & 1 deletion src/exec/Host_System.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -94,8 +94,16 @@ Host_System::~Host_System()
if (ssd_device->Host_interface->GetType() == HostInterface_Types::SATA) {
delete this->SATA_hba;
}

Host_Components::IO_Flow_Base* base_flow;
for (uint16_t flow_id = 0; flow_id < this->IO_flows.size(); flow_id++) {
delete this->IO_flows[flow_id];
if(base_flow = dynamic_cast<Host_Components::IO_Flow_Synthetic*>(IO_flows[flow_id])){
delete (Host_Components::IO_Flow_Synthetic*)base_flow;
} else if(base_flow = dynamic_cast<Host_Components::IO_Flow_Trace_Based*>(IO_flows[flow_id])){
delete (Host_Components::IO_Flow_Trace_Based*)base_flow;
}else{
PRINT_ERROR("Error in delete IO_flows classified by IO flow Type")
}
}
}

Expand Down
7 changes: 3 additions & 4 deletions src/exec/SSD_Device.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -308,7 +308,7 @@ SSD_Device::SSD_Device(Device_Parameter_Set *parameters, std::vector<IO_Flow_Par
parameters->Flash_Parameters.Die_No_Per_Chip, parameters->Flash_Parameters.Plane_No_Per_Die,
parameters->Flash_Parameters.Block_No_Per_Plane, parameters->Flash_Parameters.Page_No_Per_Block,
parameters->Flash_Parameters.Page_Capacity / SECTOR_SIZE_IN_BYTE, parameters->Use_Copyback_for_GC, max_rho, 10,
parameters->Seed++);
parameters->Dynamic_Wearleveling_Enabled, parameters->Static_Wearleveling_Enabled, parameters->Static_Wearleveling_Threshold, parameters->Seed++);
Simulator->AddObject(gcwl);
fbm->Set_GC_and_WL_Unit(gcwl);
ftl->GC_and_WL_Unit = gcwl;
Expand Down Expand Up @@ -380,10 +380,9 @@ SSD_Device::~SSD_Device()
{
delete ((SSD_Components::ONFI_Channel_NVDDR2 *)this->Channels[channel_cntr])->Chips[chip_cntr];
}
delete this->Channels[channel_cntr];
delete[] (SSD_Components::ONFI_Channel_NVDDR2 *)this->Channels[channel_cntr];
}

delete this->PHY;
delete (SSD_Components::NVM_PHY_ONFI_NVDDR2*)this->PHY;
delete ((SSD_Components::FTL *)this->Firmware)->TSU;
delete ((SSD_Components::FTL *)this->Firmware)->BlockManager;
delete ((SSD_Components::FTL *)this->Firmware)->Address_Mapping_Unit;
Expand Down
12 changes: 6 additions & 6 deletions src/host/IO_Flow_Base.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -543,15 +543,15 @@ IO_Flow_Base::IO_Flow_Base(const sim_object_id_type &name, uint16_t flow_id, LHA
xmlwriter.Write_attribute_string(attr, val);

attr = "IOPS";
val = std::to_string((double)STAT_generated_request_count / (Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
val = std::to_string((double)STAT_generated_request_count / ((double)Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
xmlwriter.Write_attribute_string(attr, val);

attr = "IOPS_Read";
val = std::to_string((double)STAT_generated_read_request_count / (Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
val = std::to_string((double)STAT_generated_read_request_count / ((double)Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
xmlwriter.Write_attribute_string(attr, val);

attr = "IOPS_Write";
val = std::to_string((double)STAT_generated_write_request_count / (Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
val = std::to_string((double)STAT_generated_write_request_count / ((double)Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
xmlwriter.Write_attribute_string(attr, val);

attr = "Bytes_Transferred";
Expand All @@ -567,15 +567,15 @@ IO_Flow_Base::IO_Flow_Base(const sim_object_id_type &name, uint16_t flow_id, LHA
xmlwriter.Write_attribute_string(attr, val);

attr = "Bandwidth";
val = std::to_string((double)STAT_transferred_bytes_total / (Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
val = std::to_string((double)STAT_transferred_bytes_total / ((double)Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
xmlwriter.Write_attribute_string(attr, val);

attr = "Bandwidth_Read";
val = std::to_string((double)STAT_transferred_bytes_read / (Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
val = std::to_string((double)STAT_transferred_bytes_read / ((double)Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
xmlwriter.Write_attribute_string(attr, val);

attr = "Bandwidth_Write";
val = std::to_string((double)STAT_transferred_bytes_write / (Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
val = std::to_string((double)STAT_transferred_bytes_write / ((double)Simulator->Time() / SIM_TIME_TO_SECONDS_COEFF));
xmlwriter.Write_attribute_string(attr, val);


Expand Down
3 changes: 3 additions & 0 deletions src/host/IO_Flow_Trace_Based.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ IO_Flow_Trace_Based::IO_Flow_Trace_Based(const sim_object_id_type &name, uint16_

IO_Flow_Trace_Based::~IO_Flow_Trace_Based()
{
    // Release the trace input stream held by this flow, if one is still
    // attached. Guarding on is_open() avoids calling close() on a stream
    // that never opened (or was closed already), which would set failbit.
    if (!trace_file.is_open()) {
        return;
    }
    trace_file.close();
}

Host_IO_Request *IO_Flow_Trace_Based::Generate_next_request()
Expand Down
9 changes: 8 additions & 1 deletion src/main.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,7 @@ void read_configuration_parameters(const string ssd_config_file_path, Execution_
PRINT_MESSAGE("Error in the SSD configuration file!")
PRINT_MESSAGE("Using MQSim's default configuration.")
}
delete[] temp_string;
} else {
PRINT_MESSAGE("Using MQSim's default configuration.");
PRINT_MESSAGE("Writing the default configuration parameters to the expected configuration file.");
Expand Down Expand Up @@ -304,8 +305,14 @@ int main(int argc, char* argv[])

PRINT_MESSAGE("Writing results to output file .......");
collect_results(ssd, host, (workload_defs_file_path.substr(0, workload_defs_file_path.find_last_of(".")) + "_scenario_" + std::to_string(cntr) + ".xml").c_str());

for (auto io_flow_def = (*io_scen)->begin(); io_flow_def != (*io_scen)->end(); io_flow_def++) {
delete *io_flow_def;
}
delete *io_scen;
}
cout << "Simulation complete; Press any key to exit." << endl;
delete exec_params;
cout << "Simulation complete; Press any key to exit." << endl;

cin.get(); // Disable if you prefer batch runs

Expand Down
10 changes: 10 additions & 0 deletions src/ssd/Address_Mapping_Unit_Base.h
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,15 @@ namespace SSD_Components
virtual void Remove_barrier_for_accessing_lpa(const stream_id_type stream_id, const LPA_type lpa) = 0; //Removes the barrier that has already been set for accessing an LPA (i.e., the GC_and_WL_Unit_Base unit successfully finished relocating LPA from one physical location to another physical location).
virtual void Remove_barrier_for_accessing_mvpn(const stream_id_type stream_id, const MVPN_type mvpn) = 0; //Removes the barrier that has already been set for accessing an MVPN (i.e., the GC_and_WL_Unit_Base unit successfully finished relocating MVPN from one physical location to another physical location).
virtual void Start_servicing_writes_for_overfull_plane(const NVM::FlashMemory::Physical_Page_Address plane_address) = 0;//This function is invoked when GC execution is finished on a plane and the plane has enough number of free pages to service writes
typedef void(*TransactionServicedSignalHandlerType) (NVM_Transaction_Flash*);

// Registers the handler this unit calls to report a serviced transaction
// (stored in connected_transaction_serviced_signal_handler; used, e.g., when
// transactions waiting behind an LPA/MVPN barrier are drained). Only one
// handler is kept — a later call replaces any previously registered function.
// NOTE(review): the name says "user_request_arrived" but the handler type is
// per-transaction-serviced; confirm the intended naming with the callers.
void Connect_to_user_request_arrived_signal(TransactionServicedSignalHandlerType function)
{
// Overwrites any previously registered handler.
connected_transaction_serviced_signal_handler = function;
}
protected:

void ConnectDCMServiedTransactionHandler(void(*dcmServicedTransactionHandler)(NVM_Transaction_Flash*));
FTL* ftl;
NVM_PHY_ONFI* flash_controller;
Flash_Block_Manager_Base* block_manager;
Expand All @@ -106,6 +114,8 @@ namespace SSD_Components
bool fold_large_addresses;
bool mapping_table_stored_on_flash;

TransactionServicedSignalHandlerType connected_transaction_serviced_signal_handler;

virtual bool query_cmt(NVM_Transaction_Flash* transaction) = 0;
virtual PPA_type online_create_entry_for_reads(LPA_type lpa, const stream_id_type stream_id, NVM::FlashMemory::Physical_Page_Address& read_address, uint64_t read_sectors_bitmap) = 0;
virtual void manage_user_transaction_facing_barrier(NVM_Transaction_Flash* transaction) = 0;
Expand Down
28 changes: 22 additions & 6 deletions src/ssd/Address_Mapping_Unit_Page_Level.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,6 @@ namespace SSD_Components

Cached_Mapping_Table::~Cached_Mapping_Table()
{
std::unordered_map<LPA_type, CMTSlotType*> addressMap;
std::list<std::pair<LPA_type, CMTSlotType*>> lruList;

auto entry = addressMap.begin();
while (entry != addressMap.end()) {
delete (*entry).second;
Expand Down Expand Up @@ -399,6 +396,17 @@ namespace SSD_Components
delete domains[i];
}
delete[] domains;

for (unsigned int channel_id = 0; channel_id < channel_count; channel_id++) {
for (unsigned int chip_id = 0; chip_id < chip_no_per_channel; chip_id++) {
for (unsigned int die_id = 0; die_id < die_no_per_chip; die_id++) {
delete[] Write_transactions_for_overfull_planes[channel_id][chip_id][die_id];
}
delete[] Write_transactions_for_overfull_planes[channel_id][chip_id];
}
delete[] Write_transactions_for_overfull_planes[channel_id];
}
delete[] Write_transactions_for_overfull_planes;
}

void Address_Mapping_Unit_Page_Level::Setup_triggers()
Expand Down Expand Up @@ -436,6 +444,7 @@ namespace SSD_Components
allocate_plane_for_translation_write(dummy_tr);
allocate_page_in_plane_for_translation_write(dummy_tr, (MVPN_type)dummy_tr->LPA, false);
flash_controller->Change_flash_page_status_for_preconditioning(dummy_tr->Address, dummy_tr->LPA);
block_manager->Program_transaction_serviced(dummy_tr->Address);
}
}
mapping_table_stored_on_flash = true;
Expand Down Expand Up @@ -588,6 +597,8 @@ namespace SSD_Components
if (transaction->Type == Transaction_Type::READ) {
if (ppa == NO_PPA) {
ppa = online_create_entry_for_reads(transaction->LPA, streamID, transaction->Address, ((NVM_Transaction_Flash_RD*)transaction)->read_sectors_bitmap);
block_manager->Program_transaction_serviced(transaction->Address);
flash_controller->Change_flash_page_status_for_preconditioning(transaction->Address, transaction->LPA);
}
transaction->PPA = ppa;
Convert_ppa_to_address(transaction->PPA, transaction->Address);
Expand Down Expand Up @@ -1616,6 +1627,11 @@ namespace SSD_Components
allocate_plane_for_translation_write(writeTR);
allocate_page_in_plane_for_translation_write(writeTR, mvpn, false);
domains[stream_id]->DepartingMappingEntries.insert(get_MVPN(lpn, stream_id));

if(readTR != NULL){
readTR->RelatedWrite = writeTR;
}

ftl->TSU->Submit_transaction(writeTR);

Stats::Total_flash_reads_for_mapping++;
Expand Down Expand Up @@ -1777,8 +1793,8 @@ namespace SSD_Components
MVPN_type mpvn = (MVPN_type)flash_controller->Get_metadata(addr.ChannelID, addr.ChipID, addr.DieID, addr.PlaneID, addr.BlockID, addr.PageID);
if (domains[block->Stream_id]->GlobalTranslationDirectory[mpvn].MPPN != Convert_address_to_ppa(addr)) {
PRINT_ERROR("Inconsistency in the global translation directory when locking an MPVN!")
Set_barrier_for_accessing_mvpn(block->Stream_id, mpvn);
}
Set_barrier_for_accessing_mvpn(block->Stream_id, mpvn);
} else {
LPA_type lpa = flash_controller->Get_metadata(addr.ChannelID, addr.ChipID, addr.DieID, addr.PlaneID, addr.BlockID, addr.PageID);
LPA_type ppa = domains[block->Stream_id]->GlobalMappingTable[lpa].PPA;
Expand All @@ -1805,7 +1821,7 @@ namespace SSD_Components
//If there are read requests waiting behind the barrier, then MQSim assumes they can be serviced with the actual page data that is accessed during GC execution
auto read_tr = domains[stream_id]->Read_transactions_behind_LPA_barrier.find(lpa);
while (read_tr != domains[stream_id]->Read_transactions_behind_LPA_barrier.end()) {
handle_transaction_serviced_signal_from_PHY((*read_tr).second);
connected_transaction_serviced_signal_handler((*read_tr).second);
delete (*read_tr).second;
domains[stream_id]->Read_transactions_behind_LPA_barrier.erase(read_tr);
read_tr = domains[stream_id]->Read_transactions_behind_LPA_barrier.find(lpa);
Expand All @@ -1814,7 +1830,7 @@ namespace SSD_Components
//If there are write requests waiting behind the barrier, then MQSim assumes they can be serviced with the actual page data that is accessed during GC execution. This may not be 100% true for all write requests, but, to avoid more complexity in the simulation, we accept this assumption.
auto write_tr = domains[stream_id]->Write_transactions_behind_LPA_barrier.find(lpa);
while (write_tr != domains[stream_id]->Write_transactions_behind_LPA_barrier.end()) {
handle_transaction_serviced_signal_from_PHY((*write_tr).second);
connected_transaction_serviced_signal_handler((*write_tr).second);
delete (*write_tr).second;
domains[stream_id]->Write_transactions_behind_LPA_barrier.erase(write_tr);
write_tr = domains[stream_id]->Write_transactions_behind_LPA_barrier.find(lpa);
Expand Down
14 changes: 12 additions & 2 deletions src/ssd/Data_Cache_Manager_Flash_Advanced.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,6 +59,7 @@ namespace SSD_Components
{
case SSD_Components::Cache_Sharing_Mode::SHARED:
{
delete[] back_pressure_buffer_depth;
delete per_stream_cache[0];
while (dram_execution_queue[0].size()) {
delete dram_execution_queue[0].front();
Expand All @@ -70,6 +71,8 @@ namespace SSD_Components
break;
}
case SSD_Components::Cache_Sharing_Mode::EQUAL_PARTITIONING:
{
delete[] back_pressure_buffer_depth;
for (unsigned int i = 0; i < stream_count; i++) {
delete per_stream_cache[i];
while (dram_execution_queue[i].size()) {
Expand All @@ -81,11 +84,13 @@ namespace SSD_Components
}
}
break;

}
default:
break;
}

delete per_stream_cache;
delete[] per_stream_cache;
delete[] dram_execution_queue;
delete[] waiting_user_requests_queue_for_dram_free_slot;
delete[] bloom_filter;
Expand All @@ -94,6 +99,7 @@ namespace SSD_Components
void Data_Cache_Manager_Flash_Advanced::Setup_triggers()
{
Data_Cache_Manager_Base::Setup_triggers();
static_cast<FTL*>(nvm_firmware)->Address_Mapping_Unit->Connect_to_user_request_arrived_signal(handle_transaction_serviced_signal_from_PHY);
flash_controller->ConnectToTransactionServicedSignal(handle_transaction_serviced_signal_from_PHY);
}

Expand Down Expand Up @@ -313,6 +319,8 @@ namespace SSD_Components
flash_written_back_write_size_in_sectors += count_sector_no_from_status_bitmap(tr->write_sectors_bitmap);
bloom_filter[user_request->Stream_id].insert(tr->LPA);
writeback_transactions.push_back(tr);
} else{
delete tr;
}
user_request->Transaction_list.erase(it++);
}
Expand All @@ -328,6 +336,8 @@ namespace SSD_Components
read_transfer_info->next_event_type = Data_Cache_Simulation_Event_Type::MEMORY_READ_FOR_CACHE_EVICTION_FINISHED;
read_transfer_info->Stream_id = user_request->Stream_id;
service_dram_access_request(read_transfer_info);
} else{
delete evicted_cache_slots;
}

//Issue memory write to write data to DRAM
Expand Down Expand Up @@ -398,9 +408,9 @@ namespace SSD_Components
timestamp, ((NVM_Transaction_Flash_RD*)transaction)->read_sectors_bitmap | slot.State_bitmap_of_existing_sectors);
} else {
if (!((Data_Cache_Manager_Flash_Advanced*)_my_instance)->per_stream_cache[transaction->Stream_id]->Check_free_slot_availability()) {
std::list<NVM_Transaction*>* evicted_cache_slots = new std::list<NVM_Transaction*>;
Data_Cache_Slot_Type evicted_slot = ((Data_Cache_Manager_Flash_Advanced*)_my_instance)->per_stream_cache[transaction->Stream_id]->Evict_one_slot_lru();
if (evicted_slot.Status == Cache_Slot_Status::DIRTY_NO_FLASH_WRITEBACK) {
std::list<NVM_Transaction*>* evicted_cache_slots = new std::list<NVM_Transaction*>;
Memory_Transfer_Info* transfer_info = new Memory_Transfer_Info;
transfer_info->Size_in_bytes = count_sector_no_from_status_bitmap(evicted_slot.State_bitmap_of_existing_sectors) * SECTOR_SIZE_IN_BYTE;
evicted_cache_slots->push_back(new NVM_Transaction_Flash_WR(Transaction_Source_Type::USERIO,
Expand Down
23 changes: 13 additions & 10 deletions src/ssd/Data_Cache_Manager_Flash_Simple.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ namespace SSD_Components
{
Data_Cache_Manager_Base::Setup_triggers();
flash_controller->ConnectToTransactionServicedSignal(handle_transaction_serviced_signal_from_PHY);
static_cast<FTL*>(nvm_firmware)->Address_Mapping_Unit->Connect_to_user_request_arrived_signal(handle_transaction_serviced_signal_from_PHY);
}

void Data_Cache_Manager_Flash_Simple::Do_warmup(std::vector<Utils::Workload_Statistics*> workload_stats)
Expand Down Expand Up @@ -96,6 +97,7 @@ namespace SSD_Components
if (user_request->Transaction_list.size() > 0) {
static_cast<FTL*>(nvm_firmware)->Address_Mapping_Unit->Translate_lpa_to_ppa_and_dispatch(user_request->Transaction_list);
}
return;
}
default:
PRINT_ERROR("The specified caching mode is not not support in simple cache manager!")
Expand All @@ -113,6 +115,7 @@ namespace SSD_Components
if (user_request->Transaction_list.size() > 0) {
waiting_user_requests_queue_for_dram_free_slot[user_request->Stream_id].push_back(user_request);
}
return;
}
default:
PRINT_ERROR("The specified caching mode is not not support in simple cache manager!")
Expand Down Expand Up @@ -291,33 +294,33 @@ namespace SSD_Components
/// Handles completion events of simulated DRAM accesses for this cache manager.
/// ev->Parameters carries a Memory_Transfer_Info describing the finished DRAM
/// transfer. After dispatching on the event type, the next queued DRAM request
/// for the same stream (if any) is scheduled, and the transfer-info object is
/// released — this function owns it once the event fires.
/// (This span contained both the pre- and post-rename diff lines of the
/// transfer_inf -> transfer_info change; resolved here to the single, renamed
/// version so every statement appears exactly once.)
void Data_Cache_Manager_Flash_Simple::Execute_simulator_event(MQSimEngine::Sim_Event* ev)
{
    Data_Cache_Simulation_Event_Type eventType = (Data_Cache_Simulation_Event_Type)ev->Type;
    Memory_Transfer_Info* transfer_info = (Memory_Transfer_Info*)ev->Parameters;

    switch (eventType)
    {
    case Data_Cache_Simulation_Event_Type::MEMORY_READ_FOR_USERIO_FINISHED://A user read is serviced from DRAM cache
    case Data_Cache_Simulation_Event_Type::MEMORY_WRITE_FOR_USERIO_FINISHED:
        // Account the sectors of this transfer as serviced from cache; when the
        // owning user request has no outstanding work left, signal completion.
        ((User_Request*)(transfer_info)->Related_request)->Sectors_serviced_from_cache -= transfer_info->Size_in_bytes / SECTOR_SIZE_IN_BYTE;
        if (is_user_request_finished((User_Request*)(transfer_info)->Related_request)) {
            broadcast_user_request_serviced_signal(((User_Request*)(transfer_info)->Related_request));
        }
        break;
    case Data_Cache_Simulation_Event_Type::MEMORY_READ_FOR_CACHE_EVICTION_FINISHED://Reading data from DRAM and writing it back to the flash storage
        // Evicted transactions were read out of DRAM; hand them to the FTL for
        // address translation/dispatch, then free the list container (the
        // transactions themselves are owned downstream from here on).
        static_cast<FTL*>(nvm_firmware)->Address_Mapping_Unit->Translate_lpa_to_ppa_and_dispatch(*((std::list<NVM_Transaction*>*)(transfer_info->Related_request)));
        delete (std::list<NVM_Transaction*>*)transfer_info->Related_request;
        break;
    case Data_Cache_Simulation_Event_Type::MEMORY_WRITE_FOR_CACHE_FINISHED://The recently read data from flash is written back to memory to support future user read requests
        break;
    }

    // This transfer is finished: pop it from the per-stream DRAM queue and, if
    // another request is waiting, schedule its completion event using the
    // estimated DRAM access latency.
    dram_execution_queue[transfer_info->Stream_id].pop();
    if (dram_execution_queue[transfer_info->Stream_id].size() > 0) {
        Memory_Transfer_Info* new_transfer_info = dram_execution_queue[transfer_info->Stream_id].front();
        Simulator->Register_sim_event(Simulator->Time() + estimate_dram_access_time(new_transfer_info->Size_in_bytes, dram_row_size, dram_busrt_size,
            dram_burst_transfer_time_ddr, dram_tRCD, dram_tCL, dram_tRP),
            this, new_transfer_info, static_cast<int>(new_transfer_info->next_event_type));
    }

    delete transfer_info;
}
}
1 change: 1 addition & 0 deletions src/ssd/Flash_Block_Manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,7 @@ namespace SSD_Components
{
PlaneBookKeepingType* plane_record = &plane_manager[page_address.ChannelID][page_address.ChipID][page_address.DieID][page_address.PlaneID];
plane_record->Invalid_pages_count++;
plane_record->Valid_pages_count--;
if (plane_record->Blocks[page_address.BlockID].Stream_id != stream_id) {
PRINT_ERROR("Inconsistent status in the Invalidate_page_in_block function! The accessed block is not allocated to stream " << stream_id)
}
Expand Down
2 changes: 1 addition & 1 deletion src/ssd/Flash_Block_Manager_Base.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -236,7 +236,7 @@ namespace SSD_Components

/// Returns true when the given page of the block is still valid.
/// A page is valid iff its bit in the block's invalidation bitmap is clear.
/// The bitmap is an array of 64-bit words: word index is page_id / 64 and the
/// bit position must be reduced modulo 64 — shifting a uint64_t by page_id
/// directly is undefined behavior for page_id >= 64 and was the bug here.
/// (This span contained both the buggy and the fixed diff line; resolved to
/// the corrected version only.)
bool Flash_Block_Manager_Base::Is_page_valid(Block_Pool_Slot_Type* block, flash_page_ID_type page_id)
{
    if ((block->Invalid_page_bitmap[page_id / 64] & (((uint64_t)1) << (page_id % 64))) == 0) {
        return true;
    }
    return false;
}
Expand Down
Loading