Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CodeStyle][Typos][M-[1-22]] Fix typo(macor, mantain, mak, manualy, mannualy, maping, maxmium, Maxinum, menas, momory, Meatadata,miliseconds, Minium, minumum, minimun, modication, modifed, modifing, modifty, moduels, Modul, Morevoer, mutiple, Mutiply, multipy, Multiplie, mutiply, Muti, muti, mutexs) #70392

Open
wants to merge 2 commits into
base: develop
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 0 additions & 30 deletions _typos.toml
Original file line number Diff line number Diff line change
Expand Up @@ -282,36 +282,6 @@ localy = 'localy'
Localy = 'Localy'
logarithmical = 'logarithmical'
Loggin = 'Loggin'
macor = 'macor'
mantain = 'mantain'
mak = 'mak'
manualy = 'manualy'
mannualy = 'mannualy'
maping = 'maping'
Maxinum = 'Maxinum'
maxmium = 'maxmium'
menas = 'menas'
momory = 'momory'
Meatadata = 'Meatadata'
miliseconds = 'miliseconds'
minimun = 'minimun'
Minium = 'Minium'
minumum = 'minumum'
modication = 'modication'
modifed = 'modifed'
modifing = 'modifing'
modifty = 'modifty'
moduels = 'moduels'
Modul = 'Modul'
Morevoer = 'Morevoer'
mutiple = 'mutiple'
mutiply = 'mutiply'
Mutiply = 'Mutiply'
multipy = 'multipy'
Multiplie = 'Multiplie'
Muti = 'Muti'
muti = 'muti'
mutexs = 'mutexs'
occured = 'occured'
Ocurred = 'Ocurred'
occures = 'occures'
Expand Down
4 changes: 2 additions & 2 deletions paddle/cinn/ir/group_schedule/search/config_searcher.cc
Original file line number Diff line number Diff line change
Expand Up @@ -208,7 +208,7 @@ ScheduleConfigSearcher::ScheduleConfigSearcher(
constraints_(constraints) {}

std::pair<ScoreType, CandidateType> ScheduleConfigSearcher::Search(
bool is_search_minimun) {
bool is_search_minimum) {
VLOG(6) << "Start Search...";
CandidateGenerator candidate_generator(candidate_range_, constraints_);
std::vector<CandidateType> candidates = candidate_generator.Candidates();
Expand All @@ -222,7 +222,7 @@ std::pair<ScoreType, CandidateType> ScheduleConfigSearcher::Search(
VLOG(6) << "Score = " << score;
records_[score] = candidate;
}
return is_search_minimun ? *records_.begin() : *(records_.end()--);
return is_search_minimum ? *records_.begin() : *(records_.end()--);
}

} // namespace search
Expand Down
2 changes: 1 addition & 1 deletion paddle/cinn/ir/group_schedule/search/config_searcher.h
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,7 @@ class ScheduleConfigSearcher {
const std::vector<std::pair<int, int>>& candidate_range,
const std::vector<ConstraintFunc>& constraints = {});

std::pair<ScoreType, CandidateType> Search(bool is_search_minimun = true);
std::pair<ScoreType, CandidateType> Search(bool is_search_minimum = true);

private:
std::vector<std::unique_ptr<BaseObjectiveFunc>> objective_funcs_;
Expand Down
14 changes: 7 additions & 7 deletions paddle/cinn/ir/ir_base.cc
Original file line number Diff line number Diff line change
Expand Up @@ -404,14 +404,14 @@ const IndexExpr IndexExpr::operand(int32_t i) const {
return get()->operand(i).as_index();
}

int64_t IndexExpr::GetLargestMutiplyPart() const {
int64_t IndexExpr::GetLargestMultiplyPart() const {
switch (node_type()) {
case cinn::ir::IrNodeTy::_Var_:
return 1;
case cinn::ir::IrNodeTy::Div: {
if (operand(1).type().is_index_type()) {
int64_t lhsDiv = operand(0).GetLargestMutiplyPart();
int64_t rhsDiv = operand(1).GetLargestMutiplyPart();
int64_t lhsDiv = operand(0).GetLargestMultiplyPart();
int64_t rhsDiv = operand(1).GetLargestMultiplyPart();
if (lhsDiv % rhsDiv == 0) return std::abs(lhsDiv / rhsDiv);
}
return 1;
Expand All @@ -421,13 +421,13 @@ int64_t IndexExpr::GetLargestMutiplyPart() const {
return std::abs(int_imm->value);
}
case cinn::ir::IrNodeTy::Mul: {
return operand(0).GetLargestMutiplyPart() *
operand(1).GetLargestMutiplyPart();
return operand(0).GetLargestMultiplyPart() *
operand(1).GetLargestMultiplyPart();
}
case cinn::ir::IrNodeTy::Add:
case cinn::ir::IrNodeTy::Mod: {
return std::gcd(operand(0).GetLargestMutiplyPart(),
operand(1).GetLargestMutiplyPart());
return std::gcd(operand(0).GetLargestMultiplyPart(),
operand(1).GetLargestMultiplyPart());
}
}
PADDLE_THROW(::common::errors::Unimplemented("Unsupported type of expr: %s",
Expand Down
2 changes: 1 addition & 1 deletion paddle/cinn/ir/ir_base.h
Original file line number Diff line number Diff line change
Expand Up @@ -499,7 +499,7 @@ struct IndexExpr : public IrNodeRef {

Type type() const { return p_->type(); }

int64_t GetLargestMutiplyPart() const;
int64_t GetLargestMultiplyPart() const;

IndexExpr Normalize() const;

Expand Down
13 changes: 7 additions & 6 deletions paddle/cinn/ir/op/ir_operators.cc
Original file line number Diff line number Diff line change
Expand Up @@ -509,8 +509,8 @@ static IndexExpr SimplifyDiv(const IndexExpr &lhs, const IndexExpr &rhs) {

// (expr1 * c1 * c2 + expr2 * c1 * c3) / c1 ===> expr1 * c2 + expr2 * c3.
if (lhsAdd) {
int64_t llhsFactor = lhsAdd->a().as_index().GetLargestMutiplyPart();
int64_t lrhsFactor = lhsAdd->b().as_index().GetLargestMutiplyPart();
int64_t llhsFactor = lhsAdd->a().as_index().GetLargestMultiplyPart();
int64_t lrhsFactor = lhsAdd->b().as_index().GetLargestMultiplyPart();
if (llhsFactor % rhsConst->value == 0 &&
lrhsFactor % rhsConst->value == 0) {
return lhsAdd->a().as_index() / rhsConst->value +
Expand Down Expand Up @@ -565,20 +565,21 @@ static IndexExpr SimplifyMod(const IndexExpr &lhs, const IndexExpr &rhs) {

// (expr1 * c1 * c2 + expr2 * c3) % c1 ===> expr2 * c3 % c1.
if (lhsAdd) {
int64_t llhsFactor = lhsAdd->a().as_index().GetLargestMutiplyPart();
int64_t lrhsFactor = lhsAdd->b().as_index().GetLargestMutiplyPart();
int64_t llhsFactor = lhsAdd->a().as_index().GetLargestMultiplyPart();
int64_t lrhsFactor = lhsAdd->b().as_index().GetLargestMultiplyPart();
if (llhsFactor % rhsConst->value == 0)
return lhsAdd->b().as_index() % rhsConst->value;
if (lrhsFactor % rhsConst->value == 0)
return lhsAdd->a().as_index() % rhsConst->value;
}

// expr1 * (c1 * c2) % c1 ===> 0.
if (lhs.GetLargestMutiplyPart() % rhsConst->value == 0) return IndexExpr(0);
if (lhs.GetLargestMultiplyPart() % rhsConst->value == 0)
return IndexExpr(0);

// expr1 % (c1 * c2) % c1 ===> expr1 % c1.
if (lhsMod) {
int64_t llhsFactor = lhsMod->b().as_index().GetLargestMutiplyPart();
int64_t llhsFactor = lhsMod->b().as_index().GetLargestMultiplyPart();
if (llhsFactor % rhsConst->value == 0)
return lhsMod->a().as_index() % rhsConst->value;
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/common/ddim.h
Original file line number Diff line number Diff line change
Expand Up @@ -217,7 +217,7 @@ TEST_API std::ostream& operator<<(std::ostream&, const DDim&);

/**
* \brief Flatten dim to 3d
* e.g., DDim d = mak_ddim({1, 2, 3, 4, 5, 6})
* e.g., DDim d = make_ddim({1, 2, 3, 4, 5, 6})
* flatten_to_3d(d, 2, 4); ===> {1*2, 3*4, 5*6} ===> {2, 12, 30}
*/
TEST_API DDim flatten_to_3d(const DDim& src,
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/distributed/ps/service/heter_server.cc
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,7 @@ int SendAndRecvVariableHandler::SaveInSwitchWithShard(
VLOG(4) << "entering SaveInSwitchWithShard";
int32_t group_id = request->group_id();
if (group_id >= FLAGS_heter_world_size) {
LOG(ERROR) << "group id exceed maxmium";
LOG(ERROR) << "group id exceed maximum";
}
auto& local_shard = _local_shards[group_id];
auto& request_io_buffer = cntl->request_attachment();
Expand Down
22 changes: 11 additions & 11 deletions paddle/fluid/distributed/ps/table/common_graph_table.cc
Original file line number Diff line number Diff line change
Expand Up @@ -368,11 +368,11 @@ paddle::framework::GpuPsCommRankFea GraphTable::make_gpu_ps_rank_fea(
// 遍历 rank_nodes[i][shard_num],分8份,分配到 res
std::vector<std::future<size_t>> tasks;

auto mutexs = new std::mutex[shard_num_per_server];
auto mutexes = new std::mutex[shard_num_per_server];
for (int i = 0; i < node_num_; i++) {
for (size_t shard_id = 0; shard_id < shard_num_per_server; shard_id++) {
tasks.push_back(_cpu_worker_pool[gpu_id]->enqueue(
[i, gpu_id, shard_id, &rank_nodes, &node_num_vec, &mutexs]()
[i, gpu_id, shard_id, &rank_nodes, &node_num_vec, &mutexes]()
-> size_t {
auto &rank_node = rank_nodes[i][shard_id];
size_t start = 0;
Expand All @@ -381,9 +381,9 @@ paddle::framework::GpuPsCommRankFea GraphTable::make_gpu_ps_rank_fea(
start++;
}
}
mutexs[shard_id].lock();
mutexes[shard_id].lock();
node_num_vec[shard_id] += start;
mutexs[shard_id].unlock();
mutexes[shard_id].unlock();
return start;
}));
}
Expand Down Expand Up @@ -3150,12 +3150,12 @@ class MergeShardVector {
_slice_num = slice_num;
_shard_keys = output;
_shard_keys->resize(slice_num);
_mutexs = new std::mutex[slice_num];
_mutexes = new std::mutex[slice_num];
}
~MergeShardVector() {
if (_mutexs != nullptr) {
delete[] _mutexs;
_mutexs = nullptr;
if (_mutexes != nullptr) {
delete[] _mutexes;
_mutexes = nullptr;
}
}
// merge shard keys
Expand All @@ -3165,15 +3165,15 @@ class MergeShardVector {
auto &dest = (*_shard_keys)[shard_id];
auto &src = shard_keys[shard_id];

_mutexs[shard_id].lock();
_mutexes[shard_id].lock();
dest.insert(dest.end(), src.begin(), src.end());
_mutexs[shard_id].unlock();
_mutexes[shard_id].unlock();
}
}

private:
int _slice_num = 0;
std::mutex *_mutexs = nullptr;
std::mutex *_mutexes = nullptr;
std::vector<std::vector<uint64_t>> *_shard_keys;
};

Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -93,7 +93,7 @@ static int BuildFusion(Graph* graph,
embeddings_tensor->Resize(
{embedding_tensor.dims()[0], weightx_tensor.dims()[1]});

// Multiplie embeddings via WeightsX and add bias
// Multiply embeddings via WeightsX and add bias
auto embedding_data = embedding_tensor.data<float>();
auto weightx_data = weightx_tensor.data<float>();
auto embeddings_data =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -532,7 +532,7 @@ void DependencyBuilder::ShrinkDownstreamMap() {
continue;
}

std::set<size_t> minumum_nexts;
std::set<size_t> minimum_nexts;
for (size_t item : op_downstream_map_->at(i)) {
bool not_after_any = true;
// find the op that is not executed after any
Expand All @@ -546,12 +546,12 @@ void DependencyBuilder::ShrinkDownstreamMap() {
}
if (not_after_any) {
VLOG(8) << "downstream op of " << i << ": " << item;
minumum_nexts.insert(item);
minimum_nexts.insert(item);
}
}
// NOTE(Ruibiao): op_happens_before will not be changed when shrink
// downstream map
(*op_downstream_map_)[i] = minumum_nexts;
(*op_downstream_map_)[i] = minimum_nexts;
}
VLOG(8) << "Finish shrink downstream map";
VLOG(8) << "downstream count: " << CountDownstreamMap(*op_downstream_map_);
Expand Down Expand Up @@ -1008,7 +1008,7 @@ void DependencyBuilderSimplify::ShrinkDownstreamMap() {
continue;
}

std::set<size_t> minumum_nexts;
std::set<size_t> minimum_nexts;
for (size_t item : op_downstream_map_.at(i)) {
bool not_after_any = true;
// find the op that is not executed after any
Expand All @@ -1022,12 +1022,12 @@ void DependencyBuilderSimplify::ShrinkDownstreamMap() {
}
if (not_after_any) {
VLOG(8) << "downstream op of " << i << ": " << item;
minumum_nexts.insert(item);
minimum_nexts.insert(item);
}
}
// NOTE(Ruibiao): op_happens_before will not be changed when shrink
// downstream map
op_downstream_map_.at(i) = minumum_nexts;
op_downstream_map_.at(i) = minimum_nexts;
}
VLOG(8) << "Finish shrink downstream map";
VLOG(8) << "downstream count: " << CountDownstreamMap(op_downstream_map_);
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/framework/new_executor/pir_interpreter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1413,7 +1413,7 @@ void PirInterpreter::CalculateLastLiveOps() {
}
VLOG(4) << "var_ref_count_.size() : " << var_ref_count_.size();
for (size_t i = 0; i < last_live_ops_.size(); ++i) {
std::set<size_t> minumum_last_live_ops;
std::set<size_t> minimum_last_live_ops;
for (size_t item : last_live_ops_[i]) {
bool not_before_any = true;
// find the op that is not executed before any
Expand All @@ -1429,11 +1429,11 @@ void PirInterpreter::CalculateLastLiveOps() {
VLOG(6) << "last live op of var " << i << " "
<< value_exe_info_->GetNameById(static_cast<int>(i)) << " : "
<< item << " " << vec_instruction_base_[item]->Name();
minumum_last_live_ops.insert(item);
minimum_last_live_ops.insert(item);
vec_instruction_base_[item]->AddGCCheckVar(i);
}
}
last_live_ops_[i] = minumum_last_live_ops;
last_live_ops_[i] = minimum_last_live_ops;
var_ref_count_[i] = static_cast<int>(last_live_ops_[i].size());
}
VLOG(4) << "shrink the last_live_ops list for all vars in skip_gc_vars";
Expand Down
6 changes: 3 additions & 3 deletions paddle/fluid/framework/new_executor/program_interpreter.cc
Original file line number Diff line number Diff line change
Expand Up @@ -842,7 +842,7 @@ void ProgramInterpreter::Convert(
// in this case, a is the input of op1 and op2, we only need to check
// a after op2, because op2 always uses a after op1.
for (size_t i = 0; i < last_live_ops_.size(); ++i) {
std::set<size_t> minumum_last_live_ops;
std::set<size_t> minimum_last_live_ops;
for (size_t item : last_live_ops_[i]) {
bool not_before_any = true;
// find the op that is not executed before any
Expand All @@ -858,14 +858,14 @@ void ProgramInterpreter::Convert(
VLOG(8) << "last live op of var " << i << " "
<< var_scope_.GetNameById(static_cast<int>(i)) << " : " << item
<< " " << vec_instruction_[item].OpBase()->Type();
minumum_last_live_ops.insert(item);
minimum_last_live_ops.insert(item);
if (!(var_scope_.VarDesc(static_cast<int>(i)) &&
var_scope_.VarDesc(static_cast<int>(i))->Persistable())) {
vec_instruction_[item].AddGCCheckVar(i);
}
}
}
last_live_ops_[i] = minumum_last_live_ops;
last_live_ops_[i] = minimum_last_live_ops;
vec_meta_info[i].var_ref_count_ =
static_cast<int>(last_live_ops_[i].size());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ namespace framework {

// A multiplexing waiter, be able to wait multiple kinds of events
// simultaneously.
// Muti-Producer single-consumer single-slot message-queue.
// Multi-Producer single-consumer single-slot message-queue.
class EventsWaiter {
public:
using EventId = std::size_t;
Expand Down
4 changes: 2 additions & 2 deletions paddle/fluid/framework/operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1815,7 +1815,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
if (is_xpu_kp_support) {
auto expected_kernel_key_library_type = kernel_type_->library_type_;
kernel_type_->library_type_ = LibraryType::kKP;
VLOG(3) << "modifing XPU KP kernel in static graph: "
VLOG(3) << "modifying XPU KP kernel in static graph: "
<< phi_kernel_name
<< ", using_kernel_key:" << *kernel_type_.get();
auto try_phi_kernel_key =
Expand Down Expand Up @@ -1899,7 +1899,7 @@ void OperatorWithKernel::RunImpl(const Scope& scope,
if (is_xpu_kp_support) {
auto expected_kernel_key_library_type = kernel_type_->library_type_;
kernel_type_->library_type_ = LibraryType::kKP;
VLOG(3) << "modifing XPU KP kernel in static graph: "
VLOG(3) << "modifying XPU KP kernel in static graph: "
<< phi_kernel_name
<< ", using_kernel_key:" << *kernel_type_.get();
auto try_phi_kernel_key =
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/var_desc.h
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ class TEST_API VarDesc {
proto::VarType::TensorDesc *mutable_tensor_desc();
std::vector<proto::VarType::TensorDesc *> mutable_tensor_descs();

// Is it really needed? Or just mantain a ptr from the block?
// Is it really needed? Or just maintain a ptr from the block?
proto::VarDesc desc_;
AttributeMap attrs_;

Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/imperative/prepared_operator.cc
Original file line number Diff line number Diff line change
Expand Up @@ -271,7 +271,7 @@ PreparedOp PrepareImpl(
if (is_xpu_kp_support) {
auto expected_kernel_key_backend = expected_kernel_key.backend();
expected_kernel_key.set_backend(phi::Backend::KPS);
VLOG(3) << "modifing XPU KP kernel: " << phi_kernel_name
VLOG(3) << "modifying XPU KP kernel: " << phi_kernel_name
<< ", using_kernel_key:" << expected_kernel_key;

if (!phi_kernel_factory.HasKernel(phi_kernel_name,
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/inference/api/paddle_pass_builder.h
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ class PD_INFER_DECL PaddlePassBuilder {
std::vector<std::string> AnalysisPasses() const {
auto passes = analysis_passes_;
// To make sure the ir_graph_to_program should be the last pass so any
// modication of IR will persist to the program.
// modification of IR will persist to the program.
passes.push_back("ir_graph_to_program_pass");
return passes;
}
Expand Down
Loading