[CodeStyle][Typos][P-[42-48]] Fix typo (producted,progam,progrss,propogated,Propogation,protocal,PROTOCAL,pthon,pyrhon) #70991

Open

wants to merge 6 commits into develop
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -283,7 +283,7 @@ option(WITH_MULTINODE_TESTING "Test multinode apis and ops" OFF)
option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FOUND})
option(WITH_SYSTEM_BLAS "Use system blas library" OFF)
option(WITH_DISTRIBUTE "Compile with distributed support" OFF)
option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" OFF)
option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocol" OFF)
option(ON_INFER "Turn on inference optimization and inference-lib generation"
ON)
option(WITH_CPP_DIST "Install PaddlePaddle C++ distribution" OFF)
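Note on this CMakeLists.txt hunk: only the help text of the WITH_BRPC_RDMA option changes ("protocal" becomes "protocol"); the option name and its OFF default stay the same, so existing -DWITH_BRPC_RDMA=ON configurations are unaffected.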
10 changes: 0 additions & 10 deletions _typos.toml
@@ -35,7 +35,6 @@ clen = 'clen'
datas = 'datas'
dota = 'dota'
dout = "dout"
eles = 'eles'
entrys = 'entrys'
feeded = 'feeded'
grad = "grad"
@@ -58,15 +57,6 @@ ouput = 'ouput'
outpout = 'outpout'
ouptut = 'ouptut'
Ouput = 'Ouput'
producted = 'producted'
progam = 'progam'
progrss = 'progrss'
propogated = 'propogated'
Propogation = 'Propogation'
protocal = 'protocal'
PROTOCAL = 'PROTOCAL'
pyrhon = 'pyrhon'
pthon = 'pthon'
regist = 'regist'
Regist = 'Regist'
Registe = 'Registe'
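Each word = 'word' line in _typos.toml is an allowlist entry that tells the typos checker to tolerate that spelling for now; deleting the nine entries above re-enables checking for them, so the remaining hunks have to remove every occurrence of those spellings from the tree. A minimal verification sketch (a hypothetical helper, not part of this PR; the file extensions scanned are assumptions):

import pathlib
import re

# Spellings whose allowlist entries are removed in this hunk.
REMOVED = [
    "producted", "progam", "progrss", "propogated", "Propogation",
    "protocal", "PROTOCAL", "pthon", "pyrhon",
]
PATTERN = re.compile("|".join(re.escape(word) for word in REMOVED))
SUFFIXES = {".py", ".cc", ".cu", ".h", ".sh", ".txt", ".cmake"}

for path in pathlib.Path(".").rglob("*"):
    if not (path.is_file() and path.suffix in SUFFIXES):
        continue
    text = path.read_text(encoding="utf-8", errors="ignore")
    for lineno, line in enumerate(text.splitlines(), start=1):
        if PATTERN.search(line):
            # Any hit here means the typos check would still fail once the allowlist shrinks.
            print(f"{path}:{lineno}: {line.strip()}")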
6 changes: 3 additions & 3 deletions paddle/fluid/inference/tensorrt/convert/matrix_multiply_op.cc
@@ -114,11 +114,11 @@ class MatrixMultiplyOpConverter : public OpConverter {
before_shape_tensors.push_back(
GetEleTensorOfShape(input_shape_tensor, i));
}
nvinfer1::ITensor* producted = Add1DConstantLayer(1);
nvinfer1::ITensor* produced = Add1DConstantLayer(1);
for (int i = x_num_col_dims; i < x_rank; ++i) {
producted = Prod(producted, GetEleTensorOfShape(input_shape_tensor, i));
produced = Prod(produced, GetEleTensorOfShape(input_shape_tensor, i));
}
before_shape_tensors.push_back(producted);
before_shape_tensors.push_back(produced);
nvinfer1::ITensor* before_shape_tensor = Concat(before_shape_tensors);
auto* reshape_before_layer =
TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input1);
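For context, the converter above multiplies the shape entries from x_num_col_dims up to x_rank into one produced value and uses it to reshape the input before the matrix multiply. A rough NumPy analogue of that flattening (an illustration only, not the TensorRT code path):

import numpy as np

def flatten_trailing_dims(x: np.ndarray, x_num_col_dims: int) -> np.ndarray:
    # Keep the leading x_num_col_dims axes and collapse the rest into one axis,
    # mirroring the Prod(...) accumulation in the converter.
    head = x.shape[:x_num_col_dims]
    produced = 1
    for d in x.shape[x_num_col_dims:]:
        produced *= d
    return x.reshape(*head, produced)

x = np.arange(24).reshape(2, 3, 4)
print(flatten_trailing_dims(x, 1).shape)  # (2, 12)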
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/spmd_rules/dim_trans.cc
@@ -174,7 +174,7 @@ std::shared_ptr<DimTrans> make_split(const std::shared_ptr<DimTrans> dim,
}

// Given a `dim_trans` of an output axis, get the input axis
// whose dim mapping should be propogated to it.
// whose dim mapping should be propagated to it.
// If the returned input axis is none, the output axis's
// dim mapping should be set to -1 (replicated). For an axis
// that is flattened from input axes, return the leftmost
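The only change in dim_trans.cc is the comment, but the rule it states can be made concrete with a toy case (assumed, simplified semantics; the real implementation lives elsewhere in this file and is not shown in the hunk):

def mapping_for_output_axis(source_input_axes, input_dims_mapping):
    # source_input_axes: the input axes that feed this output axis, for example
    # the axes flattened together; per the comment, the leftmost one wins.
    if not source_input_axes:
        return -1  # no source axis: the output axis is replicated
    return input_dims_mapping[source_input_axes[0]]

# An output axis flattened from input axes 0 and 1, where axis 0 is sharded on
# mesh dim 0 and axis 1 is replicated, inherits mesh dim 0.
print(mapping_for_output_axis([0, 1], [0, -1, 2]))  # 0
print(mapping_for_output_axis([], [0, -1, 2]))      # -1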
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/spmd_rules/gather.cc
@@ -70,7 +70,7 @@ SpmdInfo GatherInferSpmdBase(const DistMetaTensor& x,
out_axes[axis] = 'k';
}

// Step2: Sharding Propogation
// Step2: Sharding Propagation
// Step2.1: Merge input shardings
std::vector<int64_t> x_dims_mapping(x_dims_mapping_src);
if (axis < x_ndim) {
@@ -132,7 +132,7 @@ SpmdInfo GatherInferSpmdReverseBase(const DistMetaTensor& x,
out_axes[axis] = 'k';
}

// Step2: Sharding Propogation
// Step2: Sharding Propagation
// Step2.1: Merge output shardings
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors({{out_axes, out_dims_mapping_src}});
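The "Sharding Propagation" step named in these comments (and in the nonzero and scatter rules below) merges the dims mappings of tensors that share an einsum-like axis label. A simplified, self-contained illustration of such a merge (assumed semantics, not Paddle's actual ShardingMergeForTensors):

def merge_dims_mappings(named_mappings):
    # named_mappings: (axes string, dims_mapping) pairs, one per tensor. Axes
    # that share a letter must agree on a mesh dimension; conflicts fall back
    # to -1 (replicated).
    axis_to_dim = {}
    for axes, dims_mapping in named_mappings:
        for axis, mesh_dim in zip(axes, dims_mapping):
            if axis not in axis_to_dim or axis_to_dim[axis] == -1:
                axis_to_dim[axis] = mesh_dim
            elif mesh_dim != -1 and axis_to_dim[axis] != mesh_dim:
                axis_to_dim[axis] = -1  # conflicting shardings: replicate
    return axis_to_dim

# x uses axes "ij" and is sharded on mesh dim 0 along i; index uses axes "kj".
print(merge_dims_mappings([("ij", [0, -1]), ("kj", [1, -1])]))
# {'i': 0, 'j': -1, 'k': 1}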
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/spmd_rules/nonzero.cc
@@ -31,7 +31,7 @@ SpmdInfo NonZeroInferSpmd(const DistMetaTensor& x) {
std::string x_axes(x_ndim, '1');
std::string output_axes(2, '1');

// Step2: Sharding Propogation
// Step2: Sharding Propagation
// Step2.1: Merge input shardings
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors({{x_axes, x_dims_mapping_src}});
@@ -65,7 +65,7 @@ SpmdInfo NonZeroInferSpmdReverse(const DistMetaTensor& x,
std::string x_axes(x_ndim, '1');
std::string output_axes(2, '1');

// Step2: Sharding Propogation
// Step2: Sharding Propagation
// Step2.1: Merge input shardings
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors({{output_axes, output_dims_mapping_src}});
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/spmd_rules/scatter.cc
@@ -57,7 +57,7 @@ SpmdInfo ScatterBaseInferSpmd(const DistMetaTensor& x,
std::string out_axes = GetBroadcastAxes(x_ndim, x_ndim, alphabet);
out_axes[0] = '1';

// Step2: Sharding Propogation
// Step2: Sharding Propagation
// Step2.1: Merge input shardings
std::unordered_map<std::string, int64_t> axis_to_dim_map =
ShardingMergeForTensors({{index_axes, index_dims_mapping_src},
@@ -139,7 +139,7 @@ SpmdInfo ScatterBaseInferSpmdReverse(const DistMetaTensor& x,
GetBroadcastAxes(updates_ndim, updates_ndim, alphabet);
std::string out_axes = GetBroadcastAxes(out_ndim, out_ndim, alphabet);

// Step2: Sharding Propogation
// Step2: Sharding Propagation
// Step2.1: Merge output shardings
// the batch axis of output must be replicated
// TODO(zhangyichen): consider the case when the output is partial
2 changes: 1 addition & 1 deletion paddle/scripts/fast_install.sh
@@ -340,7 +340,7 @@ function checkLinuxPython(){
if [ "$python_path" != "" ];then
break
else
echo "输入路径有误,未找到pyrhon"
echo "输入路径有误,未找到 python"
fi
done
fi
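For readers who do not read Chinese: the echoed message means "the entered path is invalid; python was not found". The hunk only fixes the misspelled "pyrhon" and adds a space before "python"; the script logic is unchanged.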
8 changes: 4 additions & 4 deletions python/paddle/distributed/launch/controllers/master.py
@@ -23,7 +23,7 @@
from paddle.distributed.launch.utils.kv_client import KVClient
from paddle.distributed.launch.utils.kv_server import KVServer

ETCD_PROTOCAL = 'etcd://'
ETCD_PROTOCOL = 'etcd://'


def _cmp_by_ip(x):
@@ -39,7 +39,7 @@ class Master:

MAIN = "main"
STANDBY = "standby"
PATICIPANT = "participant"
PARTICIPANT = "participant"

def __init__(self, ctx):
self.ctx = ctx
@@ -64,7 +64,7 @@ def sync_peers(self, prefix, key, value, size, rank=-1) -> (list, int):

@classmethod
def factory(cls, ctx):
if ctx.args.master and ctx.args.master.startswith(ETCD_PROTOCAL):
if ctx.args.master and ctx.args.master.startswith(ETCD_PROTOCOL):
return ETCDMaster(ctx)
else:
return HTTPMaster(ctx)
@@ -75,7 +75,7 @@ def lazy_init(self):
if self.initialized:
return

self.role = Master.PATICIPANT
self.role = Master.PARTICIPANT

if self.ctx.args.master:
self.endpoint = self.ctx.args.master
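The renamed constant is what Master.factory above keys on: an endpoint that starts with etcd:// selects ETCDMaster, anything else falls back to HTTPMaster. A standalone sketch of that dispatch (the stub class names are placeholders, not the real launch controllers):

ETCD_PROTOCOL = "etcd://"

class HTTPMasterStub:
    def __init__(self, endpoint):
        self.endpoint = endpoint

class ETCDMasterStub:
    def __init__(self, endpoint):
        # A real controller would strip the prefix and connect to the etcd cluster.
        self.endpoint = endpoint[len(ETCD_PROTOCOL):]

def make_master(endpoint):
    if endpoint and endpoint.startswith(ETCD_PROTOCOL):
        return ETCDMasterStub(endpoint)
    return HTTPMasterStub(endpoint)

print(type(make_master("etcd://10.0.0.1:2379")).__name__)  # ETCDMasterStub
print(type(make_master("10.0.0.2:8090")).__name__)         # HTTPMasterStub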
2 changes: 1 addition & 1 deletion python/paddle/jit/dy2static/convert_operators.py
@@ -675,7 +675,7 @@ def convert_super(super_fn):
class VariableTuple:
"""
this class will cause enumerate can't be wrapped by other iterator change function.
this will be fixed when list<Variable> is producted.
this will be fixed when list<Variable> is produced.
VariableTuple can only deal with variables which is fixed.
"""

2 changes: 1 addition & 1 deletion python/paddle/tensor/linalg.py
@@ -2747,7 +2747,7 @@ def mv(x: Tensor, vec: Tensor, name: str | None = None) -> Tensor:
For more information, please refer to :ref:`api_guide_Name`. Default is None.

Returns:
Tensor: The tensor which is producted by x and vec.
Tensor: The tensor which is produced by x and vec.

Examples:
.. code-block:: python
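The corrected sentence documents paddle.mv, the matrix-vector product; the docstring's own example block is truncated in this view. A minimal usage sketch (values chosen only for illustration):

import paddle

x = paddle.to_tensor([[2.0, 1.0, 3.0],
                      [3.0, 0.0, 1.0]])
vec = paddle.to_tensor([3.0, 5.0, 1.0])
out = paddle.mv(x, vec)   # the tensor produced by x and vec
print(out.numpy())        # [14. 10.]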
12 changes: 6 additions & 6 deletions test/deprecated/legacy_test/auto_parallel_op_test.py
@@ -428,7 +428,7 @@ class AutoParallelForwardChecker:
def __init__(
self,
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -441,7 +441,7 @@ def __init__(
self.checker_name = "AutoParallelForwardChecker"
self.init_checker(
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -455,7 +455,7 @@ def __init__(
def init_checker(
self,
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -466,7 +466,7 @@ def init_checker(
python_out_sig=None,
):
self.op_type = op_type
self.public_python_api = pthon_api
self.public_python_api = python_api
self.dtype = np.dtype(dtype)
self.placements_map = placements_map
self.inputs = inputs
@@ -667,7 +667,7 @@ class AutoParallelGradChecker(AutoParallelForwardChecker):
def __init__(
self,
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -683,7 +683,7 @@ ):
):
super().__init__(
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
12 changes: 6 additions & 6 deletions test/legacy_test/auto_parallel_op_test.py
@@ -427,7 +427,7 @@ class AutoParallelForwardChecker:
def __init__(
self,
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -440,7 +440,7 @@ def __init__(
self.checker_name = "AutoParallelForwardChecker"
self.init_checker(
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -454,7 +454,7 @@ def __init__(
def init_checker(
self,
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -465,7 +465,7 @@ def init_checker(
python_out_sig=None,
):
self.op_type = op_type
self.public_python_api = pthon_api
self.public_python_api = python_api
self.dtype = np.dtype(dtype)
self.placements_map = placements_map
self.inputs = inputs
@@ -666,7 +666,7 @@ class AutoParallelGradChecker(AutoParallelForwardChecker):
def __init__(
self,
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
@@ -682,7 +682,7 @@ ):
):
super().__init__(
op_type,
pthon_api,
python_api,
dtype,
placements_map,
inputs,
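Both copies of auto_parallel_op_test.py rename the pthon_api parameter to python_api in the checker constructors and in the super().__init__() call. One practical point (an assumption about call sites, not something verified in this diff): positional callers keep working, but any caller that passed the argument by keyword has to be updated as well.

def init_checker_old(op_type, pthon_api):
    return op_type, pthon_api

def init_checker_new(op_type, python_api):
    return op_type, python_api

init_checker_new("matmul", lambda x, y: x @ y)              # positional call: unaffected
# init_checker_new("matmul", pthon_api=lambda x, y: x @ y)  # keyword call: TypeError after the rename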