Skip to content

Commit

Permalink
all code
Browse files Browse the repository at this point in the history
  • Loading branch information
a162837 committed Oct 25, 2024
1 parent 2284964 commit 065a9dd
Show file tree
Hide file tree
Showing 2 changed files with 137 additions and 49 deletions.
99 changes: 53 additions & 46 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -3706,10 +3706,31 @@ def log10_(x: Tensor, name: str | None = None) -> Tensor:
return _C_ops.log10_(x)


def check_clip_tensor(c_x, value, re_value, value_type, name):
    """Normalize one clip bound (``min`` or ``max``) for tensor-bound clipping.

    Returns a tensor bound: defaults and plain scalars are materialized as
    1-element tensors, while tensor bounds are shape-checked against the
    trailing dimensions of ``c_x`` and returned unchanged.

    Raises:
        ValueError: if ``value`` is a zero-size tensor, or a tensor whose
            shape cannot broadcast against ``c_x``'s trailing dimensions.
    """
    # Missing bound: fall back to the provided default, wrapped so the
    # kernel always receives a tensor.
    if value is None:
        return fill_constant([1], value_type, re_value)

    # Plain Python scalar: wrap it the same way.
    if not isinstance(value, paddle.Tensor):
        return fill_constant([1], value_type, value)

    # Tensor bound: a zero-size tensor can never broadcast against x.
    if value.shape == [0]:
        raise ValueError(
            f"The {name} dimension should be equal to the inner dimension of the x, but the {name} dimension is {value.shape}"
        )

    # Anything other than a scalar-like shape ([], [1]) must exactly match
    # x's trailing dimensions for elementwise comparison to be valid.
    if (
        value.shape not in [[], [1]]
        and value.shape != c_x.shape[-len(value.shape) :]
    ):
        raise ValueError(
            f"The {name} dimension should be equal to the inner dimension of the x, but the {name} dimension is {value.shape} and the x dimension is {c_x.shape[-len(value.shape):]}."
        )
    return value


def clip(
x: Tensor,
min: float | None = None,
max: float | None = None,
min: float | Tensor | None = None,
max: float | Tensor | None = None,
name: str | None = None,
) -> Tensor:
"""
Expand Down Expand Up @@ -3763,26 +3784,18 @@ def clip(
min_ = float(np.finfo(np.float32).min)
max_ = float(np.finfo(np.float32).max)

if isinstance(min, Tensor) or isinstance(max, Tensor):
def check_clip_tensor(c_x, value, re_value, value_type, name):
if value is None:
value = fill_constant([1], value_type, re_value)
else:
if isinstance(value, Tensor):
if value.shape == [0]:
value = fill_constant([1], value_type, re_value)
elif value.shape not in [[], [1]] and value.shpae != c_x.shape[-len(value.shape):]:
raise ValueError(
f"The {name} dimension should be equal to the inner dimension of the x, but the {name} dimension is {value.shape} and the x dimension is {c_x.shape[-len(value.shape):]}."
)
else:
value = fill_constant([1], value_type, value)
return value
if (isinstance(min, paddle.Tensor) and min.shape not in [[1], [0], []]) or (
isinstance(max, paddle.Tensor) and max.shape not in [[1], [0], []]
):
min_n = check_clip_tensor(x, min, min_, x_dtype, 'min')
max_n = check_clip_tensor(x, max, max_, x_dtype, 'max')

min_n = paddle.expand(min_n, x.shape) if min_n.shape != x.shape else min_n
max_n = paddle.expand(max_n, x.shape) if max_n.shape != x.shape else max_n
min_n = (
paddle.expand(min_n, x.shape) if min_n.shape != x.shape else min_n
)
max_n = (
paddle.expand(max_n, x.shape) if max_n.shape != x.shape else max_n
)

output_min = paddle.where(x < min_n, min_n, x)
output = paddle.where(output_min > max_n, max_n, output_min)
Expand Down Expand Up @@ -3846,7 +3859,10 @@ def check_clip_tensor(c_x, value, re_value, value_type, name):
dtype=helper.input_dtype('x')
)
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs
type='clip',
inputs=inputs,
outputs={'Out': [output]},
attrs=attrs,
)

return output
Expand All @@ -3869,35 +3885,26 @@ def clip_(
max = fmax if max is None else max

if in_dynamic_mode():
if not isinstance(max, Tensor) and not isinstance(min, Tensor):
return _C_ops.clip_(x, min, max)
else:
if not isinstance(max, Tensor):
max = fill_constant([1], float, max)
else:
if max.shape == [0]:
max = fill_constant([1], float, max)
elif max.shape not in [[], [1]] and max.shpae != x.shape[-len(max.shape):]:
raise ValueError(
f"The max dimension should be equal to the inner dimension of the x, but the max dimension is {max.shape} and the x dimension is {x.shape[-len(max.shape):]}."
)

if not isinstance(min, Tensor):
min = fill_constant([1], float, min)
else:
if min.shape == [0]:
min = fill_constant([1], float, min)
elif min.shape not in [[], [1]] and min.shpae != x.shape[-len(min.shape):]:
raise ValueError(
f"The min dimension should be equal to the inner dimension of the x, but the min dimension is {min.shape} and the x dimension is {x.shape[-len(min.shape):]}."
)

max_expand = paddle.expand(max, x.shape) if max.shape != x.shape else max
min_expand = paddle.expand(min, x.shape) if min.shape != x.shape else min
if (
isinstance(min, paddle.Tensor) and min.shape not in [[1], [0], []]
) or (
isinstance(max, paddle.Tensor) and max.shape not in [[1], [0], []]
):
max = check_clip_tensor(x, max, fmin, x.dtype, 'max')
min = check_clip_tensor(x, min, fmin, x.dtype, 'min')

max_expand = (
paddle.expand(max, x.shape) if max.shape != x.shape else max
)
min_expand = (
paddle.expand(min, x.shape) if min.shape != x.shape else min
)

paddle.where_(x > min_expand, x, min_expand)
return paddle.where_(x < max_expand, x, max_expand)

else:
return _C_ops.clip_(x, min, max)


def trace(
x: Tensor,
Expand Down
87 changes: 84 additions & 3 deletions test/legacy_test/test_clip_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -496,19 +496,100 @@ def test_tensor_all_clip(self):
images = paddle.static.data(
name='image', shape=data_shape, dtype='float32'
)
min = paddle.static.data(name='min', shape=data_shape[-2:], dtype='float32')
max = paddle.static.data(name='max', shape=data_shape[-3:], dtype='float32')
min = paddle.static.data(
name='min', shape=data_shape[-2:], dtype='float32'
)
max = paddle.static.data(
name='max', shape=data_shape[-3:], dtype='float32'
)
out = paddle.clip(images, min, max)
res = exe.run(feed={"image": data, 'min': min_data, 'max':max_data}, fetch_list=[out])
res = exe.run(
feed={"image": data, 'min': min_data, 'max': max_data},
fetch_list=[out],
)
res_np = np.clip(data, min_data, max_data)
np.testing.assert_allclose(res_np, res[0], rtol=1e-05)
paddle.disable_static()

data = np.random.random(data_shape).astype('float32')
min_data = np.random.random(data_shape[-2:]).astype('float32')
max_data = np.random.random(data_shape[-3:]).astype('float32')
out_np = np.clip(data, min_data, max_data)
data = paddle.to_tensor(data)
min_data = paddle.to_tensor(min_data)
max_data = paddle.to_tensor(max_data)
out = paddle.clip(data, min_data, max_data)
np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

def test_tensor_none_clip(self):
    """Clip with a tensor ``min`` and ``max=None`` in both static and
    dynamic graph modes, checking results against ``np.clip``."""
    paddle.enable_static()
    data_shape = [1, 9, 9, 4]
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    place = (
        base.CUDAPlace(0)
        if base.core.is_compiled_with_cuda()
        else base.CPUPlace()
    )
    exe = base.Executor(place)
    main = paddle.static.Program()
    startup = paddle.static.Program()
    with paddle.static.program_guard(main, startup):
        images = paddle.static.data(
            name='image', shape=data_shape, dtype='float32'
        )
        min = paddle.static.data(
            name='min', shape=data_shape[-2:], dtype='float32'
        )
        # No `max` placeholder: this test exercises clip with only a
        # lower bound, so `max` must stay None (feeding None for a
        # declared placeholder would fail at run time).
        out = paddle.clip(images, min)
        res = exe.run(
            feed={"image": data, 'min': min_data}, fetch_list=[out]
        )
    # np.clip's upper bound argument must be given explicitly; passing
    # None means "no upper bound" (np.clip(data, min_data) alone is a
    # TypeError).
    res_np = np.clip(data, min_data, None)
    np.testing.assert_allclose(res_np, res[0], rtol=1e-05)

    paddle.disable_static()

    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    out_np = np.clip(data, min_data, None)
    data = paddle.to_tensor(data)
    min_data = paddle.to_tensor(min_data)
    out = paddle.clip(data, min_data)
    np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

def test_shapeerror_clip(self):
    """Tensor bounds with incompatible shapes must raise ValueError."""
    shape = [1, 9, 9, 4]
    x = paddle.to_tensor(np.random.random(shape).astype('float32'))
    # A length-2 min cannot match x's innermost dimension (4).
    with self.assertRaises(ValueError):
        paddle.clip(x, paddle.rand([2]))

    x = paddle.to_tensor(np.random.random(shape).astype('float32'))
    # A zero-size max tensor is rejected outright.
    with self.assertRaises(ValueError):
        paddle.clip(
            x, min=paddle.to_tensor([1, 2, 3, 4]), max=paddle.rand([0])
        )
)


class TestInplaceClipAPI(TestClipAPI):
    """Re-run the TestClipAPI suite through the in-place ``clip_`` API."""

    def _executed_api(self, x, min=None, max=None):
        # Dispatch the inherited tests to the in-place variant.
        return x.clip_(min, max)

    def test_tensor_clip_(self):
        """Smoke test: in-place clip with tensor bounds must not raise."""
        shape = [1, 9, 9, 4]
        x = paddle.to_tensor(np.random.random(shape).astype('float32'))
        lower = paddle.to_tensor(
            np.random.random(shape[-2:]).astype('float32')
        )
        upper = lower + 5
        x.clip_(lower, upper)


# Standard unittest entry point so this test file can be run directly.
if __name__ == '__main__':
    unittest.main()

0 comments on commit 065a9dd

Please sign in to comment.