Skip to content

Commit

Permalink
fix dygraph code
Browse files Browse the repository at this point in the history
  • Loading branch information
a162837 committed Oct 30, 2024
1 parent f766ab4 commit bc3b43c
Show file tree
Hide file tree
Showing 2 changed files with 99 additions and 88 deletions.
19 changes: 13 additions & 6 deletions python/paddle/tensor/math.py
Original file line number Diff line number Diff line change
Expand Up @@ -3710,13 +3710,13 @@ def check_clip_tensor(c_x, value, re_value, value_type, name):
if value is None:
value = fill_constant([1], value_type, re_value)
else:
if isinstance(value, paddle.Tensor):
if isinstance(value, Variable, paddle.pir.Value, paddle.Tensor):
if value.shape == [0]:
raise ValueError(
f"The {name} dimension should be equal to the inner dimension of the x, but the {name} dimension is {value.shape}"
)
elif (
value.shape not in [[], [1]]
value.shape != [] and value.shape != [1]
and value.shape != c_x.shape[-len(value.shape) :]
):
raise ValueError(
Expand Down Expand Up @@ -3774,21 +3774,25 @@ def clip(
if x_dtype == 'paddle.int32':
min_ = np.iinfo(np.int32).min
max_ = np.iinfo(np.int32).max - 2**7
value_dtype = 'int32'
elif x_dtype == 'paddle.int64':
min_ = np.iinfo(np.int64).min
max_ = np.iinfo(np.int64).max - 2**39
value_dtype = 'int64'
elif x_dtype == 'paddle.float16':
min_ = float(np.finfo(np.float16).min)
max_ = float(np.finfo(np.float16).max)
value_dtype = 'float16'
else:
min_ = float(np.finfo(np.float32).min)
max_ = float(np.finfo(np.float32).max)
value_dtype = 'float32'

if (isinstance(min, paddle.Tensor) and min.shape not in [[1], [0], []]) or (
isinstance(max, paddle.Tensor) and max.shape not in [[1], [0], []]
if (isinstance(min, (Variable, paddle.pir.Value, paddle.Tensor)) and (min.shape != [1] and min.shape != [0] and min.shape != [])) or (
isinstance(max, (Variable, paddle.pir.Value, paddle.Tensor)) and (max.shape != [1] and max.shape != [0] and max.shape != [])
):
min_n = check_clip_tensor(x, min, min_, x_dtype, 'min')
max_n = check_clip_tensor(x, max, max_, x_dtype, 'max')
min_n = check_clip_tensor(x, min, min_, value_dtype, 'min')
max_n = check_clip_tensor(x, max, max_, value_dtype, 'max')

min_n = (
paddle.expand(min_n, x.shape) if min_n.shape != x.shape else min_n
Expand All @@ -3797,6 +3801,9 @@ def clip(
paddle.expand(max_n, x.shape) if max_n.shape != x.shape else max_n
)

min_n.stop_gradient = True
max_n.stop_gradient = True

output_min = paddle.where(x < min_n, min_n, x)
output = paddle.where(output_min > max_n, max_n, output_min)
return output
Expand Down
168 changes: 86 additions & 82 deletions test/legacy_test/test_clip_op.py
Original file line number Diff line number Diff line change
Expand Up @@ -478,88 +478,92 @@ def test_fp16(self):


class TestClipTensor(unittest.TestCase):
# def test_tensor_all_clip(self):
# paddle.enable_static()
# data_shape = [1, 9, 9, 4]
# data = np.random.random(data_shape).astype('float32')
# min_data = np.random.random(data_shape[-2:]).astype('float32')
# max_data = np.random.random(data_shape[-3:]).astype('float32')
# place = (
# base.CUDAPlace(0)
# if base.core.is_compiled_with_cuda()
# else base.CPUPlace()
# )
# exe = base.Executor(place)
# main = paddle.static.Program()
# startup = paddle.static.Program()
# with paddle.static.program_guard(main, startup):
# images = paddle.static.data(
# name='image', shape=data_shape, dtype='float32'
# )
# min = paddle.static.data(
# name='min', shape=data_shape[-2:], dtype='float32'
# )
# max = paddle.static.data(
# name='max', shape=data_shape[-3:], dtype='float32'
# )
# out = paddle.clip(images, min, max)
# res = exe.run(
# feed={"image": data, 'min': min_data, 'max': max_data},
# fetch_list=[out],
# )
# res_np = np.clip(data, min_data, max_data)
# np.testing.assert_allclose(res_np, res[0], rtol=1e-05)
# paddle.disable_static()

# data = np.random.random(data_shape).astype('float32')
# min_data = np.random.random(data_shape[-2:]).astype('float32')
# max_data = np.random.random(data_shape[-3:]).astype('float32')
# out_np = np.clip(data, min_data, max_data)
# data = paddle.to_tensor(data)
# min_data = paddle.to_tensor(min_data)
# max_data = paddle.to_tensor(max_data)
# out = paddle.clip(data, min_data, max_data)
# np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

# def test_tensor_none_clip(self):
# paddle.enable_static()
# data_shape = [1, 9, 9, 4]
# data = np.random.random(data_shape).astype('float32')
# min_data = np.random.random(data_shape[-2:]).astype('float32')
# place = (
# base.CUDAPlace(0)
# if base.core.is_compiled_with_cuda()
# else base.CPUPlace()
# )
# exe = base.Executor(place)
# main = paddle.static.Program()
# startup = paddle.static.Program()
# with paddle.static.program_guard(main, startup):
# images = paddle.static.data(
# name='image', shape=data_shape, dtype='float32'
# )
# min = paddle.static.data(
# name='min', shape=data_shape[-2:], dtype='float32'
# )
# max = paddle.static.data(
# name='max', shape=data_shape[-3:], dtype='float32'
# )
# out = paddle.clip(images, min, max)
# res = exe.run(
# feed={"image": data, 'min': min_data, 'max': None}, fetch_list=[out]
# )
# res_np = np.clip(data, min_data)
# np.testing.assert_allclose(res_np, res[0], rtol=1e-05)

# paddle.disable_static()

# data = np.random.random(data_shape).astype('float32')
# min_data = np.random.random(data_shape[-2:]).astype('float32')
# out_np = np.clip(data, min_data)
# data = paddle.to_tensor(data)
# min_data = paddle.to_tensor(min_data)
# out = paddle.clip(data, min_data)
# np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)
def test_static_clip(self):
    """Static-graph paddle.clip with tensor min/max whose shapes broadcast
    against the trailing dims of x; result is checked against np.clip.

    min has shape x.shape[-2:] and max has shape x.shape[-3:], exercising
    the tensor-clip broadcasting path added to paddle.clip.
    """
    data_shape = [1, 2, 3, 4]
    self.place = (
        base.CUDAPlace(0)
        if base.core.is_compiled_with_cuda()
        else base.CPUPlace()
    )
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    max_data = np.random.random(data_shape[-3:]).astype('float32')
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.static.data(
            name='x', shape=data_shape, dtype='float32'
        )
        # Named min_t/max_t to avoid shadowing the builtins min/max;
        # the feed keys stay 'min'/'max' to match paddle.static.data names.
        min_t = paddle.static.data(
            name='min', shape=data_shape[-2:], dtype='float32'
        )
        max_t = paddle.static.data(
            name='max', shape=data_shape[-3:], dtype='float32'
        )
        out = paddle.clip(x, min_t, max_t)
        exe = base.Executor(self.place)
        res = exe.run(
            feed={
                "x": data,
                'min': min_data,
                'max': max_data,
            },
            fetch_list=[out],
        )
        res_np = np.clip(data, min_data, max_data)
        np.testing.assert_allclose(res_np, res[0], rtol=1e-05)
    paddle.disable_static()

def test_dygraph_clip(self):
    """Dygraph paddle.clip with tensor min/max of several broadcastable
    shapes and dtypes; each result is checked against np.clip.

    Covers: float32 with 2-D min / 3-D max, float32 with 2-D min / 1-D max,
    int32 with 2-D min / scalar-tensor max, and float32 with max omitted.
    """
    self.place = (
        base.CUDAPlace(0)
        if base.core.is_compiled_with_cuda()
        else base.CPUPlace()
    )
    paddle.disable_static(self.place)

    # float32: min broadcasts over the last 2 dims, max over the last 3.
    data_shape = [1, 2, 3, 4]
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    max_data = np.random.random(data_shape[-3:]).astype('float32')
    out_np = np.clip(data, min_data, max_data)
    data = paddle.to_tensor(data)
    min_data = paddle.to_tensor(min_data)
    max_data = paddle.to_tensor(max_data)
    out = paddle.clip(data, min_data, max_data)
    np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

    # float32: min over the last 2 dims, max over the last dim only.
    data_shape = [1, 2, 3, 4]
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    max_data = np.random.random(data_shape[-1:]).astype('float32')
    out_np = np.clip(data, min_data, max_data)
    data = paddle.to_tensor(data)
    min_data = paddle.to_tensor(min_data)
    max_data = paddle.to_tensor(max_data)
    out = paddle.clip(data, min_data, max_data)
    np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

    # int32: use randint — np.random.random(...).astype('int32') would
    # truncate every value in [0, 1) to 0 and make this case vacuous.
    data_shape = [1, 2, 3, 4]
    data = np.random.randint(0, 10, data_shape).astype('int32')
    min_data = np.random.randint(0, 10, data_shape[-2:]).astype('int32')
    max_data = 5
    out_np = np.clip(data, min_data, max_data)
    data = paddle.to_tensor(data)
    min_data = paddle.to_tensor(min_data)
    max_data = paddle.to_tensor([max_data], dtype='int32')
    out = paddle.clip(data, min_data, max_data)
    np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

    # float32 with max omitted: paddle.clip defaults to float32 max, so
    # compare against np.clip with the same explicit upper bound.
    data_shape = [1, 2, 3, 4]
    data = np.random.random(data_shape).astype('float32')
    min_data = np.random.random(data_shape[-2:]).astype('float32')
    max_data = float(np.finfo(np.float32).max)
    out_np = np.clip(data, min_data, max_data)
    data = paddle.to_tensor(data)
    min_data = paddle.to_tensor(min_data)
    out = paddle.clip(data, min_data)
    np.testing.assert_allclose(out.numpy(), out_np, rtol=1e-05)

    paddle.enable_static()

def test_shapeerror_clip(self):
data_shape = [1, 9, 9, 4]
Expand Down

0 comments on commit bc3b43c

Please sign in to comment.