# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import string

import numpy as np
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class CTCLabelConverter(object):
    """ Convert between text-label and text-index for Baidu warpctc. """

    def __init__(self, flags):
        # character (str): set of the possible characters.
        flags = flags.Global
        self.character_type = flags.character_type
        self.loss_type = flags.loss_type
        if self.character_type == 'en':
            self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
            dict_character = list(self.character_str)
        elif self.character_type == 'ch':
            character_dict_path = flags.character_dict_path
            add_space = False
            if hasattr(flags, 'use_space_char'):
                add_space = flags.use_space_char
            self.character_str = ""
            with open(character_dict_path, 'rb') as fin:
                lines = fin.readlines()
                for line in lines:
                    line = line.decode('utf-8').strip("\n").strip("\r\n")
                    self.character_str += line
            if add_space:
                self.character_str += " "
            dict_character = list(self.character_str)
        elif self.character_type == "en_sensitive":
            # same as the ASTER setting (94 printable characters).
            self.character_str = string.printable[:-6]
            dict_character = list(self.character_str)
        else:
            self.character_str = None
        assert self.character_str is not None, \
            "Unsupported character type: {}".format(self.character_type)
        self.dict = {}
        for i, char in enumerate(dict_character):
            # NOTE: 0 is reserved for the 'CTCblank' token required by CTCLoss
            self.dict[char] = i + 1
        self.character = ['[blank]'] + dict_character  # dummy '[blank]' token for CTCLoss (index 0)
        self.char_num = len(self.character)

    def encode(self, text):
        """Convert text-label into text-index.

        input:
            text: text labels of each image. [batch_size]
        output:
            text: padded text indices for CTCLoss, shape [batch_size, batch_max_length],
                right-padded with the blank index 0.
            length: length of each text. [batch_size]
        """
        length = [len(s) for s in text]
        batch_max_length = max(length)
        d = []
        for s in text:
            t = [self.dict[char] for char in s]
            t.extend([0] * (batch_max_length - len(s)))
            d.append(t)
        return (torch.tensor(d, dtype=torch.long),
                torch.tensor(length, dtype=torch.long))

    def decode(self, preds, raw=False):
        """Convert text-index into text-label (greedy CTC decoding).

        preds is expected to behave like a numpy array of shape
        [batch_size, seq_len, num_classes].
        """
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        result_list = []
        for word, prob in zip(preds_idx, preds_prob):
            if raw:
                result_list.append(
                    (''.join([self.character[int(i)] for i in word]), prob))
            else:
                result = []
                conf = []
                for i, index in enumerate(word):
                    # skip the blank token (0) and merge consecutive repeats
                    if word[i] != 0 and (not (i > 0 and word[i - 1] == word[i])):
                        result.append(self.character[int(index)])
                        conf.append(prob[i])
                result_list.append((''.join(result), conf))
        return result_list
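

# Usage sketch (illustrative, not part of the original file). The config object is
# assumed here to be a simple namespace exposing ``Global.character_type`` and
# ``Global.loss_type``; the real project config may look different.
#
#   from types import SimpleNamespace
#   flags = SimpleNamespace(Global=SimpleNamespace(character_type='en', loss_type='ctc'))
#   converter = CTCLabelConverter(flags)
#   targets, lengths = converter.encode(["hello", "ocr"])
#   # targets: LongTensor of shape [2, 5]; "ocr" is right-padded with the blank index 0
#   # preds: numpy array of shape [batch_size, seq_len, converter.char_num]
#   # texts = converter.decode(preds)   # [('hello', [...]), ('ocr', [...])]
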
class AttnLabelConverter(object):
    """ Convert between text-label and text-index for attention-based decoding. """

    def __init__(self, flags):
        # character (str): set of the possible characters.
        flags = flags.Global
        self.character_type = flags.character_type
        self.loss_type = flags.loss_type
        if self.character_type == 'en':
            self.character_str = "0123456789abcdefghijklmnopqrstuvwxyz"
            dict_character = list(self.character_str)
        elif self.character_type == 'ch':
            character_dict_path = flags.character_dict_path
            add_space = False
            if hasattr(flags, 'use_space_char'):
                add_space = flags.use_space_char
            self.character_str = ""
            with open(character_dict_path, 'rb') as fin:
                lines = fin.readlines()
                for line in lines:
                    line = line.decode('utf-8').strip("\n").strip("\r\n")
                    self.character_str += line
            if add_space:
                self.character_str += " "
            dict_character = list(self.character_str)
        elif self.character_type == "en_sensitive":
            # same as the ASTER setting (94 printable characters).
            self.character_str = string.printable[:-6]
            dict_character = list(self.character_str)
        else:
            self.character_str = None
        assert self.character_str is not None, \
            "Unsupported character type: {}".format(self.character_type)
        self.character = ['[Go]', '[s]'] + dict_character  # '[Go]' for the start token, '[s]' for the end token
        self.dict = {}
        for i, char in enumerate(self.character):
            # NOTE: index 0 is the '[Go]' start token, index 1 is the '[s]' end token
            self.dict[char] = i
        self.char_num = len(self.character)

    def encode(self, text):
        """Convert text-label into text-index with '[Go]' and '[s]' tokens.

        input:
            text: text labels of each image. [batch_size]
        output:
            outputs: LongTensor of shape [batch_size, batch_max_length];
                column 0 holds the '[Go]' token (index 0) and each label is
                followed by the '[s]' end token, with 0-padding after it.
            length: length of each text including the appended '[s]'. [batch_size]
        """
        length = [len(s) + 1 for s in text]   # +1 for the '[s]' end token
        batch_max_length = max(length) + 1    # +1 for the '[Go]' start token
        batch_size = len(length)
        outputs = torch.LongTensor(batch_size, batch_max_length).fill_(0)
        for i in range(batch_size):
            curr_text = list(text[i])
            curr_text.append('[s]')
            curr_text = [self.dict[char] for char in curr_text]
            outputs[i, 1: len(curr_text) + 1] = torch.LongTensor(curr_text)
        return (outputs, torch.IntTensor(length))

    def decode(self, preds):
        """Convert text-index into text-label, cutting each sequence at the first '[s]'."""
        preds_idx = preds.argmax(axis=2)
        preds_prob = preds.max(axis=2)
        text_conf = []
        for idx, prob in zip(preds_idx, preds_prob):
            curr_text = [self.character[int(index)] for index in idx]
            text_conf.append((curr_text, prob))
        result_list = []
        for text, prob in text_conf:
            # search the token list (not the joined string) so that the
            # multi-character special tokens cannot skew the cut position
            end_index = text.index('[s]') if '[s]' in text else len(text)
            text = text[:end_index]
            prob = prob[:end_index]
            result_list.append((''.join(text), prob))
        return result_list
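

# Usage sketch for AttnLabelConverter (illustrative, same assumed config shape as above).
#
#   from types import SimpleNamespace
#   flags = SimpleNamespace(Global=SimpleNamespace(character_type='en', loss_type='attention'))
#   converter = AttnLabelConverter(flags)
#   targets, lengths = converter.encode(["hi"])
#   # targets -> tensor([[0, 19, 20, 1]]): '[Go]', 'h', 'i', '[s]'; lengths -> [3]
#   # preds: array of shape [batch_size, max_len, converter.char_num]
#   # texts = converter.decode(preds)   # each prediction is cut at the first '[s]'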