# -*- coding: utf-8 -*-
# file: dan_parser.py
# author: albertopaz <[email protected]>
# Copyright (C) 2018. All Rights Reserved.
# forked from songyouwei/ABSA-PyTorch
import networkx as nx
import numpy as np
import spacy


# Enforce a fully connected tree (one root per sample)
# https://github.com/explosion/spaCy/issues/1850
def one_sentence_per_doc(doc):
    doc[0].sent_start = True
    for i in range(1, len(doc)):
        doc[i].sent_start = False
    return doc
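
# NOTE: registering a bare function with `nlp.add_pipe` is the spaCy 2.x
# API; spaCy 3.x expects components to be registered by name via the
# `@Language.component` decorator and added with that name.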


# DAN dependency-based parser
class Parser(object):

    def __init__(self, one_root=True, boc=None):
        '''
        Initialize the DAN dependency-based parser.
        '''
        self.nlp = spacy.load("en_core_web_sm")
        self.nlp.remove_pipe('ner')
        if one_root:
            self.nlp.add_pipe(one_sentence_per_doc, before='parser')
        self.bag_of_concepts = boc

    def parse(self, text, aspect_start, aspect_end):
        '''
        Parse a text and return its tree positions, concepts, supersized
        aspect variants, and dependency tree.
        '''
        doc = self.nlp(text)
        aspect_term = ' '.join(a.text for a in doc[aspect_start:aspect_end])
        parents, concepts = [], []
        edges, aspect_nodes = [], []
        subtree_aspect, compound_aspect, noun_chunk_aspect = None, None, None
        for tok in doc:
            # record the parent of each token and extract concepts
            parents.append(tok.head.i + 1 if tok.dep_ != 'ROOT' else 0)  # +1 to be consistent with the stanfordnlp/treelstm format
            concepts.append(self.extract_concepts(tok))
            # identify the nodes corresponding to the aspect and supersize it
            if (tok.i >= aspect_start) and (tok.i < aspect_end):
                aspect_nodes.append('{}-{}'.format(tok.lower_, tok.i))
                if subtree_aspect is None:
                    subtree_aspect = self.subtree_supersize(tok)
                if compound_aspect is None:
                    compound_aspect = self.compound_supersize(tok, aspect_term)
            # collect edges to load the spaCy dependency tree into a networkx graph
            for c in tok.children:
                edges.append(('{}-{}'.format(tok.lower_, tok.i),
                              '{}-{}'.format(c.lower_, c.i)))
        # consolidate the output
        noun_chunk_aspect = self.noun_chunk_supersize(doc, aspect_start, aspect_end)
        position_vector = self.shortest_path(doc, nx.Graph(edges), aspect_nodes)
        concept_vector = self.make_concept_vector(doc, concepts)
        tree = self.read_tree(parents)
        dan_inputs = {
            'tree_positions': position_vector,
            'concepts': concept_vector,
            'compound_aspect': compound_aspect,
            'subtree_aspect': subtree_aspect,
            'noun_chunk_aspect': noun_chunk_aspect,
            'dependency_tree': tree
        }
        return dan_inputs

    def shortest_path(self, doc, graph, aspect_nodes):
        '''
        Find the shortest path in the dependency graph from each word to the aspect.
        '''
        distance_to_aspect = np.zeros(len(doc))
        for tok in doc:
            source = '{}-{}'.format(tok.lower_, tok.i)
            path_len = []
            for aspect in aspect_nodes:
                try:
                    path_len.append(nx.shortest_path_length(graph, source=source,
                                                            target=aspect))
                except (nx.NetworkXNoPath, nx.NodeNotFound):
                    print('critical error when calculating shortest paths!')
            # fall back to the document length when no path to the aspect exists
            distance_to_aspect[tok.i] = min(path_len) if path_len else len(doc)
        return distance_to_aspect
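
    # The resulting position vector encodes hop distance in the undirected
    # dependency graph: aspect tokens get 0, their heads and children get 1,
    # and so on outward through the tree.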

    def make_concept_vector(self, doc, concepts):
        '''
        Map concepts to vocabulary indices, keeping at most one concept per token.
        '''
        concept_vector = np.zeros(len(doc))
        concepts = list(filter(None, concepts))
        for c in concepts:
            if c[0]['text'] in self.bag_of_concepts.word2idx:
                if concept_vector[c[0]['pos']] != 0:
                    # a concept is already anchored at this position; the later one wins
                    print('>>> overwriting concept at position {}'.format(c[0]['pos']))
                concept_vector[c[0]['pos']] = self.bag_of_concepts.word2idx[c[0]['text']]
        return concept_vector

    def extract_concepts(self, tok):
        '''
        Dependency-based semantic parser for concept-level text analysis (simplified).
        '''
        concepts = []
        # TODO: add bigram-based rules (?)
        #       check: (tok.pos, tok.nbor(1).pos)
        # VERB head token rules
        if tok.head.pos_ == 'VERB':
            # Joint subject noun and adjective complement rule
            if tok.dep_ == 'nsubj':
                for c in tok.head.children:
                    if c.dep_ == 'acomp':
                        concepts.append({'pos': max(c.i, tok.i),
                                         'text': c.lemma_ + '_' + tok.lemma_})
            # Direct nominal objects
            if tok.dep_ == 'dobj':
                concepts.append({'pos': max(tok.i, tok.head.i),
                                 'text': tok.head.lemma_ + '_' + tok.lemma_})
            # Adjective and clausal complement rules
            if tok.dep_ == 'acomp':
                concepts.append({'pos': max(tok.i, tok.head.i),
                                 'text': tok.head.lemma_ + '_' + tok.lemma_})
            # Open clausal complements, optionally joined with their direct object
            if tok.dep_ == 'xcomp':
                concepts.append({'pos': max(tok.head.i, tok.i),
                                 'text': tok.head.lemma_ + '_' + tok.lemma_})
                for t in tok.children:
                    if t.dep_ == 'dobj':
                        concepts.append({'pos': max(tok.head.i, tok.i, t.i),
                                         'text': tok.head.lemma_ + '_'
                                                 + tok.lemma_ + '_' + t.lemma_})
        # Dependency-based rules for any head
        # Negation (known issue: contractions such as "don't")
        if tok.dep_ == 'neg':
            concepts.append({'pos': max(tok.i, tok.head.i),
                             'text': tok.lemma_ + '_' + tok.head.lemma_})
        # Adjectival, adverbial and participial modification
        elif tok.dep_ == 'amod':
            concepts.append({'pos': max(tok.i, tok.head.i),
                             'text': tok.lemma_ + '_' + tok.head.lemma_})
        # Prepositional phrases
        elif tok.dep_ == 'prep':
            for c in tok.children:
                if c.dep_ == 'pobj':
                    concepts.append({'pos': max(tok.head.i, tok.i, c.i),
                                     'text': tok.head.lemma_ + '_'
                                             + tok.lemma_ + '_' + c.lemma_})
        # Adverbial clause modifier
        elif tok.dep_ == 'advcl':
            concepts.append({'pos': max(tok.i, tok.head.i),
                             'text': tok.lemma_ + '_' + tok.head.lemma_})
        # Noun compound modifier
        elif tok.dep_ == 'compound':
            concepts.append({'pos': max(tok.i, tok.head.i),
                             'text': tok.lemma_ + '_' + tok.head.lemma_})
        if len(concepts) > 0:
            return concepts
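
    # Example (illustrative): in "I love this screen", "screen" is the direct
    # object of the VERB "love", so the direct-object rule above yields the
    # concept 'love_screen', anchored at the later token's index.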

    def noun_chunk_supersize(self, doc, aspect_start, aspect_end):
        '''
        Supersize the aspect based on its adjacency to other noun phrases.
        '''
        aspect_indices = set(range(aspect_start, aspect_end))
        selected = None
        candidates = []
        for nc in doc.noun_chunks:
            nc_start = nc.root.left_edge.i
            nc_end = nc.root.right_edge.i + 1
            nc_indices = set(range(nc_start, nc_end))
            # whenever a chunk overlaps the aspect, keep both the chunk itself
            # and its union with the aspect span as candidates
            if aspect_indices & nc_indices:
                for nc2 in [nc, doc[min(aspect_start, nc_start):max(aspect_end, nc_end)]]:
                    candidates.append(nc2)
        if len(candidates) > 0:
            selected = max(candidates, key=len)
            if selected[0].dep_ == 'det':  # drop a leading determiner
                selected = selected[1:]
            selected = ' '.join(s.text for s in selected)
        return selected

    def subtree_supersize(self, aspect_token):
        '''
        Supersize the aspect based on prepositional dependency relations (down).
        '''
        subtree = None
        for c in aspect_token.children:
            if c.dep_ == 'prep':
                subtree = [t for t in aspect_token.subtree]
                if subtree[0].dep_ == 'det':  # remove det relations at the beginning
                    subtree = subtree[1:]
                subtree = ' '.join(s.text for s in subtree)
        return subtree

    def compound_supersize(self, aspect_token, aspect_term):
        '''
        Supersize the aspect based on compound dependency relations (up).
        '''
        compound = None
        if aspect_token.dep_ == 'compound':
            if aspect_token.head.text not in aspect_term:
                compound = aspect_term + ' ' + aspect_token.head.text
        return compound
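
    # Example (illustrative): with the aspect "battery" in "battery life is
    # short", "battery" attaches to "life" via a compound relation, so the
    # aspect is supersized to "battery life".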

    def read_tree(self, parents):
        '''
        Convert a list of parent indices (1-based, 0 marks the root) into a Tree object.
        '''
        trees = dict()
        root = None
        for i in range(1, len(parents) + 1):
            if i - 1 not in trees and parents[i - 1] != -1:
                idx = i
                prev = None
                while True:
                    parent = parents[idx - 1]
                    if parent == -1:
                        break
                    tree = Tree()
                    if prev is not None:
                        tree.add_child(prev)
                    trees[idx - 1] = tree
                    tree.idx = idx - 1
                    if parent - 1 in trees:
                        trees[parent - 1].add_child(tree)
                        break
                    elif parent == 0:
                        root = tree
                        break
                    else:
                        prev = tree
                        idx = parent
        # print('\n', ' '.join(str(p - 1) for p in parents))
        # root.paint()
        return root
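
# Illustrative example: the parents list uses 1-based head indices with 0
# marking the root, so read_tree([2, 0, 2]) yields a tree rooted at token 1
# with tokens 0 and 2 as its children.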


class Tree(object):
    '''
    Tree object from stanfordnlp/treelstm.
    '''

    def __init__(self):
        self.parent = None
        self.num_children = 0
        self.children = list()

    def add_child(self, child):
        child.parent = self
        self.num_children += 1
        self.children.append(child)

    def size(self):
        # memoize: getattr needs a default, otherwise the first call raises AttributeError
        if getattr(self, '_size', None) is not None:
            return self._size
        count = 1
        for i in range(self.num_children):
            count += self.children[i].size()
        self._size = count
        return self._size

    def depth(self):
        # memoize, with a default so cached depths (including 0) are reused
        if getattr(self, '_depth', None) is not None:
            return self._depth
        count = 0
        if self.num_children > 0:
            for i in range(self.num_children):
                child_depth = self.children[i].depth()
                if child_depth > count:
                    count = child_depth
            count += 1
        self._depth = count
        return self._depth

    def paint(self, level=0):
        print(level * ' ├──', self.idx)
        level += 1
        for idx in range(self.num_children):
            self.children[idx].paint(level)
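

if __name__ == '__main__':
    # Minimal usage sketch under stated assumptions: the real bag-of-concepts
    # object is not defined in this file, so a hypothetical stub exposing the
    # required `word2idx` mapping stands in for it here. Requires the
    # en_core_web_sm model to be installed.
    class StubBagOfConcepts:
        word2idx = {'love_screen': 1, 'battery_life': 2}

    parser = Parser(one_root=True, boc=StubBagOfConcepts())
    # token indices: the(0) battery(1) life(2) of(3) this(4) laptop(5) ...
    out = parser.parse('the battery life of this laptop is amazing',
                       aspect_start=1, aspect_end=3)
    print(out['tree_positions'])     # hop distance of every token to the aspect
    print(out['concepts'])           # vocabulary indices of extracted concepts
    print(out['noun_chunk_aspect'])  # supersized noun-phrase aspect
    out['dependency_tree'].paint()   # ASCII sketch of the dependency tree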