-
Notifications
You must be signed in to change notification settings - Fork 6
/
node.py
76 lines (64 loc) · 1.79 KB
/
node.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
'''
Copyright 2018 Hongzheng Chen
E-mail: [email protected]
This is the implementation of Deep-reinforcement-learning-based scheduler for High-Level Synthesis.
This file contains the definition and implementation of the OSPair and Node classes.
'''
from copy import deepcopy
class OSPair(object):
    """An (operation, step) pair recording the schedule-step bound
    contributed by a single operation (both indices stored 0-based)."""

    def __init__(self, op, step):
        super(OSPair, self).__init__()
        # Bind both fields in one unpacking assignment.
        self.op, self.step = op, step

    def __repr__(self):
        # Shift to 1-based indices for human-readable display.
        return "Op: %d, Step: %d" % (self.op + 1, self.step + 1)
class Node(object):
    """A node (operation) in the data-flow graph used for HLS scheduling.

    Each node keeps two lists of OSPair bounds:
      * asap -- ASAP bounds, sorted descending so asap[0] is the tightest
        (largest) earliest-start step.
      * alap -- ALAP bounds, sorted ascending so alap[0] is the tightest
        (smallest) latest-start step.
    iasap/ialap hold independent initial copies so initial() can restore
    the working lists after a scheduling episode.
    """

    def __init__(self, num_, name_, type_, delay_):
        super(Node, self).__init__()
        self.num = num_      # node index in the graph
        self.name = name_    # operation name
        self.type = type_    # operation/resource type
        self.delay = delay_  # operation latency
        self.pred = []       # predecessor nodes
        self.succ = []       # successor nodes
        self.asap = []       # working ASAP bounds; element 0 is the largest step
        self.alap = []       # working ALAP bounds; element 0 is the smallest step
        self.iasap = []      # pristine copies of ASAP bounds for initial()
        self.ialap = []      # pristine copies of ALAP bounds for initial()
        self.cstep = -1      # scheduled control step; -1 means unscheduled

    def initial(self):
        """Restore the working ASAP/ALAP bounds from the initial copies
        and mark the node unscheduled."""
        # Deep-copy each pair so later setASAP/setALAP updates can never
        # mutate the pristine iasap/ialap entries.
        self.asap = [deepcopy(p) for p in self.iasap]
        self.alap = [deepcopy(p) for p in self.ialap]
        self.asap.sort(key=lambda x: x.step, reverse=True)
        self.alap.sort(key=lambda x: x.step)
        self.cstep = -1

    def schedule(self, step):
        """Assign this node to control step `step`."""
        self.cstep = step

    @staticmethod
    def _set_bound(pairs, ipairs, op, step, reverse):
        """Update the bound contributed by `op` in `pairs`, or insert it.

        New entries are also recorded in `ipairs` as a separate OSPair
        object (not shared) so initial() restores the first-seen value.
        `pairs` is re-sorted afterwards; `reverse` selects the direction.
        """
        found = False
        for pair in pairs:
            if pair.op == op:
                pair.step = step
                found = True
        if not found:
            pairs.append(OSPair(op, step))
            ipairs.append(OSPair(op, step))  # independent copy for initial()
        pairs.sort(key=lambda x: x.step, reverse=reverse)

    def setASAP(self, op, asap_):
        """Set the ASAP step bound contributed by operation `op`."""
        self._set_bound(self.asap, self.iasap, op, asap_, True)

    def setALAP(self, op, alap_):
        """Set the ALAP step bound contributed by operation `op`."""
        self._set_bound(self.alap, self.ialap, op, alap_, False)

    def getASAP(self):
        """Return the binding (largest) ASAP step; requires a non-empty list."""
        return self.asap[0].step

    def getALAP(self):
        """Return the binding (smallest) ALAP step; requires a non-empty list."""
        return self.alap[0].step