
Commit d14482a

committed
Python 3 Compatibility
1 parent a97b68e commit d14482a

10 files changed: +163 / -259 lines

DataSequence.py

Lines changed: 4 additions & 4 deletions

@@ -97,13 +97,13 @@ def from_grid(cls, grid_transcript, trfile):
         """Creates a new DataSequence from a [grid_transript] and a [trfile].
         grid_transcript should be the product of the 'make_simple_transcript' method of TextGrid.
         """
-        data_entries = zip(*grid_transcript)[2]
+        data_entries = list(zip(*grid_transcript))[2]
         if isinstance(data_entries[0], str):
-            data = map(str.lower, zip(*grid_transcript)[2])
+            data = list(map(str.lower, list(zip(*grid_transcript))[2]))
         else:
             data = data_entries
-        word_starts = np.array(map(float, zip(*grid_transcript)[0]))
-        word_ends = np.array(map(float, zip(*grid_transcript)[1]))
+        word_starts = np.array(list(map(float, list(zip(*grid_transcript))[0])))
+        word_ends = np.array(list(map(float, list(zip(*grid_transcript))[1])))
         word_avgtimes = (word_starts + word_ends)/2.0

         tr = trfile.avgtr
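
The core of this change: in Python 3, zip and map return lazy iterators rather than lists, so an expression like zip(*grid_transcript)[2] raises TypeError until the result is materialized with list(). A minimal sketch of the same idiom (the grid_transcript value below is invented for illustration, not taken from the repository):

# Hypothetical example data; a real grid_transcript comes from
# TextGrid.make_simple_transcript() as (start, end, word) tuples.
grid_transcript = [("0.00", "0.45", "Hello"), ("0.45", "1.10", "World")]

# Python 2: zip() returned a list, so zip(*grid_transcript)[2] worked.
# Python 3: zip() returns an iterator, so it must be wrapped in list()
# before indexing; the same applies to map().
columns = list(zip(*grid_transcript))        # [('0.00', '0.45'), ('0.45', '1.10'), ('Hello', 'World')]
data_entries = columns[2]                    # ('Hello', 'World')
word_starts = list(map(float, columns[0]))   # [0.0, 0.45]
word_ends = list(map(float, columns[1]))     # [0.45, 1.1]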

SemanticModel.py

Lines changed: 12 additions & 11 deletions

@@ -1,5 +1,5 @@
 import tables
-import cPickle
+import pickle
 import numpy as np

 import logging
@@ -117,11 +117,12 @@ def load(cls, filename):
         """Loads a semantic model from the given filename.
         """
         logger.debug("Loading file: %s"%filename)
-        shf = tables.openFile(filename)
+        shf = tables.open_file(filename)

         newsm = cls(None, None)
-        newsm.data = shf.getNode("/data").read()
-        newsm.vocab = shf.getNode("/vocab").read()
+        newsm.data = shf.get_node("/data").read()
+        newsm.vocab = [s.decode('utf-8') for s in shf.get_node("/vocab").read()]
+
         shf.close()
         logger.debug("Done loading file..")
         return newsm
@@ -277,17 +278,17 @@ def similarity(self, word1, word2):
     def print_best_worst(self, ii, n=10):
         vector = self.data[ii]
         sv = np.argsort(self.data[ii])
-        print "Best:"
-        print "-------------"
+        print ("Best:")
+        print ("-------------")
         for ni in range(1,n+1):
-            print "%s: %0.08f"%(np.array(self.vocab)[sv[-ni]], vector[sv[-ni]])
+            print ("%s: %0.08f"%(np.array(self.vocab)[sv[-ni]], vector[sv[-ni]]))

-        print "\nWorst:"
-        print "-------------"
+        print ("\nWorst:")
+        print ("-------------")
         for ni in range(n):
-            print "%s: %0.08f"%(np.array(self.vocab)[sv[ni]], vector[sv[ni]])
+            print ("%s: %0.08f"%(np.array(self.vocab)[sv[ni]], vector[sv[ni]]))

-        print "\n"
+        print ("\n")


 def gaussianize(vec):
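
These edits bundle three separate Python 3 concerns: cPickle was merged into pickle, PyTables 3.x renamed its camelCase API (openFile becomes open_file, getNode becomes get_node), and HDF5 string arrays read back as bytes, so the vocabulary is decoded to str before use. The print statements also become print() function calls. Below is a minimal standalone sketch of the same idioms, assuming PyTables >= 3.0; the demo.hf5 file and its /vocab array are hypothetical, not part of the repository.

import pickle                    # Python 3 name for Python 2's cPickle
import numpy as np
import tables

# PyTables 3.x removed the old camelCase names: use open_file / get_node
# / create_array instead of openFile / getNode / createArray.
with tables.open_file("demo.hf5", "w") as hf:
    hf.create_array("/", "vocab", np.array([b"dog", b"cat"]))

with tables.open_file("demo.hf5", "r") as hf:
    raw = hf.get_node("/vocab").read()
    # Under Python 3, HDF5 string arrays come back as bytes, so decode
    # them before treating the vocabulary as str.
    vocab = [s.decode("utf-8") for s in raw]

blob = pickle.dumps(vocab)       # cPickle.dumps in Python 2
print("vocab: %s" % vocab)       # print is a function in Python 3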
