
Commit 0a762cf

Merge pull request #59 from yjmantilla/exemplar_file_tests
Exemplar file tests
2 parents 62306cd + cb87293 · commit 0a762cf

8 files changed: +160 −53 lines changed

requirements-dev.txt

Lines changed: 1 addition & 0 deletions
@@ -13,6 +13,7 @@ flask
 uvicorn
 fastapi-jsonrpc
 versioneer
+pybv >= 0.7.3
 https://api.github.com/repos/mne-tools/mne-bids/zipball/main
 git+https://github.com/yjmantilla/bidscoin.git@sovabids
 -e .

requirements-user.txt

Lines changed: 1 addition & 0 deletions
@@ -1,3 +1,4 @@
 bids_validator
+pybv >= 0.7.3
 https://api.github.com/repos/mne-tools/mne-bids/zipball/main
 .

requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -1,5 +1,5 @@
 requests
-pybv
+pybv >= 0.7.3
 pyyaml
 pandas
 fastapi-jsonrpc

sovabids/datasets.py

Lines changed: 122 additions & 28 deletions
@@ -9,6 +9,8 @@
 import mne
 import numpy as np
 from mne_bids.write import _write_raw_brainvision
+import fileinput
+
 
 def lemon_prepare():
     """Download and prepare a few files of the LEMON dataset.
@@ -121,23 +123,24 @@ def lemon_bidscoin_prepare(src_path):
         print('already done, skipping...')
     print('finish')
 
-def make_dummy_dataset(PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%run%',
+def make_dummy_dataset(EXAMPLE,
+                       PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%run%',
                        DATASET = 'DUMMY',
                        NSUBS = 2,
                        NSESSIONS = 2,
                        NTASKS = 2,
                        NACQS = 2,
                        NRUNS = 2,
-                       NCHANNELS = 2,
-                       SFREQ = 200,
-                       STOP = 10,
-                       NUMEVENTS = 10,
                        PREFIXES = {'subject':'SU','session':'SE','task':'TA','acquisition':'AC','run':'RU'},
-                       ROOT=None):
+                       ROOT=None,
+                       ):
     """Create a dummy dataset given some parameters.
 
     Parameters
     ----------
+    EXAMPLE : str,PathLike|list , required
+        Path of the file to replicate as each file in the dummy dataset.
+        If a list, it is assumed each item is a file. All of these items are replicated.
     PATTERN : str, optional
         The pattern in placeholder notation using the following fields:
         %dataset%, %task%, %session%, %subject%, %run%, %acquisition%
@@ -153,20 +156,13 @@ def make_dummy_dataset(PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%r
         Number of acquisitions.
     NRUNS : int, optional
         Number of runs.
-    NCHANNELS : int, optional
-        Number of channels.
-    SFREQ : float, optional
-        Samplinf frequency of the data.
-    STOP : float, optional
-        Time duration of the data in seconds.
-    NUMEVENTS : int, optional
-        Number of events along the duration.
     PREFIXES : dict, optional
         Dictionary with the following keys:'subject', 'session', 'task' and 'acquisition'.
         The values are the corresponding prefix. RUN is not present because it has to be a number.
     ROOT : str, optional
         Path where the files will be generated.
        If None, the _data subdir will be used.
+
     """
 
     if ROOT is None:
@@ -176,8 +172,6 @@ def make_dummy_dataset(PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%r
         data_dir = ROOT
     os.makedirs(data_dir,exist_ok=True)
 
-
-
     sub_zeros = get_num_digits(NSUBS)
     subs = [ PREFIXES['subject']+ str(x).zfill(sub_zeros) for x in range(NSUBS)]
 
@@ -193,17 +187,6 @@ def make_dummy_dataset(PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%r
     acq_zeros = get_num_digits(NACQS)
     acquisitions = [ PREFIXES['acquisition']+str(x).zfill(acq_zeros) for x in range(NACQS)]
 
-    # Create some dummy metadata
-    n_channels = NCHANNELS
-    sampling_freq = SFREQ # in Hertz
-    info = mne.create_info(n_channels, sfreq=sampling_freq)
-
-    times = np.linspace(0, STOP, STOP*sampling_freq, endpoint=False)
-    data = np.zeros((NCHANNELS,times.shape[0]))
-
-    raw = mne.io.RawArray(data, info)
-    raw.set_channel_types({x:'eeg' for x in raw.ch_names})
-    new_events = mne.make_fixed_length_events(raw, duration=STOP//NUMEVENTS)
 
     for task in tasks:
         for session in sessions:
@@ -218,5 +201,116 @@ def make_dummy_dataset(PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%r
                         dummy = dummy.replace('%acquisition%',acq)
                         path = [data_dir] +dummy.split('/')
                         fpath = os.path.join(*path)
-                        _write_raw_brainvision(raw,fpath,new_events,overwrite=True)
+                        dirpath = os.path.join(*path[:-1])
+                        os.makedirs(dirpath,exist_ok=True)
+                        if isinstance(EXAMPLE,list):
+                            for ff in EXAMPLE:
+                                fname, ext = os.path.splitext(ff)
+                                shutil.copyfile(ff, fpath+ext)
+                                if 'vmrk' in ext or 'vhdr' in ext:
+                                    replace_brainvision_filename(fpath+ext,path[-1])
+                        else:
+                            fname, ext = os.path.splitext(EXAMPLE)
+                            shutil.copyfile(EXAMPLE, fpath+ext)
+
+def get_dummy_raw(NCHANNELS = 5,
+                  SFREQ = 200,
+                  STOP = 10,
+                  NUMEVENTS = 10,
+                  ):
+    """
+    Create a dummy MNE Raw file given some parameters.
+
+    Parameters
+    ----------
+    NCHANNELS : int, optional
+        Number of channels.
+    SFREQ : float, optional
+        Sampling frequency of the data.
+    STOP : float, optional
+        Time duration of the data in seconds.
+    NUMEVENTS : int, optional
+        Number of events along the duration.
+    """
+    # Create some dummy metadata
+    n_channels = NCHANNELS
+    sampling_freq = SFREQ # in Hertz
+    info = mne.create_info(n_channels, sfreq=sampling_freq)
+
+    times = np.linspace(0, STOP, STOP*sampling_freq, endpoint=False)
+    data = np.zeros((NCHANNELS,times.shape[0]))
+
+    raw = mne.io.RawArray(data, info)
+    raw.set_channel_types({x:'eeg' for x in raw.ch_names})
+    new_events = mne.make_fixed_length_events(raw, duration=STOP//NUMEVENTS)
 
+    return raw,new_events
+
+def save_dummy_vhdr(fpath,dummy_args={}
+    ):
+    """
+    Save a dummy vhdr file.
+
+    Parameters
+    ----------
+    fpath : str, required
+        Path where to save the file.
+    kwargs : dict, optional
+        Dictionary with the arguments of the get_dummy_raw function.
+
+    Returns
+    -------
+    List with the Paths of the desired vhdr file, if those were succesfully created,
+    None otherwise.
+    """
+
+    raw,new_events = get_dummy_raw(**dummy_args)
+    _write_raw_brainvision(raw,fpath,new_events,overwrite=True)
+    eegpath =fpath.replace('.vhdr','.eeg')
+    vmrkpath = fpath.replace('.vhdr','.vmrk')
+    if all(os.path.isfile(x) for x in [fpath,eegpath,vmrkpath]):
+        return [fpath,eegpath,vmrkpath]
+    else:
+        return None
+
+def save_dummy_cnt(fpath,
+    ):
+    """
+    Save a dummy cnt file.
+
+    Parameters
+    ----------
+    fpath : str, required
+        Path where to save the file.
+
+    Returns
+    -------
+    Path of the desired file if the file was succesfully created,
+    None otherwise.
+    """
+    fname = 'scan41_short.cnt'
+    cnt_dict={'dataset_name': 'cnt_sample',
+    'archive_name': 'scan41_short.cnt',
+    'hash': 'md5:7ab589254e83e001e52bee31eae859db',
+    'url': 'https://github.com/mne-tools/mne-testing-data/blob/master/CNT/scan41_short.cnt?raw=true',
+    'folder_name': 'cnt_sample',
+    }
+    data_path = mne.datasets.fetch_dataset(cnt_dict)
+    shutil.copyfile(os.path.join(data_path,'scan41_short.cnt'), fpath) #copyfile overwrites by default
+    if os.path.isfile(fpath):
+        return fpath
+    else:
+        return None
+
+def replace_brainvision_filename(fpath,newname):
+    if '.eeg' in newname:
+        newname = newname.replace('.eeg','')
+    if '.vmrk' in newname:
+        newname = newname.replace('.vmrk','')
+    for line in fileinput.input(fpath, inplace=True):
+        if 'DataFile' in line:
+            print(f'DataFile={newname}.eeg'.format(fileinput.filelineno(), line))
+        elif 'MarkerFile' in line:
+            print(f'MarkerFile={newname}.vmrk'.format(fileinput.filelineno(), line))
+        else:
+            print('{}'.format(line), end='')
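
For orientation, a minimal usage sketch of the two new helpers, mirroring how the updated tests below drive them; the output paths here are illustrative, not taken from the commit.

import os
from sovabids.datasets import make_dummy_dataset, save_dummy_vhdr

data_dir = '_data'  # illustrative output folder, not from the commit
os.makedirs(data_dir, exist_ok=True)

# Write a BrainVision triplet (.vhdr/.eeg/.vmrk) to act as the exemplar;
# save_dummy_vhdr returns the three paths, or None if writing failed.
example_files = save_dummy_vhdr(os.path.join(data_dir, 'dummy.vhdr'))

# Replicate the exemplar into every slot of the dummy source tree.
make_dummy_dataset(EXAMPLE=example_files,
                   PATTERN='T%task%/S%session%/sub%subject%_%acquisition%_%run%',
                   NSUBS=2, NSESSIONS=2, NTASKS=2, NACQS=2, NRUNS=2,
                   ROOT=os.path.join(data_dir, 'DUMMY', 'DUMMY_SOURCE'))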

sovabids/rules.py

Lines changed: 3 additions & 0 deletions
@@ -270,6 +270,9 @@ def apply_rules_to_single_file(file,rules,bids_path,write=False,preview=False):
     bids_path = bids_path.copy()
     bids_path = bids_path.update(
         datatype=datatype, suffix=datatype, extension=ext)
+    if bids_path.datatype in ['eeg', 'ieeg']:
+        if ext not in ['.vhdr', '.edf', '.bdf', '.EDF']:
+            bids_path.update(extension='.vhdr')
     ##################################################################
 
 
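In words: when the datatype is eeg or ieeg and the source extension is not on the allowed list, the BIDS path extension is forced to '.vhdr', so files such as the new CNT exemplar end up pointing at a BrainVision conversion. A standalone sketch of that decision, using assumed helper names rather than the actual sovabids API:

SUPPORTED_EEG_EXTS = ('.vhdr', '.edf', '.bdf', '.EDF')  # same list as in the diff above

def target_extension(datatype, ext):
    # A .cnt recording, for example, gets converted, so its BIDS filename ends in .vhdr
    if datatype in ('eeg', 'ieeg') and ext not in SUPPORTED_EEG_EXTS:
        return '.vhdr'
    return ext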

tests/test_bids.py

Lines changed: 17 additions & 13 deletions
@@ -10,10 +10,10 @@
 from sovabids.parsers import placeholder_to_regex,_modify_entities_of_placeholder_pattern
 from sovabids.rules import apply_rules,load_rules
 from sovabids.dicts import deep_merge_N
-from sovabids.datasets import make_dummy_dataset
+from sovabids.datasets import make_dummy_dataset,save_dummy_vhdr,save_dummy_cnt
 from sovabids.convert import convert_them
 
-def dummy_dataset(pattern_type='placeholder',write=True,mode='python'):
+def dummy_dataset(pattern_type='placeholder',write=True,mode='python',format='.vhdr'):
 
     # Getting current file path and then going to _data directory
     this_dir = os.path.dirname(__file__)
@@ -24,20 +24,22 @@ def dummy_dataset(pattern_type='placeholder',write=True,mode='python'):
     test_root = os.path.join(data_dir,'DUMMY')
     input_root = os.path.join(test_root,'DUMMY_SOURCE')
     mode_str = '_' + mode
-    bids_path = os.path.join(test_root,'DUMMY_BIDS'+'_'+pattern_type+mode_str)
+    bids_path = os.path.join(test_root,'DUMMY_BIDS'+'_'+pattern_type+mode_str+'_'+format.replace('.',''))
+
+    # Make example File
+    if format == '.vhdr':
+        example_fpath = save_dummy_vhdr(os.path.join(data_dir,'dummy.vhdr'))
+    elif format == '.cnt':
+        example_fpath = save_dummy_cnt(os.path.join(data_dir,'dummy.cnt'))
 
     # PARAMS for making the dummy dataset
-    DATA_PARAMS ={ 'PATTERN':'T%task%/S%session%/sub%subject%_%acquisition%_%run%',
+    DATA_PARAMS ={ 'EXAMPLE':example_fpath,
+                   'PATTERN':'T%task%/S%session%/sub%subject%_%acquisition%_%run%',
                    'DATASET' : 'DUMMY',
                    'NSUBS' : 2,
                    'NTASKS' : 2,
                    'NRUNS' : 2,
                    'NSESSIONS' : 2,
-                   'NCHANNELS' : 32,
-                   'NACQS' :2,
-                   'SFREQ' : 200,
-                   'STOP' : 10,
-                   'NUMEVENTS' : 10,
                    'ROOT' : input_root
                  }
 
@@ -65,7 +67,7 @@ def dummy_dataset(pattern_type='placeholder',write=True,mode='python'):
     FIXED_PATTERN =DATA_PARAMS.get('PATTERN',None)
 
     FIXED_PATTERN = _modify_entities_of_placeholder_pattern(FIXED_PATTERN,'append')
-    FIXED_PATTERN = FIXED_PATTERN + '.' + 'vhdr'
+    FIXED_PATTERN = FIXED_PATTERN + format
 
     # Making the rules dictionary
     data={
@@ -82,13 +84,13 @@ def dummy_dataset(pattern_type='placeholder',write=True,mode='python'):
         },
     'non-bids':
     {
-    'eeg_extension':'.vhdr',
+    'eeg_extension':format,
     'path_analysis':{'pattern':FIXED_PATTERN},
     'code_execution':['print(\'some good code\')','print(raw.info)','print(some bad code)']
     },
     'channels':
-    {'name':{'0':'ECG_CHAN','1':'EOG_CHAN'},
-    'type':{'ECG_CHAN':'ECG','EOG_CHAN':'EOG'}}
+    {'name':{'1':'ECG_CHAN','2':'EOG_CHAN'}, #Note example vhdr and CNT have these channels
+    'type':{'ECG_CHAN':'ECG','EOG_CHAN':'EOG'}} # Names (keys) are after the rename of the previous line
     }
 
     if pattern_type == 'regex':
@@ -228,6 +230,8 @@ def dummy_dataset(pattern_type='placeholder',write=True,mode='python'):
     print('okrpc')
     return file_mappings
 def test_dummy_dataset():
+    # apparently it cannot download the cnt consistenly on the github actions machine
+    #dummy_dataset('placeholder',write=True,format='.cnt') # Test cnt conversion
     dummy_dataset('placeholder',write=True)
     dummy_dataset('regex',write=True)
     dummy_dataset('placeholder',write=True,mode='cli')
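
A hedged sketch of how the reworked test helper might be driven locally (the import path is an assumption; the '.cnt' variant stays commented out, as in CI above, because the CNT sample download is reported as unreliable on the GitHub Actions runners):

# Hypothetical local run of the refactored test helper (import path assumed).
from tests.test_bids import dummy_dataset

mappings = dummy_dataset(pattern_type='placeholder', write=True, mode='python', format='.vhdr')
# mappings = dummy_dataset(pattern_type='placeholder', write=True, format='.cnt')  # skipped in CI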

tests/test_sova2coin.py

Lines changed: 5 additions & 2 deletions
@@ -8,7 +8,7 @@
 from sovabids.files import _get_files
 from sovabids.settings import REPO_PATH
 from sovabids.parsers import _modify_entities_of_placeholder_pattern
-from sovabids.datasets import lemon_bidscoin_prepare,make_dummy_dataset
+from sovabids.datasets import lemon_bidscoin_prepare,make_dummy_dataset,save_dummy_vhdr
 import yaml
 
 def test_sova2coin(dataset='dummy_bidscoin',noedit=True):
@@ -59,8 +59,11 @@ def test_sova2coin(dataset='dummy_bidscoin',noedit=True):
         shutil.rmtree(source_path)
     except:
         pass
+
+    # Make example VHDR File
+    example_fpath = save_dummy_vhdr(os.path.join(data_dir,'dummy.vhdr'))
 
-    make_dummy_dataset(DATASET=dataset+'_input',NSUBS=3,NTASKS=2,NSESSIONS=2,NACQS=1,NRUNS=2,PATTERN=pat,ROOT=source_path)
+    make_dummy_dataset(EXAMPLE=example_fpath,DATASET=dataset+'_input',NSUBS=3,NTASKS=2,NSESSIONS=2,NACQS=1,NRUNS=2,PATTERN=pat,ROOT=source_path)
 
 
     files = _get_files(source_path)

tests/test_web_validator.sh

Lines changed: 10 additions & 9 deletions
@@ -2,15 +2,16 @@
 set -e
 npm install --global npm@^7
 npm install -g bids-validator
-bids-validator _data/DUMMY/DUMMY_BIDS_placeholder_python
-bids-validator _data/DUMMY/DUMMY_BIDS_regex_python
-bids-validator _data/DUMMY/DUMMY_BIDS_placeholder_cli
-bids-validator _data/DUMMY/DUMMY_BIDS_regex_cli
-bids-validator _data/DUMMY/DUMMY_BIDS_placeholder_rpc
-bids-validator _data/DUMMY/DUMMY_BIDS_regex_rpc
-bids-validator _data/DUMMY/DUMMY_BIDS_placeholder_cli
-bids-validator _data/DUMMY/DUMMY_BIDS_regex_cli
+bids-validator _data/DUMMY/DUMMY_BIDS_placeholder_cli_vhdr/
+# bids-validator _data/DUMMY_BIDS_placeholder_python_cnt/
+# apparently it cannot download the cnt
+bids-validator _data/DUMMY/DUMMY_BIDS_placeholder_python_vhdr/
+bids-validator _data/DUMMY/DUMMY_BIDS_placeholder_rpc_vhdr/
+bids-validator _data/DUMMY/DUMMY_BIDS_regex_cli_vhdr/
+bids-validator _data/DUMMY/DUMMY_BIDS_regex_python_vhdr/
+bids-validator _data/DUMMY/DUMMY_BIDS_regex_rpc_vhdr/
+
 
 bids-validator _data/dummy_bidscoin_output
 echo $?
-#bids-validator _data/DUMMY/DUMMY_SOURCE
+# bids-validator _data/DUMMY/DUMMY_SOURCE
