# interproscan.properties
data.directory=/usr/local/interproscan/data
bin.directory=/usr/local/interproscan/bin
# Temporary files used by the analyses will be placed in directories under the path below
# (the text [UNIQUE], if present, will be replaced by a value unique to your running instance)
temporary.file.directory.suffix=[UNIQUE]
temporary.file.directory=temp/${temporary.file.directory.suffix}
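# For illustration only: if [UNIQUE] expanded to the token "a1b2c3d4" (a made-up
# value, not one InterProScan is guaranteed to generate), temporary files for that
# run would land under temp/a1b2c3d4.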
# Instructs I5 to completely clean up after a run
delete.temporary.directory.on.completion=true
# Perl and Python binaries to use
perl.command=perl
python3.command=python3
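# If the interpreters are not on the PATH, absolute paths can be given instead,
# e.g. (the paths below are illustrative, not a recommendation):
#perl.command=/usr/bin/perl
#python3.command=/usr/bin/python3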
# Binary file locations (required for setup.py)
binary.hmmer3.path=${bin.directory}/hmmer/hmmer3/3.3
####################################
# Member databases HMMs
####################################
antifam.hmm.path=${data.directory}/antifam/7.0/AntiFam.hmm
gene3d.hmm.path=${data.directory}/gene3d/4.3.0/gene3d_main.hmm
hamap.hmm.path=${data.directory}/hamap/2023_05/hamap.hmm.lib
ncbifam.hmm.path=${data.directory}/ncbifam/15.0/ncbifam.hmm
panther.hmm.path=${data.directory}/panther/19.0/famhmm/binHmm
pfam-a.hmm.path=${data.directory}/pfam/37.1/pfam_a.hmm
pirsf.sfhmm.path=${data.directory}/pirsf/3.10/sf_hmm_all
pirsr.srhmm.path=${data.directory}/pirsr/2023_05/sr_hmm_all
sfld.hmm.path=${data.directory}/sfld/4/sfld.hmm
superfamily.hmm.path=${data.directory}/superfamily/1.75/hmmlib_1.75
# A list of TCP ports that should not be used for messaging
# (otherwise, only ports > 1024 and < 65535 will be used)
tcp.port.exclusion.list=3879,3878,3881,3882
####################################
# Precalculated match lookup service
####################################
# By default, if a sequence already has matches available from the EBI, this service will look them
# up for you. Note: at present it always returns all available matches, ignoring any -appl options
# set on the command line
precalculated.match.lookup.service.url=https://www.ebi.ac.uk/interpro/match-lookup
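# To compute all matches locally instead, comment out the URL above; InterProScan
# also accepts the -dp / --disable-precalc option on the command line.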
precalculated.match.protein.lookup.batch.size=100
precalculated.match.protein.insert.batch.size=500
precalculated.match.protein.insert.batch.size.nolookup=4000
# Proxy setup
precalculated.match.lookup.service.proxy.host=
precalculated.match.lookup.service.proxy.port=3128
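# Example proxy configuration (the hostname is a placeholder, not a real server):
#precalculated.match.lookup.service.proxy.host=proxy.example.com
#precalculated.match.lookup.service.proxy.port=3128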
# Exclude sites from output (residue level annotations)
exclude.sites.from.output=false
####################################
# Master/Standalone embedded workers
####################################
# Set the number of embedded workers to the number of processors that you would like to employ
# on the machine you are using to run InterProScan.
number.of.embedded.workers=6
maxnumber.of.embedded.workers=8
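# For example, on an 8-core machine one might leave a core free for the master
# process (a common rule of thumb, not an InterProScan requirement):
#number.of.embedded.workers=7
#maxnumber.of.embedded.workers=8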
#################################
# Distributed mode (Cluster mode)
#################################
# Grid name
grid.name=lsf
#grid.name=other-cluster
# Project name for this run
user.digest=i5GridRun
# Grid jobs limit: the maximum number of jobs you are allowed to run on the cluster
grid.jobs.limit=1000
# Interval between bjobs/qstat calls used to check the status of jobs on the cluster
grid.check.interval.seconds=120
# Allow the master InterProScan process to run binaries
master.can.run.binaries=true
# Whether to attempt recovery of steps found in an unknown state
recover.unknown.step.state=false
# Grid submission commands (e.g. LSF bsub or SGE qsub) for starting remote workers
# Commands the master uses to start new remote workers
grid.master.submit.command=bsub -q QUEUE_NAME -M 8192
grid.master.submit.high.memory.command=bsub -q QUEUE_NAME -M 8192
# Commands a worker uses to start new remote workers
grid.worker.submit.command=bsub -q QUEUE_NAME -M 8192
grid.worker.submit.high.memory.command=bsub -q QUEUE_NAME -M 8192
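# A rough SGE equivalent of the LSF commands above might look like the following
# (the queue name and the h_vmem request are site-specific assumptions):
#grid.master.submit.command=qsub -q QUEUE_NAME -l h_vmem=8G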
# Command to start a new worker (new jvm)
worker.command=java -Xms2028M -Xmx9216M -jar interproscan-5.jar
# This may be identical to the worker.command above; however, you may choose to request
# a machine with much more available memory, for use when a StepExecution fails.
worker.high.memory.command=java -Xms2028M -Xmx9216M -jar interproscan-5.jar
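# For instance, the high-memory variant could request a larger heap (the 16 GB
# figure below is illustrative, not a tuned recommendation):
#worker.high.memory.command=java -Xms2028M -Xmx16384M -jar interproscan-5.jar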
# Set the number of embedded workers to the number of processors that you would like to employ
# on the node machine on which the worker will run.
worker.number.of.embedded.workers=4
worker.maxnumber.of.embedded.workers=4
# Max number of connections to the master
master.maxconsumers=48
# Max number of connections to the worker
worker.maxconsumers=32
# Throttled network?
grid.throttle=true
# Max number of jobs a tier 1 worker is allowed on its queue
worker.maxunfinished.jobs=32
# Network tier depth
max.tier.depth=1
# ActiveMQ JMS broker temporary data directory
jms.broker.temp.directory=activemq-data/localhost/tmp_storage