1
- from __future__ import print_function
2
1
# --------------------------------------------------------
3
2
# Fast R-CNN
4
3
# Copyright (c) 2015 Microsoft
5
4
# Licensed under The MIT License [see LICENSE for details]
6
5
# Written by Ross Girshick
7
6
# --------------------------------------------------------
8
7
9
- import os
10
- from os .path import join as pjoin
11
- import numpy as np
12
- from distutils .core import setup
13
- from distutils .extension import Extension
14
- from Cython .Distutils import build_ext
15
-
16
-
17
def find_in_path(name, path):
    """Find a file in a search path and return its absolute path.

    Parameters
    ----------
    name : str
        File name to look for (e.g. ``'nvcc'``).
    path : str
        ``os.pathsep``-separated list of directories to search,
        typically the value of ``$PATH``.

    Returns
    -------
    str or None
        Absolute path of the first match, or ``None`` if *name* is not
        found in any of the directories.
    """
    # adapted from http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
    # use 'directory' rather than 'dir' so we don't shadow the builtin
    for directory in path.split(os.pathsep):
        binpath = pjoin(directory, name)
        if os.path.exists(binpath):
            return os.path.abspath(binpath)
    return None
25
-
8
+ from __future__ import print_function
26
9
27
- # def locate_cuda():
28
- # """Locate the CUDA environment on the system
29
- #
30
- # Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
31
- # and values giving the absolute path to each directory.
32
- #
33
- # Starts by looking for the CUDAHOME env variable. If not found, everything
34
- # is based on finding 'nvcc' in the PATH.
35
- # """
36
- #
37
- # # first check if the CUDAHOME env variable is in use
38
- # if 'CUDAHOME' in os.environ:
39
- # home = os.environ['CUDAHOME']
40
- # nvcc = pjoin(home, 'bin', 'nvcc')
41
- # else:
42
- # # otherwise, search the PATH for NVCC
43
- # default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
44
- # nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
45
- # if nvcc is None:
46
- # raise EnvironmentError('The nvcc binary could not be '
47
- # 'located in your $PATH. Either add it to your path, or set $CUDAHOME')
48
- # home = os.path.dirname(os.path.dirname(nvcc))
49
- #
50
- # cudaconfig = {'home': home, 'nvcc': nvcc,
51
- # 'include': pjoin(home, 'include'),
52
- # 'lib64': pjoin(home, 'lib64')}
53
- # for k, v in cudaconfig.iteritems():
54
- # if not os.path.exists(v):
55
- # raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
56
- #
57
- # return cudaconfig
10
+ from Cython .Build import cythonize
11
+ from Cython .Distutils import build_ext
12
+ from setuptools import Extension
13
+ from setuptools import setup
58
14
15
+ import numpy as np
59
16
60
- # CUDA = locate_cuda()
61
17
62
18
# Obtain the numpy include directory. This logic works across numpy versions.
63
19
try :
@@ -66,70 +22,23 @@ def find_in_path(name, path):
66
22
numpy_include = np .get_numpy_include ()
67
23
68
24
69
- def customize_compiler_for_nvcc (self ):
70
- """inject deep into distutils to customize how the dispatch
71
- to gcc/nvcc works.
72
-
73
- If you subclass UnixCCompiler, it's not trivial to get your subclass
74
- injected in, and still have the right customizations (i.e.
75
- distutils.sysconfig.customize_compiler) run on it. So instead of going
76
- the OO route, I have this. Note, it's kindof like a wierd functional
77
- subclassing going on."""
78
-
79
- # tell the compiler it can processes .cu
80
- self .src_extensions .append ('.cu' )
81
-
82
- # save references to the default compiler_so and _comple methods
83
- default_compiler_so = self .compiler_so
84
- super = self ._compile
85
-
86
- # now redefine the _compile method. This gets executed for each
87
- # object but distutils doesn't have the ability to change compilers
88
- # based on source extension: we add it.
89
- def _compile (obj , src , ext , cc_args , extra_postargs , pp_opts ):
90
- print (extra_postargs )
91
- if os .path .splitext (src )[1 ] == '.cu' :
92
- # use the cuda for .cu files
93
- self .set_executable ('compiler_so' , CUDA ['nvcc' ])
94
- # use only a subset of the extra_postargs, which are 1-1 translated
95
- # from the extra_compile_args in the Extension class
96
- postargs = extra_postargs ['nvcc' ]
97
- else :
98
- postargs = extra_postargs ['gcc' ]
99
-
100
- super (obj , src , ext , cc_args , postargs , pp_opts )
101
- # reset the default compiler_so, which we might have changed for cuda
102
- self .compiler_so = default_compiler_so
103
-
104
- # inject our redefined _compile method into the class
105
- self ._compile = _compile
106
-
107
-
108
- # run the customize_compiler
109
- class custom_build_ext (build_ext ):
110
- def build_extensions (self ):
111
- customize_compiler_for_nvcc (self .compiler )
112
- build_ext .build_extensions (self )
113
-
114
-
115
25
def _make_extension(module_name):
    """Build one ``utils.*`` Cython Extension compiled against the numpy headers."""
    # -Wno-cpp silences the noisy "deprecated numpy API" preprocessor
    # warnings emitted when compiling Cython-generated C sources.
    return Extension(
        name='utils.' + module_name,
        sources=['utils/' + module_name + '.pyx'],
        extra_compile_args=['-Wno-cpp'],
        include_dirs=[numpy_include],
    )


# Extension modules handed to setup(); one per .pyx source under utils/.
ext_modules = [
    _make_extension('cython_bbox'),
    _make_extension('cython_nms'),
]
129
39
130
40
# Package metadata and build inputs for setuptools; build the extensions in
# place with ``python setup.py build_ext --inplace``.
_SETUP_ARGS = {
    'name': 'mask_rcnn',
    # cythonize() translates the .pyx sources to C before the extension
    # list is handed to setuptools for compilation.
    'ext_modules': cythonize(ext_modules),
}

setup(**_SETUP_ARGS)
44
+
0 commit comments