@@ -1,6 +1,7 @@
 import os
 from pathlib import Path
 
+import numpy as np
 import torch.cuda
 
 from .clip import _download, available_models
@@ -37,24 +38,45 @@ def optimize_models(
     ):
         from nebullvm.api.functions import optimize_model
 
-        save_dir = os.path.expanduser("~/.cache/clip/nebullvm")
-        Path(save_dir).mkdir(exist_ok=True)
-        visual_save_dir = os.path.join(save_dir, "visual")
-        Path(visual_save_dir).mkdir(exist_ok=True)
-        text_save_dir = os.path.join(save_dir, "text")
-        Path(text_save_dir).mkdir(exist_ok=True)
         general_kwargs = {}
         general_kwargs.update(kwargs)
 
+        dynamic_info = {
+            "inputs": [
+                {0: 'batch', 1: 'num_channels', 2: 'pixel_size', 3: 'pixel_size'}
+            ],
+            "outputs": [{0: 'batch'}],
+        }
+
         self._visual_model = optimize_model(
             self._visual_path,
-            input_data=[((torch.randn(1, 3, self.pixel_size, self.pixel_size),), 0)],
+            input_data=[
+                (
+                    (
+                        np.random.randn(1, 3, self.pixel_size, self.pixel_size).astype(
+                            np.float32
+                        ),
+                    ),
+                    0,
+                )
+            ],
+            dynamic_info=dynamic_info,
             **general_kwargs,
         )
 
+        dynamic_info = {
+            "inputs": [
+                {0: 'batch', 1: 'num_tokens'},
+            ],
+            "outputs": [
+                {0: 'batch'},
+            ],
+        }
+
         self._textual_model = optimize_model(
             self._textual_path,
-            input_data=[((torch.randint(0, 100, (1, 77)),), 0)],
+            input_data=[((np.random.randint(0, 100, (1, 77)),), 0)],
+            dynamic_info=dynamic_info,
             **general_kwargs,
         )
 
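For reference, the pattern introduced by this patch is: nebullvm's optimize_model receives sample inputs as a list of (inputs_tuple, label) pairs built from NumPy arrays, plus a dynamic_info dict whose "inputs"/"outputs" lists hold one {axis_index: axis_name} mapping per tensor, marking the axes that may vary at runtime. The sketch below is a minimal, self-contained illustration of that call pattern; the toy torch.nn.Linear model, the axis names, and the assumption that a plain torch module can be paired with NumPy samples (the commit passes a saved model path instead) are illustrative and not taken from this repository.

import numpy as np
import torch
from nebullvm.api.functions import optimize_model

# Placeholder model standing in for the CLIP visual/textual towers (assumption,
# not the repository's model).
model = torch.nn.Linear(16, 4)

# Declare axis 0 of the single input and single output as dynamic, mirroring
# the "batch" entries used in the patch above.
dynamic_info = {
    "inputs": [{0: "batch"}],
    "outputs": [{0: "batch"}],
}

optimized_model = optimize_model(
    model,
    # One (inputs_tuple, label) sample; the label is not used here, hence 0,
    # matching the trailing 0 in the patch's input_data entries.
    input_data=[((np.random.randn(1, 16).astype(np.float32),), 0)],
    dynamic_info=dynamic_info,
)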