Skip to content

Commit 7a58255

Browse files
authored
Merge pull request #48 from stanfordnlp/zen/cachepath
Remove default cache directory
2 parents 306ff0d + 76fd589 commit 7a58255

20 files changed: +52 −52 lines changed

pyvene/models/blip/modelings_intervenable_blip.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@
9999
blip_wrapper_type_to_dimension_mapping = blip_type_to_dimension_mapping
100100

101101

102-
def create_blip(name="Salesforce/blip-vqa-base", cache_dir="../../.huggingface_cache"):
102+
def create_blip(name="Salesforce/blip-vqa-base", cache_dir=None):
103103
"""Creates a GPT2 model, config, and tokenizer from the given name and revision"""
104104
from transformers import BlipConfig, BlipProcessor, BlipForQuestionAnswering
105105

pyvene/models/gpt2/modelings_intervenable_gpt2.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@
6363
gpt2_lm_type_to_dimension_mapping = gpt2_type_to_dimension_mapping
6464

6565

66-
def create_gpt2(name="gpt2", cache_dir="../../.huggingface_cache"):
66+
def create_gpt2(name="gpt2", cache_dir=None):
6767
"""Creates a GPT2 model, config, and tokenizer from the given name and revision"""
6868
from transformers import GPT2Model, GPT2Tokenizer, GPT2Config
6969

@@ -74,7 +74,7 @@ def create_gpt2(name="gpt2", cache_dir="../../.huggingface_cache"):
7474
return config, tokenizer, gpt
7575

7676

77-
def create_gpt2_lm(name="gpt2", config=None, cache_dir="../../.huggingface_cache"):
77+
def create_gpt2_lm(name="gpt2", config=None, cache_dir=None):
7878
"""Creates a GPT2 LM, config, and tokenizer from the given name and revision"""
7979
from transformers import GPT2LMHeadModel, GPT2Tokenizer, GPT2Config
8080

pyvene/models/gpt_neo/modelings_intervenable_gpt_neo.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,7 +64,7 @@
6464

6565

6666
def create_gpt_neo(
67-
name="roneneldan/TinyStories-33M", cache_dir="../../.huggingface_cache"
67+
name="roneneldan/TinyStories-33M", cache_dir=None
6868
):
6969
"""Creates a GPT2 model, config, and tokenizer from the given name and revision"""
7070
from transformers import GPTNeoForCausalLM, GPT2Tokenizer, GPTNeoConfig

pyvene/models/gpt_neox/modelings_intervenable_gpt_neox.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@
6363
gpt_neox_lm_type_to_dimension_mapping = gpt_neox_type_to_dimension_mapping
6464

6565

66-
def create_gpt_neox(name="EleutherAI/pythia-70m", cache_dir="../../.huggingface_cache"):
66+
def create_gpt_neox(name="EleutherAI/pythia-70m", cache_dir=None):
6767
"""Creates a GPT2 model, config, and tokenizer from the given name and revision"""
6868
from transformers import GPTNeoXForCausalLM, AutoTokenizer, GPTNeoXConfig
6969

pyvene/models/gru/modelings_intervenable_gru.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -69,7 +69,7 @@
6969
gru_lm_type_to_dimension_mapping = gru_type_to_dimension_mapping
7070

7171

72-
def create_gru(config, tokenizer_name=None, cache_dir="../../.huggingface_cache"):
72+
def create_gru(config, tokenizer_name=None, cache_dir=None):
7373
"""Creates a GRU model, config, and tokenizer from the given name and revision"""
7474
from transformers import AutoTokenizer
7575
from models.gru.modelings_gru import GRUModel
@@ -82,7 +82,7 @@ def create_gru(config, tokenizer_name=None, cache_dir="../../.huggingface_cache"
8282
return config, tokenizer, mlp
8383

8484

85-
def create_gru_lm(config, tokenizer_name=None, cache_dir="../../.huggingface_cache"):
85+
def create_gru_lm(config, tokenizer_name=None, cache_dir=None):
8686
"""Creates a GRU model, config, and tokenizer from the given name and revision"""
8787
from transformers import AutoTokenizer
8888
from models.gru.modelings_gru import GRULMHeadModel
@@ -96,7 +96,7 @@ def create_gru_lm(config, tokenizer_name=None, cache_dir="../../.huggingface_cac
9696

9797

9898
def create_gru_classifier(
99-
config, tokenizer_name=None, cache_dir="../../.huggingface_cache"
99+
config, tokenizer_name=None, cache_dir=None
100100
):
101101
"""Creates a GRU model, config, and tokenizer from the given name and revision"""
102102
from transformers import AutoTokenizer

pyvene/models/llama/modelings_intervenable_llama.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ def split_heads(tensor, num_heads, attn_head_size):
7070

7171

7272
def create_llama(
73-
name="sharpbai/alpaca-7b-merged", cache_dir="../../.huggingface_cache"
73+
name="sharpbai/alpaca-7b-merged", cache_dir=None
7474
):
7575
"""Creates a LLaMA Causal LM model, config, and tokenizer from the given name and revision"""
7676
from transformers import LlamaForCausalLM, LlamaTokenizer, LlamaConfig

pyvene/models/mlp/modelings_intervenable_mlp.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636

3737

3838
def create_mlp_classifier(
39-
config, tokenizer_name=None, cache_dir="../../.huggingface_cache"
39+
config, tokenizer_name=None, cache_dir=None
4040
):
4141
"""Creates a MLP model, config, and tokenizer from the given name and revision"""
4242
from transformers import AutoTokenizer

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@
1010

1111
setup(
1212
name="pyvene",
13-
version="0.0.5",
13+
version="0.0.6",
1414
description="Use Activation Intervention to Interpret Causal Mechanism of Model",
1515
long_description=long_description,
1616
long_description_content_type='text/markdown',

tutorials/advanced_tutorials/IOI_with_DAS.ipynb

Lines changed: 19 additions & 19 deletions
Large diffs are not rendered by default.

tutorials/advanced_tutorials/Intervened_Model_Generation.ipynb

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@
105105
}
106106
],
107107
"source": [
108-
"config, tokenizer, tinystory = create_gpt_neo(cache_dir=\"../../../.huggingface_cache\")"
108+
"config, tokenizer, tinystory = create_gpt_neo()"
109109
]
110110
},
111111
{

0 commit comments

Comments (0)