This repository was archived by the owner on Sep 22, 2025. It is now read-only.

Commit ba89871
Merge branch 'develop' into r0.5
2 parents 9b74b91 + d767ac8

11 files changed, 77 insertions(+), 83 deletions(-)

explainer/attributions/attributions.py

Lines changed: 2 additions & 2 deletions
@@ -215,11 +215,11 @@ def __init__(self,
         self.shap_values = self.explainer.shap_values(self.targets, nsamples=nsamples)
         self.info_panel = force_plot_info_panel
 
-    def visualize(self) -> None:
+    def visualize(self):
         '''
         Display the force plot of the of the target example(s)
         '''
-        self.force_plot(self.explainer.expected_value, self.shap_values[0], self.targets)
+        return self.force_plot(self.explainer.expected_value, self.shap_values[0], self.targets)
 
 
 class PartitionExplainer(FeatureAttributions):
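The visualize() change above returns the force plot instead of discarding it, which is what lets a Jupyter cell actually render the figure: a cell displays only the value of its last expression. A minimal sketch of the same pattern in plain shap (the sklearn model and data are stand-ins, not from this repo; the wrapper in the diff delegates to the same shap calls):

import shap
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression

shap.initjs()  # load the JS renderer force plots need in a notebook

# Stand-in model and background data for illustration only
X, y = load_breast_cancer(return_X_y=True)
model = LogisticRegression(max_iter=5000).fit(X, y)

explainer = shap.KernelExplainer(model.predict_proba, X[:50])
shap_values = explainer.shap_values(X[:1], nsamples=100)

# Returning the plot object is what makes Jupyter render it, which is the
# point of the `return self.force_plot(...)` change above.
plot = shap.force_plot(explainer.expected_value[0], shap_values[0], X[:1])
plot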

notebooks/explainer/cifar_with_attributions/TorchVision_CIFAR_Interpret.ipynb

Lines changed: 23 additions & 8 deletions
@@ -7,6 +7,19 @@
    "# Explaining Custom CNN CIFAR-10 Classification Using the Attributions Explainer"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "# temp workaround for captum bug\n",
+   "# issue on captum: https://github.com/pytorch/captum/issues/1114\n",
+   "# Note: this matplotlib version is not guaranteed to work with any other application of intel-xai\n",
+   "\n",
+   "! pip install -U matplotlib==3.6.3 --no-deps"
+  ]
+ },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -206,6 +219,15 @@
    "net.eval()"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "import matplotlib"
+  ]
+ },
  {
   "cell_type": "code",
   "execution_count": null,
@@ -231,13 +253,6 @@
    "attributions.smoothgrad(net).visualize(input,labels[ind],original_image,\"Smooth Grad\")\n",
    "attributions.featureablation(net).visualize(input,labels[ind],original_image,\"Feature Ablation\")"
   ]
- },
- {
-  "cell_type": "code",
-  "execution_count": null,
-  "metadata": {},
-  "outputs": [],
-  "source": []
  }
 ],
 "metadata": {
@@ -256,7 +271,7 @@
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
-  "version": "3.9.6"
+  "version": "3.9.16"
  },
 "vscode": {
  "interpreter": {

notebooks/explainer/heart_disease_with_attributions/heart_disease.ipynb

Lines changed: 1 addition & 8 deletions
@@ -240,13 +240,6 @@
 "ke = attributions.kernel_explainer(model, X_train.iloc[1:101, :], X_train.iloc[0, :])\n",
 "ke.visualize()"
 ]
-},
-{
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
 }
 ],
 "metadata": {
@@ -265,7 +258,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.6"
+"version": "3.9.16"
 },
 "vscode": {
  "interpreter": {

notebooks/explainer/mnist_with_attributions_and_metrics/mnist.ipynb

Lines changed: 2 additions & 2 deletions
@@ -272,7 +272,7 @@
 "source": [
 "# instatiate gradient explainer object\n",
 "# run the deep explainer\n",
-"grViz = attributions.gradient_explainer(model, X_test[:100], X_test[matches[:6]], 2, classes)\n",
+"grViz = attributions.gradient_explainer(model, X_test[:100], X_test[matches[:6]], classes, 2)\n",
 "grViz.visualize()"
 ]
},
@@ -309,7 +309,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.6"
+"version": "3.9.16"
 }
},
"nbformat": 4,

notebooks/explainer/multimodal_cancer_detection/Multimodal_Cancer_Detection.ipynb

Lines changed: 32 additions & 33 deletions
@@ -16,7 +16,9 @@
  "cell_type": "code",
  "execution_count": null,
  "id": "ad8a9723-5dbe-44eb-9baa-eca188e435f2",
- "metadata": {},
+ "metadata": {
+  "scrolled": true
+ },
  "outputs": [],
  "source": [
   "import numpy as np\n",
@@ -705,29 +707,29 @@
 "from explainer import cam\n",
 "final_image_dim = (224, 224)\n",
 "targetLayer = viz_model._model.layer4\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Normal'), \n",
 "                        images[0],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Normal'), \n",
 "                        images[1],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Normal'), \n",
 "                        images[2],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()"
+"xgc.visualize()"
 ]
},
{
@@ -777,30 +779,29 @@
 "\n",
 "final_image_dim = (224, 224)\n",
 "targetLayer = viz_model._model.layer4\n",
-"#targetLayer = \"_IPEXConv2d-169\"\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Benign'), \n",
 "                        images[0],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Benign'), \n",
 "                        images[1],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Benign'), \n",
 "                        images[2],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()"
+"xgc.visualize()"
 ]
},
{
@@ -1121,7 +1122,7 @@
 "source": [
 "import transformers\n",
 "transformers.set_seed(1)\n",
-"nlp_history = nlp_model.train(train_nlp_dataset, output_dir, epochs=3, use_trainer=True)"
+"nlp_history = nlp_model.train(train_nlp_dataset, output_dir, epochs=3, use_trainer=True, seed=1)"
 ]
},
{
@@ -1281,7 +1282,7 @@
 "outputs": [],
 "source": [
 "from explainer import attributions\n",
-"partition_explainer = attributions.partition_explainer(f, r\"\\W+\", test_nlp_dataset.class_names)(np.array(mal_classified_as_ben_text))\n",
+"partition_explainer = attributions.partition_text_explainer(f, test_nlp_dataset.class_names, np.array(mal_classified_as_ben_text), r\"\\W+\")\n",
 "partition_explainer.visualize()"
 ]
},
@@ -1305,8 +1306,8 @@
 "outputs": [],
 "source": [
 "from intel_extension_for_transformers.optimization.trainer import NLPTrainer\n",
-"from intel_extension_for_transformers import objectives, OptimizedModel, QuantizationConfig\n",
-"from intel_extension_for_transformers import metrics as nlptk_metrics"
+"from intel_extension_for_transformers.optimization import objectives, OptimizedModel, QuantizationConfig\n",
+"from intel_extension_for_transformers.optimization import metrics as nlptk_metrics"
 ]
},
{
@@ -1389,7 +1390,7 @@
 "metadata": {},
 "outputs": [],
 "source": [
-"quantized_model.save(os.path.join(output_dir, 'quantized_BERT'))\n",
+"quantizer.save_model(os.path.join(output_dir, 'quantized_BERT'))\n",
 "nlp_model._model.config.save_pretrained(os.path.join(output_dir, 'quantized_BERT'))"
 ]
},
@@ -1504,7 +1505,7 @@
 "viz_weight = test_viz_metrics[1]\n",
 "\n",
 "# final weight of nlp is its overall validation accuracy\n",
-"nlp_weight = eval_acc\n",
+"nlp_weight = test_nlp_metrics['eval_accuracy']\n",
 "\n",
 "def convert_nomenclature(df_pid):\n",
 "    return 'P' + df_pid[:-1] + '_' + df_pid[-1]\n",
@@ -1597,7 +1598,7 @@
 "oh_y_pred = np.eye(n_values)[y_pred]\n",
 "y_true = [label_map_func(i) for i in label]\n",
 "\n",
-"ensemble_cm = metrics.confusion_matrix(oh_y_true, oh_y_pred, test_nlp_dataset.class_names)\n",
+"ensemble_cm = metrics.confusion_matrix(y_true, oh_y_pred, test_nlp_dataset.class_names)\n",
 "ensemble_cm.visualize()\n",
 "print(ensemble_cm.report)"
 ]
@@ -1640,42 +1641,40 @@
 "final_image_dim = (224, 224)\n",
 "targetLayer = viz_model._model.layer4\n",
 "#targetLayer = \"_IPEXConv2d-169\"\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Malignant'), \n",
 "                        images[0],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Malignant'), \n",
 "                        images[1],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Malignant'), \n",
 "                        images[2],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "\n",
-"xgradcam = cam.xgradcam(viz_model._model, targetLayer, \n",
+"xgc = cam.x_gradcam(viz_model._model, targetLayer, \n",
 "                        label_map_func('Malignant'), \n",
 "                        images[3],\n",
 "                        final_image_dim,\n",
 "                        'cpu')\n",
 "\n",
-"xgradcam.visualize()\n",
+"xgc.visualize()\n",
 "text_for_shap = np.expand_dims(np.array(ensemble_results.iloc[image_idx]['text']), axis=0)\n",
-"ensemble_partition_explainer = attributions.partition_explainer(f, r\"\\W+\", test_nlp_dataset.class_names)\\\n",
-"                              (text_for_shap)\n",
-"ensemble_partition_explainer.visualize()\n",
-"\n"
+"ensemble_partition_explainer = attributions.partition_text_explainer(f, test_nlp_dataset.class_names, text_for_shap, r\"\\W+\")\n",
+"ensemble_partition_explainer.visualize()"
 ]
},
{
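Since the renamed cam.x_gradcam call is repeated verbatim for each image above, the per-image blocks could collapse into a loop. A sketch under the notebook's own assumptions (viz_model, images, and label_map_func defined in earlier cells):

from explainer import cam

final_image_dim = (224, 224)
target_layer = viz_model._model.layer4  # same layer as in the diff above

# One XGradCAM visualization per image, mirroring the 'Malignant' cell,
# instead of four copy-pasted call-and-visualize blocks.
for image in images[:4]:
    xgc = cam.x_gradcam(viz_model._model, target_layer,
                        label_map_func('Malignant'),
                        image, final_image_dim, 'cpu')
    xgc.visualize()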

notebooks/explainer/multimodal_cancer_detection/README.md

Lines changed: 1 addition & 3 deletions
@@ -37,11 +37,9 @@ The `dataset_utils.py` holds the supporting functions that prepare the image and
 
 To run `Multimodal_Cancer_Detection.ipynb`, install the following dependencies:
 1. [Intel® Explainable AI](https://github.com/IntelAI/intel-xai-tools)
-2. [Intel® Transfer Learning Tool](https://github.com/IntelAI/transfer-learning)
+2. `pip install intel-transfer-learning-tool==0.5`
 3. `pip install intel-extension-for-transformers`
 4. `pip install scikit-image`
-5. `pip install jupyterlab`
-6. `pip install jupyter-dash`
 7. `pip install nltk`
 8. `pip install docx2txt`
 9. `pip install openpyxl`

notebooks/explainer/newsgroups_with_attributions_and_metrics/README.md

Lines changed: 0 additions & 1 deletion
@@ -16,7 +16,6 @@ This notebook demonstrates how to use the attributions explainer API to explain
 
 To run `partitionexplainer.ipynb`, install the following dependencies:
 1. [Intel® Explainable AI](https://github.com/IntelAI/intel-xai-tools)
-2. pip install jupyter-dash
 
 ## References
 

notebooks/explainer/newsgroups_with_attributions_and_metrics/partitionexplainer.ipynb

Lines changed: 2 additions & 2 deletions
@@ -273,7 +273,7 @@
 "    preds = model.predict(X_batch)\n",
 "    return preds\n",
 "\n",
-"partition_explainer = attributions.partition_explainer(make_predictions, r\"\\W+\", selected_categories)(X_batch_text)"
+"partition_explainer = attributions.partition_text_explainer(make_predictions, selected_categories, X_batch_text, r\"\\W+\")"
 ]
},
{
@@ -468,7 +468,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
-"version": "3.9.6"
+"version": "3.9.16"
 }
},
"nbformat": 4,

notebooks/explainer/transfer_learning_text_classification/PyTorch_Text_Classifier_fine_tuning_with_Attributions.ipynb

Lines changed: 3 additions & 2 deletions
@@ -490,6 +490,7 @@
 "\n",
 "        # If training_args are given, we use the `Trainer` API to train the model\n",
 "        if self.training_args:\n",
+"            self.model.train()\n",
 "            self.trainer = Trainer(model=self.model,\n",
 "                                   args=self.training_args,\n",
 "                                   train_dataset=self.train_ds,\n",
@@ -534,6 +535,7 @@
 "    def evaluate(self, batch_size=16):\n",
 "        \n",
 "        if self.trainer:\n",
+"            self.model.eval()\n",
 "            metrics = self.trainer.evaluate()\n",
 "            for key in metrics.keys():\n",
 "                print(\"{}: {}\".format(key, metrics[key]))\n",
@@ -872,8 +874,7 @@
 "from explainer import attributions\n",
 "# Get shap values\n",
 "text_for_shap = dataset.dataset['test'][:10]['text']\n",
-"partition_explainer = attributions.partition_explainer(f, r\"\\W+\", dataset.class_labels.names)\n",
-"partition_explainer(text_for_shap)"
+"partition_explainer = attributions.partition_text_explainer(f, dataset.class_labels.names, text_for_shap, r\"\\W+\", )"
 ]
},
{
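The two one-line additions above pin the module's mode explicitly: dropout and batch-norm layers behave differently in training and inference, so calling train() before fitting and eval() before evaluating prevents, for example, evaluating with dropout still active. A self-contained PyTorch illustration of the effect:

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.Dropout(p=0.5), nn.Linear(8, 2))
x = torch.randn(4, 8)

model.train()                                # dropout active: stochastic passes
print(torch.allclose(model(x), model(x)))    # almost always False

model.eval()                                 # dropout disabled: deterministic
with torch.no_grad():
    print(torch.allclose(model(x), model(x)))  # True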

notebooks/explainer/transfer_learning_text_classification/README.md

Lines changed: 4 additions & 1 deletion
@@ -20,7 +20,10 @@ The notebook performs the following steps:
 
 ## Running the notebook
 
-To run the notebook, follow the instructions to setup the [PyTorch notebook environment](/notebooks#pytorch-environment).
+
+To run `PyTorch_Text_Classifier_fine_tuning_with_Attributions.ipynb`, install the following dependencies:
+1. [Intel® Explainable AI](https://github.com/IntelAI/intel-xai-tools)
+2. `pip install intel-transfer-learning-tool==0.5`
 
 ## References
 

0 commit comments