AseemD committed on
Commit
426d38f
·
verified ·
1 Parent(s): a66b125

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -32
app.py CHANGED
@@ -72,7 +72,7 @@ def main():
72
  if dataset == "Financial":
73
  # 1. Load the dataset
74
  X = load_dataset(dataset)
75
- st.write(f"Loan Approval Dataset Sample")
76
  st.write(X.head())
77
 
78
  # 2. Select interpretability method
@@ -89,18 +89,19 @@ def main():
89
  X = X.astype(float)
90
  explainer = shap.Explainer(model)
91
  shap_values = explainer(X)
92
-
93
- # Visualize SHAP values
94
  idx = st.slider("Select Test Instance", 0, len(X) - 1, 0)
95
- st.write("SHAP Force Plot for a Single Prediction")
96
- fig1 = shap.force_plot(explainer.expected_value, shap_values[idx].values, X.iloc[idx], matplotlib=True, show=False)
97
- st.pyplot(fig1)
98
- st.write("SHAP Summary Plot")
99
- fig2 =shap.summary_plot(shap_values, X, show=False)
100
- st.pyplot(fig2)
101
- st.write("SHAP Bar Plot")
102
- fig3 =shap.summary_plot(shap_values, X, plot_type="bar", show=False)
103
- st.pyplot(fig3)
 
 
 
104
 
105
  elif method == "LIME":
106
  st.subheader("3. Interpretability using LIME")
@@ -119,26 +120,27 @@ def main():
119
  idx = st.slider("Select Test Instance", 0, len(X_test) - 1, 0)
120
 
121
  # Explain the prediction instance using LIME
122
- explainer = lime.lime_tabular.LimeTabularExplainer(
123
- X_train,
124
- feature_names=list(X.columns),
125
- class_names=target,
126
- discretize_continuous=True,
127
- )
128
- exp = explainer.explain_instance(
129
- X_test[idx],
130
- model.predict_proba,
131
- )
132
-
133
- # Visualize the explanation
134
- st.write("LIME Explanation")
135
- exp.save_to_file('lime_explanation.html')
136
- HtmlFile = open(f'lime_explanation.html', 'r', encoding='utf-8')
137
- components.html(HtmlFile.read(), height=600)
138
- st.write('True label:', labels[str(y_test[idx])])
139
- st.write("Effect of Predictors")
140
- fig = exp.as_pyplot_figure()
141
- st.pyplot(fig, bbox_inches='tight')
 
142
 
143
 
144
  if __name__ == "__main__":
 
72
  if dataset == "Financial":
73
  # 1. Load the dataset
74
  X = load_dataset(dataset)
75
+ st.write(f"{dataset} Dataset Sample")
76
  st.write(X.head())
77
 
78
  # 2. Select interpretability method
 
89
  X = X.astype(float)
90
  explainer = shap.Explainer(model)
91
  shap_values = explainer(X)
 
 
92
  idx = st.slider("Select Test Instance", 0, len(X) - 1, 0)
93
+
94
+ # Visualize SHAP values
95
+ if st.button("Explain Prediction"):
96
+ st.write("SHAP Force Plot for a Single Prediction")
97
+ fig = shap.force_plot(explainer.expected_value, shap_values[idx].values, X.iloc[idx], matplotlib=True, show=False)
98
+ st.pyplot(fig, bbox_inches='tight')
99
+ st.write("SHAP Summary Plot")
100
+ fig =shap.summary_plot(shap_values, X, show=False)
101
+ st.pyplot(fig, bbox_inches='tight')
102
+ st.write("SHAP Bar Plot")
103
+ fig =shap.summary_plot(shap_values, X, plot_type="bar", show=False)
104
+ st.pyplot(fig, bbox_inches='tight')
105
 
106
  elif method == "LIME":
107
  st.subheader("3. Interpretability using LIME")
 
120
  idx = st.slider("Select Test Instance", 0, len(X_test) - 1, 0)
121
 
122
  # Explain the prediction instance using LIME
123
+ if st.button("Explain Prediction"):
124
+ explainer = lime.lime_tabular.LimeTabularExplainer(
125
+ X_train,
126
+ feature_names=list(X.columns),
127
+ class_names=target,
128
+ discretize_continuous=True,
129
+ )
130
+ exp = explainer.explain_instance(
131
+ X_test[idx],
132
+ model.predict_proba,
133
+ )
134
+
135
+ # Visualize the explanation
136
+ st.write("LIME Explanation")
137
+ st.write('True label:', labels[str(y_test[idx])])
138
+ exp.save_to_file('lime_explanation.html')
139
+ HtmlFile = open(f'lime_explanation.html', 'r', encoding='utf-8')
140
+ components.html(HtmlFile.read(), height=600)
141
+ st.write("Effect of Predictors")
142
+ fig = exp.as_pyplot_figure()
143
+ st.pyplot(fig, bbox_inches='tight')
144
 
145
 
146
  if __name__ == "__main__":