PART II B): German Credit Score Classification Model EXPLAINABILITY, BIAS & FAIRNESS (Gender as protected variable).

By: Krishna J

Importing necessary libraries

In [1]:
#!pip install --upgrade tensorflow==1.15.0
In [2]:
import pandas as pd
import numpy as np
import seaborn               as sns
import matplotlib.pyplot     as plt
from sklearn.model_selection import train_test_split
#from sklearn.ensemble        import RandomForestClassifier
#from sklearn.linear_model    import LogisticRegression
from sklearn.preprocessing   import MinMaxScaler, StandardScaler
from sklearn.base            import TransformerMixin
from sklearn.pipeline        import Pipeline, FeatureUnion
from typing                  import List, Union, Dict
# Warnings will be used to silence various model warnings for tidier output
import warnings
warnings.filterwarnings('ignore')
%matplotlib inline 
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
np.random.seed(0)
#!pip install fairlearn
#!pip install aif360
#!pip install shap
#!pip install eli5
#!pip install BlackBoxAuditing

Importing source dataset

In [3]:
German_df = pd.read_csv('C:/Users/krish/Downloads/German-reduced_upd.csv')

print(German_df.shape)
print (German_df.columns)
(1000, 11)
Index(['CurrentAcc_None', 'NumMonths', 'CreditHistory_Delay',
       'CreditHistory_none/paid', 'Collateral_savings/life_insurance',
       'CurrentAcc_GE200', 'Purpose_repairs', 'Purpose_radio/tv', 'Gender',
       'Age', 'CreditStatus'],
      dtype='object')
In [4]:
German_df.head()
Out[4]:
CurrentAcc_None NumMonths CreditHistory_Delay CreditHistory_none/paid Collateral_savings/life_insurance CurrentAcc_GE200 Purpose_repairs Purpose_radio/tv Gender Age CreditStatus
0 0 6 0 0 0 0 0 1 1 1 1
1 0 48 0 1 0 0 0 1 0 0 0
2 1 12 0 0 0 0 0 0 1 1 1
3 0 42 0 1 1 0 0 0 1 1 1
4 0 24 1 0 0 0 0 0 1 1 0
In [5]:
#feature_list = ['Gender','Age','Marital_Status','NumMonths','Savings_<500','Savings_none','Dependents','Property_rent','Job_management/self-emp/officer/highly qualif emp','Debtors_guarantor','Purpose_CarNew',                           'Purpose_furniture/equip','CreditHistory_none/paid','Purpose_CarUsed','CreditAmount','CreditStatus']
feature_list=['CurrentAcc_None', 'NumMonths', 'CreditHistory_Delay',
       'CreditHistory_none/paid', 'Collateral_savings/life_insurance',
       'CurrentAcc_GE200', 'Purpose_repairs', 'Purpose_radio/tv', 'Gender',
       'Age', 'CreditStatus']
In [6]:
X = German_df.iloc[:, :-1]
y = German_df['CreditStatus']
X.head()
y.head()
Out[6]:
CurrentAcc_None NumMonths CreditHistory_Delay CreditHistory_none/paid Collateral_savings/life_insurance CurrentAcc_GE200 Purpose_repairs Purpose_radio/tv Gender Age
0 0 6 0 0 0 0 0 1 1 1
1 0 48 0 1 0 0 0 1 0 0
2 1 12 0 0 0 0 0 0 1 1
3 0 42 0 1 1 0 0 0 1 1
4 0 24 1 0 0 0 0 0 1 1
Out[6]:
0    1
1    0
2    1
3    1
4    0
Name: CreditStatus, dtype: int64

from imblearn.over_sampling import ADASYN from collections import Counter

ada = ADASYN(random_state=40) print('Original dataset shape {}'.format(Counter(y))) X_res, y_res = ada.fit_resample(X,y) print('Resampled dataset shape {}'.format(Counter(y_res)))

German_df=X = pd.DataFrame(np.column_stack((X_res, y_res)))

In [7]:
German_df.head()
Out[7]:
CurrentAcc_None NumMonths CreditHistory_Delay CreditHistory_none/paid Collateral_savings/life_insurance CurrentAcc_GE200 Purpose_repairs Purpose_radio/tv Gender Age CreditStatus
0 0 6 0 0 0 0 0 1 1 1 1
1 0 48 0 1 0 0 0 1 0 0 0
2 1 12 0 0 0 0 0 0 1 1 1
3 0 42 0 1 1 0 0 0 1 1 1
4 0 24 1 0 0 0 0 0 1 1 0

German_df.columns=feature_list German_df.head()

Metrics to calculate model fairness necessary libraries

In [8]:
from aif360.datasets import GermanDataset
from aif360.metrics import BinaryLabelDatasetMetric

def fair_metrics(fname, dataset, pred, pred_is_dataset=False):
    filename = fname
    if pred_is_dataset:
        dataset_pred = pred
    else:
        dataset_pred = dataset.copy()
        dataset_pred.labels = pred

    cols = ['Accuracy', 'F1', 'DI','SPD', 'EOD', 'AOD', 'ERD', 'CNT', 'TI']
    obj_fairness = [[1,1,1,0,0,0,0,1,0]]

    fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)

    for attr in dataset_pred.protected_attribute_names:
        idx = dataset_pred.protected_attribute_names.index(attr)
        privileged_groups =  [{attr:dataset_pred.privileged_protected_attributes[idx][0]}]
        unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}]

        classified_metric = ClassificationMetric(dataset,
                                                     dataset_pred,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups)

        metric_pred = BinaryLabelDatasetMetric(dataset_pred,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups)

        distortion_metric = SampleDistortionMetric(dataset,
                                                     dataset_pred,
                                                     unprivileged_groups=unprivileged_groups,
                                                     privileged_groups=privileged_groups)

        acc = classified_metric.accuracy()
        f1_sc = 2 * (classified_metric.precision() * classified_metric.recall()) / (classified_metric.precision() + classified_metric.recall())

        mt = [acc, f1_sc,
                        classified_metric.disparate_impact(),
                        classified_metric.mean_difference(),
                        classified_metric.equal_opportunity_difference(),
                        classified_metric.average_odds_difference(),
                        classified_metric.error_rate_difference(),
                        metric_pred.consistency(),
                        classified_metric.theil_index()
                    ]
        w_row = []
        print('Computing fairness of the model.')
        for i in mt:
            #print("%.8f"%i)
            w_row.append("%.8f"%i)
        with open(filename, 'a') as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(w_row)
        row = pd.DataFrame([mt],
                           columns  = cols,
                           index = [attr]
                          )
        fair_metrics = fair_metrics.append(row)
    fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)
    return fair_metrics

def get_fair_metrics_and_plot(fname, data, model, plot=False, model_aif=False):
    pred = model.predict(data).labels if model_aif else model.predict(data.features)
    fair = fair_metrics(fname, data, pred)
    if plot:
        pass

    return fair

def get_model_performance(X_test, y_true, y_pred, probs):
    accuracy = accuracy_score(y_true, y_pred)
    matrix = confusion_matrix(y_true, y_pred)
    f1 = f1_score(y_true, y_pred)
    return accuracy, matrix, f1

def plot_model_performance(model, X_test, y_true):
    y_pred = model.predict(X_test)
    probs = model.predict_proba(X_test)
    accuracy, matrix, f1 = get_model_performance(X_test, y_true, y_pred, probs)

Local file to load metric values

In [9]:
filename= 'C:/Users/krish/Downloads/main_pjt_final - Copy/may18/filename_mainpjt_results_gender_may18_upd.csv'

Converting data to aif compatible format

Since we are dealing with binary label dataset we are using aif360 class BiaryLabelDataset here with target label as CreditStatus and protected attributes as age,gender,marital status. Refer part 11 for more details on protected attributes and privileged classes.

In [10]:
# Fairness metrics
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.explainers import MetricTextExplainer
from aif360.metrics import ClassificationMetric
# Get DF into IBM format
from aif360 import datasets
#converting to aif dataset
aif_dataset = datasets.BinaryLabelDataset(favorable_label = 1, unfavorable_label = 0, df=German_df,
                                                      label_names=["CreditStatus"],
                                                     protected_attribute_names=["Gender"],
                                              privileged_protected_attributes = [1])
In [11]:
#dataset_orig = GermanDataset(protected_attribute_names=['sex'],
#                            privileged_classes=[[1]],
#                            features_to_keep=['age', 'sex', 'employment', 'housing', 'savings', 'credit_amount', 'month', 'purpose'],
#                            custom_preprocessing=custom_preprocessing)

Splitting data to train and test sets

In [12]:
#privileged_groups = [{'Age':1},{' Gender': 1},{'Marital_Status':1}]
#unprivileged_groups = [{'Age':0},{'Gender': 0},{'Marital_Status':0}]
In [13]:
privileged_groups = [{'Gender': 1}]
unprivileged_groups = [{'Gender': 0}]
In [14]:
data_orig_train, data_orig_test = aif_dataset.split([0.8], shuffle=True)

X_train = data_orig_train.features
y_train = data_orig_train.labels.ravel()

X_test = data_orig_test.features
y_test = data_orig_test.labels.ravel()
In [15]:
X_train.shape
X_test.shape
Out[15]:
(800, 10)
Out[15]:
(200, 10)
In [16]:
data_orig_test.labels[:10].ravel()
Out[16]:
array([1., 0., 1., 0., 1., 0., 1., 1., 1., 1.])
In [17]:
data_orig_train.labels[:10].ravel()
Out[17]:
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 0.])

Testing bias with respect to protected variable

In [18]:
metric_orig_train = BinaryLabelDatasetMetric(data_orig_train, 
                                             unprivileged_groups=unprivileged_groups,
                                             privileged_groups=privileged_groups)
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference())
Difference in mean outcomes between unprivileged and privileged groups = -0.115809

A non zero value indicates bias.

Building ML model

Considering ensemble models for our study.

1. RANDOM FOREST CLASSIFIER MODEL

In [19]:
#Seting the Hyper Parameters
param_grid = {"max_depth": [3,5,7, 10,None],
              "n_estimators":[3,5,10,25,50,150],
              "max_features": [4,7,15,20]}
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV
#Creating the classifier
rf_model = RandomForestClassifier(random_state=40)
grid_search = GridSearchCV(rf_model, param_grid=param_grid, cv=5, scoring='recall', verbose=0)
model_rf = grid_search
In [20]:
mdl_rf = model_rf.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [21]:
from sklearn.metrics import confusion_matrix
conf_mat_rf = confusion_matrix(data_orig_test.labels.ravel(), model_rf.predict(data_orig_test.features))
conf_mat_rf
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_rf.predict(data_orig_test.features)))
Out[21]:
array([[  4,  59],
       [  1, 136]], dtype=int64)
0.7
In [22]:
unique, counts = np.unique(data_orig_test.labels.ravel(), return_counts=True)
dict(zip(unique, counts))
Out[22]:
{0.0: 63, 1.0: 137}

1.a. Feature importance of model

In [23]:
importances = model_rf.best_estimator_.feature_importances_
indices = np.argsort(importances)
features = data_orig_train.feature_names
#https://stackoverflow.com/questions/48377296/get-feature-importance-from-gridsearchcv
In [24]:
importances
Out[24]:
array([0.45669439, 0.21364021, 0.01568385, 0.09049872, 0.00545895,
       0.01824632, 0.00670235, 0.05110462, 0.04135576, 0.10061484])
In [25]:
importances[indices]
Out[25]:
array([0.00545895, 0.00670235, 0.01568385, 0.01824632, 0.04135576,
       0.05110462, 0.09049872, 0.10061484, 0.21364021, 0.45669439])
In [26]:
features
Out[26]:
['CurrentAcc_None',
 'NumMonths',
 'CreditHistory_Delay',
 'CreditHistory_none/paid',
 'Collateral_savings/life_insurance',
 'CurrentAcc_GE200',
 'Purpose_repairs',
 'Purpose_radio/tv',
 'Gender',
 'Age']
In [27]:
plt.figure(figsize=(20,30))
plt.title('Feature Importances')
plt.barh(range(len(indices)), importances[indices], color='b', align='center')
plt.yticks(range(len(indices)), [features[i] for i in indices])
plt.xlabel('Relative Importance')
plt.show()
Out[27]:
<Figure size 1440x2160 with 0 Axes>
Out[27]:
Text(0.5, 1.0, 'Feature Importances')
Out[27]:
<BarContainer object of 10 artists>
Out[27]:
([<matplotlib.axis.YTick at 0x2551c4a0c48>,
  <matplotlib.axis.YTick at 0x2551c4a4648>,
  <matplotlib.axis.YTick at 0x2551c4abfc8>,
  <matplotlib.axis.YTick at 0x2551c4f7ec8>,
  <matplotlib.axis.YTick at 0x2551c4f7748>,
  <matplotlib.axis.YTick at 0x2551c4e1f08>,
  <matplotlib.axis.YTick at 0x2551c4f7a08>,
  <matplotlib.axis.YTick at 0x2551c4fed88>,
  <matplotlib.axis.YTick at 0x2551c4fec08>,
  <matplotlib.axis.YTick at 0x2551c51dac8>],
 [Text(0, 0, 'Collateral_savings/life_insurance'),
  Text(0, 0, 'Purpose_repairs'),
  Text(0, 0, 'CreditHistory_Delay'),
  Text(0, 0, 'CurrentAcc_GE200'),
  Text(0, 0, 'Gender'),
  Text(0, 0, 'Purpose_radio/tv'),
  Text(0, 0, 'CreditHistory_none/paid'),
  Text(0, 0, 'Age'),
  Text(0, 0, 'NumMonths'),
  Text(0, 0, 'CurrentAcc_None')])
Out[27]:
Text(0.5, 0, 'Relative Importance')

1.b. Model Explainability/interpretability

1.b.1 Using SHAP (SHapley Additive exPlanations)

In [28]:
import shap

Test data interpretation

In [29]:
rf_explainer = shap.KernelExplainer(model_rf.predict, data_orig_test.features)
rf_shap_values = rf_explainer.shap_values(data_orig_test.features,nsamples=50)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [30]:
rf_shap_values
Out[30]:
array([[-0.09145515, -0.05341869,  0.        , ...,  0.06919924,
         0.10000652, -0.0950095 ],
       [ 0.        , -0.02772054,  0.        , ...,  0.01111922,
         0.00604221,  0.03555911],
       [ 0.        ,  0.0130656 ,  0.        , ...,  0.00600523,
         0.00119922,  0.0090243 ],
       ...,
       [ 0.00903226,  0.01234167,  0.        , ..., -0.00694954,
         0.00357798,  0.00699763],
       [ 0.        ,  0.00358795,  0.00033193, ..., -0.00836003,
         0.00382242,  0.01152767],
       [ 0.        , -0.04180513,  0.        , ..., -0.01309524,
         0.        ,  0.07241472]])
In [31]:
rf_explainer.expected_value
Out[31]:
0.9749999999999999
In [32]:
y_test_predict=model_rf.predict(data_orig_test.features)
y_test_predict[:12]
data_orig_test.labels[:12].ravel()
data_orig_test.features[:2,:]
Out[32]:
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Out[32]:
array([1., 0., 1., 0., 1., 0., 1., 1., 1., 1., 1., 1.])
Out[32]:
array([[ 0., 36.,  0.,  1.,  0.,  1.,  0.,  1.,  1.,  0.],
       [ 0., 36.,  0.,  1.,  0.,  0.,  0.,  1.,  1.,  1.]])
In [33]:
y_test_predict.mean()
Out[33]:
0.975

The explainer expected value is the average model predicted value on input data. Shapely helps to understand how individual features impact the output of each individual instance. The shapely values are model predicted values which may not coincide with actual y test values due to prediction error.

link=”logit” argument converts the logit values to probability

In [34]:
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[0],data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
#https://github.com/slundberg/shap/issues/977
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[0],data_orig_test.features[0],data_orig_test.feature_names)
Out[34]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
Out[34]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.

Features in blue pushes the base value towards lowest values and features in red moves base levels towards higher values.

Shapley values calculate the importance of a feature by comparing what a model predicts with and without the feature. However, since the order in which a model sees features can affect its predictions, this is done in every possible order, so that the features are fairly compared.

The SHAP plot shows features that contribute to pushing the output from the base value (average model output) to the actual predicted value.

In [35]:
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[1], data_orig_test.features[1],data_orig_test.feature_names,link='logit')
shap.initjs()
shap.force_plot(rf_explainer.expected_value,rf_shap_values[1], data_orig_test.features[1],data_orig_test.feature_names)
Out[35]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
Out[35]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [36]:
data_orig_test.feature_names
Out[36]:
['CurrentAcc_None',
 'NumMonths',
 'CreditHistory_Delay',
 'CreditHistory_none/paid',
 'Collateral_savings/life_insurance',
 'CurrentAcc_GE200',
 'Purpose_repairs',
 'Purpose_radio/tv',
 'Gender',
 'Age']
In [37]:
shap.force_plot(rf_explainer.expected_value,
                rf_shap_values, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[37]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [38]:
p = shap.summary_plot(rf_shap_values, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

Variables with higher impact are displayed at the credit history, credit amount,num of months.

In [39]:
shap.decision_plot(rf_explainer.expected_value, rf_shap_values,feature_names=data_orig_test.feature_names)
  • The x-axis represents the model's output. In this case, the units are log odds.
  • The plot is centered on the x-axis at explainer.expected_value.
  • All SHAP values are relative to the model's expected value like a linear model's effects are relative to the intercept.
  • The y-axis lists the model's features.
  • By default, the features are ordered by descending importance. The importance is calculated over the observations plotted. This is usually different than the importance ordering for the entire dataset.
  • In addition to feature importance ordering, the decision plot also supports hierarchical cluster feature ordering and user-defined feature ordering.
  • Each observation's prediction is represented by a colored line. At the top of the plot, each line strikes the x-axis at its corresponding observation's predicted value. This value determines the color of the line on a spectrum.
  • Moving from the bottom of the plot to the top, SHAP values for each feature are added to the model's base value. This shows how each feature contributes to the overall prediction.
  • At the bottom of the plot, the observations converge at explainer.expected_value https://slundberg.github.io/shap/notebooks/plots/decision_plot.html

Like the force plot, the decision plot supports link='logit' to transform log odds to probabilities.

In [40]:
shap.decision_plot(rf_explainer.expected_value, rf_shap_values,feature_names=data_orig_test.feature_names,link='logit')
In [41]:
shap.plots._waterfall.waterfall_legacy(rf_explainer.expected_value, rf_shap_values[0],feature_names=data_orig_test.feature_names)

For first instace of input,out of all the displayed variables, CreditHistory is playing major role is pushing the target variable outcome towards predicting 1.

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

f(x)- model output impacted by features; E(f(x))- expected output.

One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

Shapley values calculate the importance of a feature by comparing what a model predicts with and without the feature. However, since the order in which a model sees features can affect its predictions, this is done in every possible order, so that the features are fairly compared. https://medium.com/@gabrieltseng/interpreting-complex-models-with-shap-values-1c187db6ec83

In [42]:
shap.plots._waterfall.waterfall_legacy(rf_explainer.expected_value, rf_shap_values[1],feature_names=data_orig_test.feature_names)

For second instace of input,out of all the displayed variables, credit history is playing major role is pushing the target variable outcome towards predicting 1.

1.b.2 Using ELI5

In [43]:
#!pip install eli5
import eli5
In [44]:
from eli5.sklearn import PermutationImportance
In [45]:
perm_rf = PermutationImportance(mdl_rf).fit(data_orig_test.features, data_orig_test.labels.ravel())

Feature Importance

In [46]:
perm_imp_1=eli5.show_weights(perm_rf,feature_names = data_orig_test.feature_names)
perm_imp_1
plt.show()
Out[46]:
Weight Feature
0.0263 ± 0.0198 NumMonths
0.0146 ± 0.0131 Purpose_radio/tv
0.0102 ± 0.0117 Age
0.0073 ± 0.0092 Gender
0.0073 ± 0.0000 CurrentAcc_GE200
0.0029 ± 0.0072 CreditHistory_none/paid
0.0015 ± 0.0058 CurrentAcc_None
0 ± 0.0000 Purpose_repairs
0 ± 0.0000 Collateral_savings/life_insurance
0 ± 0.0000 CreditHistory_Delay
  • eli5 provides a way to compute feature importances for any black-box estimator by measuring how score decreases when a feature is not available; the method is also known as “permutation importance” or “Mean Decrease Accuracy (MDA)”.
  • The first number in each row shows how much model performance decreased with a random shuffling (in this case, using "accuracy" as the performance metric).

  • Like most things in data science, there is some randomness to the exact performance change from a shuffling a column. We measure the amount of randomness in our permutation importance calculation by repeating the process with multiple shuffles. The number after the ± measures how performance varied from one-reshuffling to the next.

  • You'll occasionally see negative values for permutation importances. In those cases, the predictions on the shuffled (or noisy) data happened to be more accurate than the real data. This happens when the feature didn't matter (should have had an importance close to 0), but random chance caused the predictions on shuffled data to be more accurate. This is more common with small datasets, like the one in this example, because there is more room for luck/chance.

https://www.kaggle.com/dansbecker/permutation-importance

1.c. Measuring fairness

Of Baseline model

In [47]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_rf, X_test, y_test)
In [48]:
fair_rf = get_fair_metrics_and_plot(filename, data_orig_test, mdl_rf)
fair_rf
Computing fairness of the model.
Out[48]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.0 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1.000 0.000000
Gender 0.7 0.819277 0.957562 -0.041857 -0.02439 -0.069118 -0.106545 0.975 0.062379
In [49]:
type(data_orig_train)
Out[49]:
aif360.datasets.binary_label_dataset.BinaryLabelDataset

PRE PROCESSING

In [50]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_rf = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_rf_rw = RW_rf.fit_transform(data_orig_train)
#train and save model
rf_transf_rw = model_rf.fit(data_transf_train_rf_rw.features,
                     data_transf_train_rf_rw.labels.ravel())

data_transf_test_rf_rw = RW_rf.transform(data_orig_test)
fair_rf_rw = get_fair_metrics_and_plot(filename, data_transf_test_rf_rw, rf_transf_rw, plot=False)
Computing fairness of the model.
In [51]:
metric_transf_train = BinaryLabelDatasetMetric(data_transf_train_rf_rw, 
                                               unprivileged_groups=unprivileged_groups,
                                               privileged_groups=privileged_groups)
print("Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference())
Difference in mean outcomes between unprivileged and privileged groups = -0.000000
In [52]:
fair_rf_rw
Out[52]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1.000 0.000000
Gender 0.684158 0.808162 0.966952 -0.032539 -0.02439 -0.069118 -0.195898 0.975 0.062379
In [53]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_rf = DisparateImpactRemover()
data_transf_train_rf_dir = DIR_rf.fit_transform(data_orig_train)

# Train and save the model
rf_transf_dir = model_rf.fit(data_transf_train_rf_dir.features,data_transf_train_rf_dir.labels.ravel())
In [54]:
fair_dir_rf_dir = get_fair_metrics_and_plot(filename,data_orig_test, rf_transf_dir, plot=False)
fair_dir_rf_dir
Computing fairness of the model.
Out[54]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1.000 0.000000
Gender 0.705 0.821752 0.964258 -0.035008 -0.02439 -0.059118 -0.099696 0.973 0.062101

INPROCESSING

In [55]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [56]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [57]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [58]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [59]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [60]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_rf_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_rf_ad.fit(data_orig_train)
        fair_rf_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_rf_ad, plot=False, model_aif=True)
WARNING:tensorflow:From C:\Users\krish\Anaconda3\lib\site-packages\aif360\algorithms\inprocessing\adversarial_debiasing.py:89: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
WARNING:tensorflow:From C:\Users\krish\Anaconda3\lib\site-packages\tensorflow_core\python\ops\nn_impl.py:183: where (from tensorflow.python.ops.array_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.where in 2.0, which has the same broadcast rule as np.where
epoch 0; iter: 0; batch classifier loss: 0.823524; batch adversarial loss: 0.732953
epoch 1; iter: 0; batch classifier loss: 0.716969; batch adversarial loss: 0.723881
epoch 2; iter: 0; batch classifier loss: 0.855076; batch adversarial loss: 0.739134
epoch 3; iter: 0; batch classifier loss: 0.757367; batch adversarial loss: 0.719561
epoch 4; iter: 0; batch classifier loss: 0.686314; batch adversarial loss: 0.722460
epoch 5; iter: 0; batch classifier loss: 0.646053; batch adversarial loss: 0.724609
epoch 6; iter: 0; batch classifier loss: 0.618586; batch adversarial loss: 0.722826
epoch 7; iter: 0; batch classifier loss: 0.670463; batch adversarial loss: 0.719570
epoch 8; iter: 0; batch classifier loss: 0.597921; batch adversarial loss: 0.713389
epoch 9; iter: 0; batch classifier loss: 0.589259; batch adversarial loss: 0.713714
Out[60]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x2551ec9a308>
Computing fairness of the model.
In [61]:
fair_rf_ad
Out[61]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1 0.000000
Gender 0.69 0.813253 0.907407 -0.092593 -0.04878 -0.139775 -0.120244 [0.977] 0.067752
In [62]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_rf = PrejudiceRemover()

# Train and save the model
debiased_model_pr_rf.fit(data_orig_train)

fair_rf_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_rf, plot=False, model_aif=True)
fair_rf_pr
Out[62]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x2551dc7a188>
Computing fairness of the model.
Out[62]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.811688 0.798822 -0.181887 -0.153455 -0.245958 -0.042111 [0.893] 0.115516
#
In [63]:
y_pred = debiased_model_pr_rf.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [64]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_rf.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_rf.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [65]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_rf = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_rf = EOPP_rf.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf_eopp = EOPP_rf.predict(data_orig_test_pred)
fair_rf_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [66]:
fair_rf_eo
Out[66]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.00000 1.000000 0.00000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.695 0.81571 0.990089 -0.00964 -0.013974 -0.015448 -0.088026 [0.975] 0.067496
In [67]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_rf = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_rf = CPP_rf.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf_cpp = CPP_rf.predict(data_orig_test_pred)
fair_rf_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [68]:
fair_rf_ceo
Out[68]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1 0.000000
Gender 0.69 0.814371 0.944444 -0.055556 -0.02439 -0.089118 -0.120244 [0.985] 0.062882
In [69]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_rf = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_rf = ROC_rf.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf_roc = ROC_rf.predict(data_orig_test_pred)
fair_rf_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [70]:
fair_rf_roc
Out[70]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.67 0.725 0.960526 -0.020548 -0.070884 -0.050057 0.055302 [0.8400000000000002] 0.319948

2. XGBoost Classifier

In [71]:
from xgboost import XGBClassifier
estimator = XGBClassifier(seed=40)

parameters = {
    'max_depth': range (2, 10, 2),
    'n_estimators': range(60, 240, 40),
    'learning_rate': [0.1, 0.01, 0.05]
}
grid_search = GridSearchCV(
    estimator=estimator,
    param_grid=parameters,
    scoring = 'recall',
    
    cv = 5,
    verbose=0
)

model_xg=grid_search
In [72]:
mdl_xgb = model_xg.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [73]:
conf_mat_xg = confusion_matrix(data_orig_test.labels.ravel(), model_xg.predict(data_orig_test.features))
conf_mat_xg
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_xg.predict(data_orig_test.features)))
Out[73]:
array([[ 13,  50],
       [  4, 133]], dtype=int64)
0.73

2.a. Feature importance of model

In [74]:
importances_xg = model_xg.best_estimator_.feature_importances_
indices_xg = np.argsort(importances_xg)
features = data_orig_train.feature_names
#https://stackoverflow.com/questions/48377296/get-feature-importance-from-gridsearchcv
In [75]:
importances_xg
Out[75]:
array([0.5974188 , 0.10316839, 0.        , 0.13026263, 0.        ,
       0.        , 0.        , 0.03108615, 0.10795941, 0.03010464],
      dtype=float32)
In [76]:
importances_xg[indices_xg]
Out[76]:
array([0.        , 0.        , 0.        , 0.        , 0.03010464,
       0.03108615, 0.10316839, 0.10795941, 0.13026263, 0.5974188 ],
      dtype=float32)
In [77]:
features
Out[77]:
['CurrentAcc_None',
 'NumMonths',
 'CreditHistory_Delay',
 'CreditHistory_none/paid',
 'Collateral_savings/life_insurance',
 'CurrentAcc_GE200',
 'Purpose_repairs',
 'Purpose_radio/tv',
 'Gender',
 'Age']
In [78]:
plt.figure(figsize=(20,30))
plt.title('Feature Importances')
plt.barh(range(len(indices_xg)), importances_xg[indices_xg], color='b', align='center')
plt.yticks(range(len(indices_xg)), [features[i] for i in indices_xg])
plt.xlabel('Relative Importance')
plt.show()
Out[78]:
<Figure size 1440x2160 with 0 Axes>
Out[78]:
Text(0.5, 1.0, 'Feature Importances')
Out[78]:
<BarContainer object of 10 artists>
Out[78]:
([<matplotlib.axis.YTick at 0x2552208a7c8>,
  <matplotlib.axis.YTick at 0x25522097588>,
  <matplotlib.axis.YTick at 0x255220c8108>,
  <matplotlib.axis.YTick at 0x255220f6248>,
  <matplotlib.axis.YTick at 0x255220f65c8>,
  <matplotlib.axis.YTick at 0x255220f6c08>,
  <matplotlib.axis.YTick at 0x255220fc248>,
  <matplotlib.axis.YTick at 0x255220fc608>,
  <matplotlib.axis.YTick at 0x255220fcec8>,
  <matplotlib.axis.YTick at 0x25522101548>],
 [Text(0, 0, 'CreditHistory_Delay'),
  Text(0, 0, 'Collateral_savings/life_insurance'),
  Text(0, 0, 'CurrentAcc_GE200'),
  Text(0, 0, 'Purpose_repairs'),
  Text(0, 0, 'Age'),
  Text(0, 0, 'Purpose_radio/tv'),
  Text(0, 0, 'NumMonths'),
  Text(0, 0, 'Gender'),
  Text(0, 0, 'CreditHistory_none/paid'),
  Text(0, 0, 'CurrentAcc_None')])
Out[78]:
Text(0.5, 0, 'Relative Importance')

2.b. Model Explainability/interpretability

2.b.1 Using SHAP (SHapley Additive exPlanations)

In [79]:
import shap
xg_shap_values_t1 = shap.KernelExplainer(mdl_xgb.predict,data_orig_train.features)
Using 800 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.

Test data interpretation

In [80]:
xgb_explainer = shap.KernelExplainer(mdl_xgb.predict, data_orig_test.features)
xgb_shap_values = xgb_explainer.shap_values(data_orig_test.features,nsamples=10)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [81]:
xgb_shap_values
Out[81]:
array([[ 0.        , -0.915     ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [-0.15428571, -0.51428571,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.085     ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       ...,
       [ 0.0425    ,  0.0425    ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.085     ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        , -0.18125   ,  0.        , ...,  0.        ,
         0.        ,  0.        ]])
In [82]:
shap.initjs()
shap.force_plot(xgb_explainer.expected_value,xgb_shap_values[0,:], data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
Out[82]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [83]:
shap.initjs()
shap.force_plot(xgb_explainer.expected_value,xgb_shap_values[1,:], data_orig_test.features[1],data_orig_test.feature_names,link='logit')
Out[83]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [84]:
shap.force_plot(xgb_explainer.expected_value,
                xgb_shap_values, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[84]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [85]:
p = shap.summary_plot(xgb_shap_values, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

The variables with higher impact are the ones in the top.

In [86]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer.expected_value, xgb_shap_values[0,:],feature_names=data_orig_test.feature_names)

Here credit history none/paid is moving target outcome towards right i.e., 1.

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

f(x)- model output impacted by features; E(f(x))- expected output.

One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

In [87]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer.expected_value, xgb_shap_values[1],feature_names=data_orig_test.feature_names)

Here Credit History and Age are moving the target result towards right.

2.b.2 Using ELI5

In [88]:
#!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
In [89]:
perm_xgb = PermutationImportance(mdl_xgb).fit(data_orig_test.features, data_orig_test.labels.ravel())

Feature Importance

In [90]:
perm_imp_2=eli5.show_weights(perm_xgb,feature_names = data_orig_test.feature_names)
perm_imp_2
plt.show()
Out[90]:
Weight Feature
0.0380 ± 0.0194 NumMonths
0.0204 ± 0.0312 CurrentAcc_None
0.0044 ± 0.0072 CreditHistory_none/paid
0 ± 0.0000 Age
0 ± 0.0000 Gender
0 ± 0.0000 Purpose_radio/tv
0 ± 0.0000 Purpose_repairs
0 ± 0.0000 CurrentAcc_GE200
0 ± 0.0000 Collateral_savings/life_insurance
0 ± 0.0000 CreditHistory_Delay

2.c. Measuring fairness

Of Baseline model

In [91]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_xgb, X_test, y_test)
In [92]:
fair_xg = get_fair_metrics_and_plot(filename, data_orig_test, model_xg)
fair_xg
Computing fairness of the model.
Out[92]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.00000 1.00000 0.000000 0.00000 0.000000 0.000000 1.000 0.000000
Gender 0.73 0.83125 1.01643 0.014967 0.00686 -0.011955 -0.090817 0.963 0.074753

PRE PROCESSING

In [93]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_xg = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_xg_rw = RW_xg.fit_transform(data_orig_train)

#train and save model
xg_transf_rw = model_xg.fit(data_transf_train_xg_rw.features,
                     data_transf_train_xg_rw.labels.ravel())

data_transf_test_xg_rw = RW_xg.transform(data_orig_test)
fair_xg_rw = get_fair_metrics_and_plot(filename, data_transf_test_xg_rw, xg_transf_rw, plot=False)
Computing fairness of the model.
In [94]:
fair_xg_rw
Out[94]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00000 1.00000 1.000000 0.000000 0.00000 0.000000 0.000000 1.000 0.000000
Gender 0.71875 0.82244 1.037968 0.034313 0.00686 -0.011955 -0.167469 0.963 0.074753
In [95]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_xg = DisparateImpactRemover()
data_transf_train_xg_dir = DIR_xg.fit_transform(data_orig_train)

# Train and save the model
xg_transf_dir = model_xg.fit(data_transf_train_xg_dir.features,data_transf_train_xg_dir.labels.ravel())
In [96]:
fair_dir_xg_dir = get_fair_metrics_and_plot(filename,data_orig_test, xg_transf_dir, plot=False)
fair_dir_xg_dir
Computing fairness of the model.
Out[96]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.00000 1.000000 0.000000 0.000000 0.00000 0.000000 1.000 0.000000
Gender 0.73 0.83125 0.961317 -0.035769 -0.027947 -0.07782 -0.090817 0.942 0.074753

INPROCESSING

In [97]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [98]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [99]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [100]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [101]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [102]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_xg_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_xg_ad.fit(data_orig_train)
        fair_xg_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_xg_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.979881; batch adversarial loss: 0.681213
epoch 1; iter: 0; batch classifier loss: 0.924482; batch adversarial loss: 0.710266
epoch 2; iter: 0; batch classifier loss: 0.738003; batch adversarial loss: 0.690696
epoch 3; iter: 0; batch classifier loss: 0.672916; batch adversarial loss: 0.644412
epoch 4; iter: 0; batch classifier loss: 0.780086; batch adversarial loss: 0.720220
epoch 5; iter: 0; batch classifier loss: 0.742021; batch adversarial loss: 0.677548
epoch 6; iter: 0; batch classifier loss: 0.725334; batch adversarial loss: 0.688967
epoch 7; iter: 0; batch classifier loss: 0.799759; batch adversarial loss: 0.674640
epoch 8; iter: 0; batch classifier loss: 0.844561; batch adversarial loss: 0.691688
epoch 9; iter: 0; batch classifier loss: 0.636004; batch adversarial loss: 0.642598
Out[102]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x25522526888>
Computing fairness of the model.
In [103]:
fair_xg_ad
Out[103]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.0 0.0 0.0 0.0 0.000000 1 0.000000
Gender 0.685 0.813056 1.0 0.0 0.0 0.0 -0.101725 [1.0] 0.058241
In [104]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_xg = PrejudiceRemover()

# Train and save the model
debiased_model_pr_xg.fit(data_orig_train)

fair_xg_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_xg, plot=False, model_aif=True)
fair_xg_pr
Out[104]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x255225203c8>
Computing fairness of the model.
Out[104]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.811688 0.798822 -0.181887 -0.153455 -0.245958 -0.042111 [0.893] 0.115516
#
In [105]:
y_pred = debiased_model_pr_xg.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [106]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_xgb.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_xgb.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [107]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_xg = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_xg = EOPP_xg.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg_eopp = EOPP_xg.predict(data_orig_test_pred)
fair_xg_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [108]:
fair_xg_eo
Out[108]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.00000 1.000000 0.000000 0.00000 0.000000 0.000000 1 0.00000
Gender 0.715 0.82243 1.008845 0.008118 -0.01753 0.004312 -0.060629 [0.935] 0.08078
In [109]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_xg = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_xg = CPP_xg.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg_cpp = CPP_xg.predict(data_orig_test_pred)
fair_xg_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [110]:
fair_xg_ceo
Out[110]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1 0.000000
Gender 0.71 0.823171 0.907537 -0.090563 -0.04878 -0.148236 -0.118214 [0.954] 0.066623
In [111]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_xg = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_xg = ROC_xg.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg_roc = ROC_xg.predict(data_orig_test_pred)
fair_xg_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [112]:
fair_xg_roc
Out[112]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.675 0.728033 1.074074 0.037037 -0.036077 0.025808 0.062151 [0.8760000000000001] 0.318399
In [ ]:
 
In [ ]:
 

3. XGBOOST with out hyper-parameter tuning

In [113]:
from xgboost import XGBClassifier
model_xgb2 = XGBClassifier(seed=40)
In [114]:
mdl_xgb2 = model_xgb2.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [115]:
conf_mat_xg2 = confusion_matrix(data_orig_test.labels.ravel(), model_xgb2.predict(data_orig_test.features))
conf_mat_xg2
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_xgb2.predict(data_orig_test.features)))
Out[115]:
array([[ 24,  39],
       [ 27, 110]], dtype=int64)
0.67

3.a. Feature importance of model

In [116]:
importances_xg2 = model_xgb2.feature_importances_
indices_xg2 = np.argsort(importances_xg2)
features2 = data_orig_train.feature_names
#https://stackoverflow.com/questions/48377296/get-feature-importance-from-gridsearchcv
In [117]:
importances_xg2
Out[117]:
array([0.39215407, 0.0720939 , 0.06759191, 0.06214546, 0.06246556,
       0.08996142, 0.03320486, 0.06720946, 0.07087763, 0.08229572],
      dtype=float32)
In [118]:
importances_xg2[indices_xg2]
Out[118]:
array([0.03320486, 0.06214546, 0.06246556, 0.06720946, 0.06759191,
       0.07087763, 0.0720939 , 0.08229572, 0.08996142, 0.39215407],
      dtype=float32)
In [119]:
features2
Out[119]:
['CurrentAcc_None',
 'NumMonths',
 'CreditHistory_Delay',
 'CreditHistory_none/paid',
 'Collateral_savings/life_insurance',
 'CurrentAcc_GE200',
 'Purpose_repairs',
 'Purpose_radio/tv',
 'Gender',
 'Age']
In [120]:
plt.figure(figsize=(20,30))
plt.title('Feature Importances')
plt.barh(range(len(indices_xg2)), importances_xg2[indices_xg2], color='b', align='center')
plt.yticks(range(len(indices_xg2)), [features2[i] for i in indices_xg2])
plt.xlabel('Relative Importance')
plt.show()
Out[120]:
<Figure size 1440x2160 with 0 Axes>
Out[120]:
Text(0.5, 1.0, 'Feature Importances')
Out[120]:
<BarContainer object of 10 artists>
Out[120]:
([<matplotlib.axis.YTick at 0x255254d07c8>,
  <matplotlib.axis.YTick at 0x255254cbe88>,
  <matplotlib.axis.YTick at 0x255254c9b48>,
  <matplotlib.axis.YTick at 0x25525514108>,
  <matplotlib.axis.YTick at 0x25525514708>,
  <matplotlib.axis.YTick at 0x25525514ec8>,
  <matplotlib.axis.YTick at 0x2552551b388>,
  <matplotlib.axis.YTick at 0x2552551ba48>,
  <matplotlib.axis.YTick at 0x25525520348>,
  <matplotlib.axis.YTick at 0x25525520d08>],
 [Text(0, 0, 'Purpose_repairs'),
  Text(0, 0, 'CreditHistory_none/paid'),
  Text(0, 0, 'Collateral_savings/life_insurance'),
  Text(0, 0, 'Purpose_radio/tv'),
  Text(0, 0, 'CreditHistory_Delay'),
  Text(0, 0, 'Gender'),
  Text(0, 0, 'NumMonths'),
  Text(0, 0, 'Age'),
  Text(0, 0, 'CurrentAcc_GE200'),
  Text(0, 0, 'CurrentAcc_None')])
Out[120]:
Text(0.5, 0, 'Relative Importance')

3.b. Model Explainability/interpretability

3.b.1 Using SHAP (SHapley Additive exPlanations)

In [121]:
import shap
xg_shap_values_t = shap.KernelExplainer(mdl_xgb2.predict,data_orig_train.features)
Using 800 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.

Test data interpretation

In [122]:
xgb_explainer2 = shap.KernelExplainer(mdl_xgb2.predict, data_orig_test.features)
xgb_shap_values2 = xgb_explainer2.shap_values(data_orig_test.features,nsamples=10)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [123]:
xgb_shap_values2
Out[123]:
array([[ 0.        , -0.105     ,  0.        , ...,  0.395     ,
        -0.35375   ,  0.47      ],
       [-0.08558824, -0.42911765,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.        ,  0.        , ...,  0.17      ,
         0.        ,  0.085     ],
       ...,
       [ 0.1775    ,  0.0775    ,  0.        , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        ,  0.15125   ,  0.10375   , ...,  0.        ,
         0.        ,  0.        ],
       [ 0.        , -0.15833333,  0.        , ...,  0.        ,
         0.        ,  0.        ]])
In [124]:
shap.initjs()
shap.force_plot(xgb_explainer2.expected_value,xgb_shap_values2[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
Out[124]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [125]:
shap.initjs()
shap.force_plot(xgb_explainer2.expected_value,xgb_shap_values2[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit')
Out[125]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [126]:
data_orig_test.feature_names
Out[126]:
['CurrentAcc_None',
 'NumMonths',
 'CreditHistory_Delay',
 'CreditHistory_none/paid',
 'Collateral_savings/life_insurance',
 'CurrentAcc_GE200',
 'Purpose_repairs',
 'Purpose_radio/tv',
 'Gender',
 'Age']
In [127]:
shap.force_plot(xgb_explainer2.expected_value,
                xgb_shap_values2, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[127]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [128]:
p = shap.summary_plot(xgb_shap_values2, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

The variables with higher impact are at the top.

In [129]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer2.expected_value, xgb_shap_values2[0,:],feature_names=data_orig_test.feature_names)

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

  • f(x)- model output impacted by features; E(f(x))- expected output.

  • One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

In [130]:
shap.plots._waterfall.waterfall_legacy(xgb_explainer2.expected_value, xgb_shap_values2[1],feature_names=data_orig_test.feature_names)

3.b.2 Using ELI5

In [131]:
#!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
In [132]:
perm_xgb2 = PermutationImportance(mdl_xgb2).fit(data_orig_test.features, data_orig_test.labels.ravel())

Feature Importance

In [133]:
perm_imp_3=eli5.show_weights(perm_xgb2,feature_names = data_orig_test.feature_names)
perm_imp_3
plt.show()
Out[133]:
Weight Feature
0.0540 ± 0.0462 CurrentAcc_None
0.0430 ± 0.0427 CreditHistory_none/paid
0.0350 ± 0.0623 NumMonths
0.0130 ± 0.0185 CurrentAcc_GE200
0.0100 ± 0.0261 Purpose_radio/tv
0.0070 ± 0.0224 Age
0.0040 ± 0.0194 Collateral_savings/life_insurance
-0.0020 ± 0.0080 Purpose_repairs
-0.0050 ± 0.0179 CreditHistory_Delay
-0.0100 ± 0.0385 Gender

Explaining individual predictions

In [134]:
from eli5 import show_prediction
show_prediction(mdl_xgb2, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[134]:

y=0.0 (probability 0.556, score -0.227) top features

Contribution? Feature Value
+0.707 CurrentAcc_None 0.000
+0.274 NumMonths 36.000
+0.212 Purpose_radio/tv 1.000
+0.071 CreditHistory_none/paid 1.000
+0.055 Collateral_savings/life_insurance 0.000
+0.047 CurrentAcc_GE200 0.000
-0.022 Gender 1.000
-0.025 CreditHistory_Delay 0.000
-0.030 Purpose_repairs 0.000
-0.072 Age 1.000
-0.992 <BIAS> 1.000
In [ ]:
 

3.c. Measuring fairness

Of Baseline model

In [135]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_xgb2, X_test, y_test)
In [136]:
fair_xg2 = get_fair_metrics_and_plot(filename, data_orig_test, mdl_xgb2)
fair_xg2
Computing fairness of the model.
Out[136]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.67 0.769231 0.769157 -0.183409 -0.206047 -0.202254 0.055302 0.809 0.196757

PRE PROCESSING

In [137]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_xg2 = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_xg2_rw = RW_xg2.fit_transform(data_orig_train)

#train and save model
xg2_transf_rw = model_xgb2.fit(data_transf_train_xg2_rw.features,
                     data_transf_train_xg2_rw.labels.ravel())

data_transf_test_xg2_rw = RW_xg2.transform(data_orig_test)
fair_xg2_rw = get_fair_metrics_and_plot(filename, data_transf_test_xg2_rw, xg2_transf_rw, plot=False)
Computing fairness of the model.
In [138]:
fair_xg2_rw
Out[138]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.655381 0.755867 0.792456 -0.163082 -0.206047 -0.202254 0.025807 0.809 0.196757
In [139]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_xg2 = DisparateImpactRemover()
data_transf_train_xg2_dir = DIR_xg2.fit_transform(data_orig_train)

# Train and save the model
xg2_transf_dir = model_xgb2.fit(data_transf_train_xg2_dir.features,data_transf_train_xg2_dir.labels.ravel())
In [140]:
fair_dir_xg2_dir = get_fair_metrics_and_plot(filename,data_orig_test, xg2_transf_dir, plot=False)
fair_dir_xg2_dir
Computing fairness of the model.
Out[140]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.685 0.790698 0.707123 -0.260781 -0.230183 -0.322784 -0.000254 0.853 0.148183

INPROCESSING

In [141]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [142]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [143]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [144]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [145]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [146]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_xg2_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_xg2_ad.fit(data_orig_train)
        fair_xg2_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_xg2_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.979881; batch adversarial loss: 0.681213
epoch 1; iter: 0; batch classifier loss: 0.924482; batch adversarial loss: 0.710266
epoch 2; iter: 0; batch classifier loss: 0.738003; batch adversarial loss: 0.690696
epoch 3; iter: 0; batch classifier loss: 0.672916; batch adversarial loss: 0.644412
epoch 4; iter: 0; batch classifier loss: 0.780086; batch adversarial loss: 0.720220
epoch 5; iter: 0; batch classifier loss: 0.742021; batch adversarial loss: 0.677548
epoch 6; iter: 0; batch classifier loss: 0.725334; batch adversarial loss: 0.688967
epoch 7; iter: 0; batch classifier loss: 0.799759; batch adversarial loss: 0.674640
epoch 8; iter: 0; batch classifier loss: 0.844561; batch adversarial loss: 0.691688
epoch 9; iter: 0; batch classifier loss: 0.636004; batch adversarial loss: 0.642598
Out[146]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x25525d50508>
Computing fairness of the model.
In [147]:
fair_xg2_ad
Out[147]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.0 0.0 0.0 0.0 0.000000 1 0.000000
Gender 0.685 0.813056 1.0 0.0 0.0 0.0 -0.101725 [1.0] 0.058241
In [148]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_xg2 = PrejudiceRemover()

# Train and save the model
debiased_model_pr_xg2.fit(data_orig_train)

fair_xg2_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_xg2, plot=False, model_aif=True)
fair_xg2_pr
Out[148]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x2552609fb48>
Computing fairness of the model.
Out[148]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.811688 0.798822 -0.181887 -0.153455 -0.245958 -0.042111 [0.893] 0.115516
#
In [149]:
y_pred = debiased_model_pr_xg2.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [150]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_xgb2.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_xgb2.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [151]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_xg2 = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_xg2 = EOPP_xg2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg2_eopp = EOPP_xg2.predict(data_orig_test_pred)
fair_xg2_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg2_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [152]:
fair_xg2_eo
Out[152]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.00000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.665 0.780328 0.98916 -0.009132 -0.021341 -0.016055 -0.053019 [0.8240000000000001] 0.149962
In [153]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_xg2 = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_xg2 = CPP_xg2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg2_cpp = CPP_xg2.predict(data_orig_test_pred)
fair_xg2_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg2_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [154]:
fair_xg2_ceo
Out[154]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.67 0.791139 0.633972 -0.363521 -0.292683 -0.444034 -0.020802 [0.908] 0.118718
In [155]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_xg2 = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_xg2 = ROC_xg2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_xg2_roc = ROC_xg2.predict(data_orig_test_pred)
fair_xg2_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_xg2_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [156]:
fair_xg2_roc
Out[156]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000 1.00000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.67 0.736 1.06813 0.037798 0.051067 -0.039082 -0.071537 [0.7979999999999998] 0.293243
In [ ]:
 

4. RANDOM FOREST CLASSIFIER MODEL WITH OUT HYPER-PARAMETER TUNING

In [157]:
#Creating the classifier
rf_model2 = RandomForestClassifier(random_state=40)
model_rf2=rf_model2
In [158]:
mdl_rf2 = model_rf2.fit(data_orig_train.features, data_orig_train.labels.ravel())
In [159]:
from sklearn.metrics import confusion_matrix
conf_mat_rf2 = confusion_matrix(data_orig_test.labels.ravel(), model_rf2.predict(data_orig_test.features))
conf_mat_rf2
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), model_rf2.predict(data_orig_test.features)))
Out[159]:
array([[ 20,  43],
       [ 22, 115]], dtype=int64)
0.675
In [160]:
unique, counts = np.unique(data_orig_test.labels.ravel(), return_counts=True)
dict(zip(unique, counts))
Out[160]:
{0.0: 63, 1.0: 137}

4.a. Model Explainability/interpretability

4.a.1 Using SHAP (SHapley Additive exPlanations)

In [161]:
import shap
rf_shap_values_t2 = shap.KernelExplainer(mdl_rf2.predict,data_orig_train.features)
Using 800 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.

Test data interpretation

In [162]:
rf_explainer2 = shap.KernelExplainer(mdl_rf2.predict, data_orig_test.features)
rf_shap_values2 = rf_explainer2.shap_values(data_orig_test.features,nsamples=10)
#https://towardsdatascience.com/explain-any-models-with-the-shap-values-use-the-kernelexplainer-79de9464897a
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [163]:
rf_shap_values2
Out[163]:
array([[ 0.     ,  0.     ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.     ,  0.097  , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.     ,  0.     , ...,  0.21   ,  0.     ,  0.     ],
       ...,
       [ 0.1575 ,  0.0525 ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.125  ,  0.085  , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     , -0.045  , -0.17125, ...,  0.     ,  0.     ,  0.     ]])
In [164]:
rf_explainer2.expected_value
rf_shap_values2
Out[164]:
0.7899999999999999
Out[164]:
array([[ 0.     ,  0.     ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.     ,  0.097  , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.     ,  0.     , ...,  0.21   ,  0.     ,  0.     ],
       ...,
       [ 0.1575 ,  0.0525 ,  0.     , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     ,  0.125  ,  0.085  , ...,  0.     ,  0.     ,  0.     ],
       [ 0.     , -0.045  , -0.17125, ...,  0.     ,  0.     ,  0.     ]])
In [165]:
shap.initjs()
shap.force_plot(rf_explainer2.expected_value,rf_shap_values2[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit')
#https://github.com/slundberg/shap
#https://github.com/slundberg/shap/issues/279
Out[165]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [166]:
shap.initjs()
shap.force_plot(rf_explainer2.expected_value,rf_shap_values2[1,:], data_orig_test.features[1],data_orig_test.feature_names,link='logit')
Out[166]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [167]:
shap.initjs()
shap.force_plot(rf_explainer2.expected_value,rf_shap_values2[2,:], data_orig_test.features[2],data_orig_test.feature_names,link='logit')
Out[167]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [168]:
data_orig_test.feature_names
Out[168]:
['CurrentAcc_None',
 'NumMonths',
 'CreditHistory_Delay',
 'CreditHistory_none/paid',
 'Collateral_savings/life_insurance',
 'CurrentAcc_GE200',
 'Purpose_repairs',
 'Purpose_radio/tv',
 'Gender',
 'Age']
In [169]:
shap.force_plot(rf_explainer2.expected_value,
                rf_shap_values2, data_orig_test.features[:,:],feature_names = data_orig_test.feature_names)
Out[169]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [170]:
p = shap.summary_plot(rf_shap_values2, data_orig_test.features, feature_names=data_orig_test.feature_names,plot_type="bar") 
display(p)
None

Variables with higher impact are displayed at the top.

In [171]:
shap.plots._waterfall.waterfall_legacy(rf_explainer2.expected_value, rf_shap_values2[0,:],feature_names=data_orig_test.feature_names)

Interpretation of graph: https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html

f(x)- model output impacted by features; E(f(x))- expected output.

One the fundemental properties of Shapley values is that they always sum up to the difference between the game outcome when all players are present and the game outcome when no players are present. For machine learning models this means that SHAP values of all the input features will always sum up to the difference between baseline (expected) model output and the current model output for the prediction being explained.

In [172]:
shap.plots._waterfall.waterfall_legacy(rf_explainer2.expected_value, rf_shap_values2[1],feature_names=data_orig_test.feature_names)

4.a.2 Using ELI5

In [173]:
#!pip install eli5
import eli5
from eli5.sklearn import PermutationImportance
In [174]:
perm_rf2 = PermutationImportance(mdl_rf2).fit(data_orig_test.features, data_orig_test.labels.ravel())
In [175]:
data_orig_test.labels[:10,:].ravel()
Out[175]:
array([1., 0., 1., 0., 1., 0., 1., 1., 1., 1.])

Feature Importance

In [176]:
perm_imp_11=eli5.show_weights(perm_rf2,feature_names = data_orig_test.feature_names)
perm_imp_11
plt.show()
Out[176]:
Weight Feature
0.0410 ± 0.0471 CurrentAcc_None
0.0270 ± 0.0647 NumMonths
0.0240 ± 0.0232 CurrentAcc_GE200
0.0230 ± 0.0301 CreditHistory_none/paid
0.0180 ± 0.0280 Age
0.0110 ± 0.0371 Purpose_radio/tv
-0.0020 ± 0.0162 Collateral_savings/life_insurance
-0.0040 ± 0.0354 Gender
-0.0040 ± 0.0075 Purpose_repairs
-0.0130 ± 0.0136 CreditHistory_Delay

Explaining individual predictions

In [177]:
show_prediction(mdl_rf2, data_orig_test.features[0], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[177]:

y=1.0 (probability 0.900) top features

Contribution? Feature Value
+0.707 <BIAS> 1.000
+0.221 CurrentAcc_GE200 1.000
+0.080 Purpose_radio/tv 1.000
+0.026 Gender 1.000
+0.000 CreditHistory_Delay 0.000
-0.000 Purpose_repairs 0.000
-0.003 Collateral_savings/life_insurance 0.000
-0.008 CreditHistory_none/paid 1.000
-0.011 NumMonths 36.000
-0.022 Age 0.000
-0.090 CurrentAcc_None 0.000
In [178]:
from eli5 import show_prediction
show_prediction(mdl_rf2, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[178]:

y=1.0 (probability 0.536) top features

Contribution? Feature Value
+0.707 <BIAS> 1.000
+0.066 Purpose_radio/tv 1.000
+0.048 Age 1.000
+0.022 Gender 1.000
+0.004 Purpose_repairs 0.000
+0.002 CreditHistory_Delay 0.000
-0.003 CurrentAcc_GE200 0.000
-0.007 Collateral_savings/life_insurance 0.000
-0.030 CreditHistory_none/paid 1.000
-0.134 CurrentAcc_None 0.000
-0.140 NumMonths 36.000

4.b. Measuring fairness

Of Baseline model

In [179]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(mdl_rf2, X_test, y_test)
In [180]:
fair = get_fair_metrics_and_plot(filename, data_orig_test, mdl_rf2)
fair
Computing fairness of the model.
Out[180]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.675 0.779661 0.856173 -0.118214 -0.084096 -0.181279 -0.064688 0.817 0.169886
In [181]:
type(data_orig_train)
Out[181]:
aif360.datasets.binary_label_dataset.BinaryLabelDataset

PRE PROCESSING

In [182]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_rf2 = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_rf2_rw = RW_rf2.fit_transform(data_orig_train)

#train and save model
rf2_transf_rw = model_rf2.fit(data_transf_train_rf2_rw.features,
                     data_transf_train_rf2_rw.labels.ravel())

data_transf_test_rf2_rw = RW_rf2.transform(data_orig_test)
fair_rf2_rw = get_fair_metrics_and_plot(filename, data_transf_test_rf2_rw, rf2_transf_rw, plot=False)
Computing fairness of the model.
In [183]:
fair_rf_rw
Out[183]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.00000 0.000000 0.000000 1.000 0.000000
Gender 0.684158 0.808162 0.966952 -0.032539 -0.02439 -0.069118 -0.195898 0.975 0.062379
In [184]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR_rf2 = DisparateImpactRemover()
data_transf_train_rf2_dir = DIR_rf2.fit_transform(data_orig_train)

# Train and save the model
rf2_transf_dir = model_rf2.fit(data_transf_train_rf2_dir.features,data_transf_train_rf2_dir.labels.ravel())
In [185]:
fair_dir_rf2_dir = get_fair_metrics_and_plot(filename,data_orig_test, rf2_transf_dir, plot=False)
fair_dir_rf2_dir
Computing fairness of the model.
Out[185]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.665 0.778878 0.802662 -0.173009 -0.080539 -0.277962 -0.129122 0.846 0.155172
In [186]:
conf_mat_rf2_dir = confusion_matrix(data_orig_test.labels.ravel(), rf2_transf_dir.predict(data_orig_test.features))
conf_mat_rf2_dir
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), rf2_transf_dir.predict(data_orig_test.features)))
Out[186]:
array([[ 15,  48],
       [ 19, 118]], dtype=int64)
0.665

INPROCESSING

In [187]:
#!pip install --user --upgrade tensorflow==1.15.0
#2.2.0
#!pip uninstall tensorflow
In [188]:
#!pip install "tensorflow==1.15"
#!pip install --upgrade tensorflow-hub
In [189]:
#%tensorflow_version 1.15
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [190]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [191]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [192]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope1',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_rf2_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
#train and save the model
        debiased_model_rf2_ad.fit(data_orig_train)
        fair_rf2_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_rf2_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.979881; batch adversarial loss: 0.681213
epoch 1; iter: 0; batch classifier loss: 0.924482; batch adversarial loss: 0.710266
epoch 2; iter: 0; batch classifier loss: 0.738003; batch adversarial loss: 0.690696
epoch 3; iter: 0; batch classifier loss: 0.672916; batch adversarial loss: 0.644412
epoch 4; iter: 0; batch classifier loss: 0.780086; batch adversarial loss: 0.720220
epoch 5; iter: 0; batch classifier loss: 0.742021; batch adversarial loss: 0.677548
epoch 6; iter: 0; batch classifier loss: 0.725334; batch adversarial loss: 0.688967
epoch 7; iter: 0; batch classifier loss: 0.799759; batch adversarial loss: 0.674640
epoch 8; iter: 0; batch classifier loss: 0.844561; batch adversarial loss: 0.691688
epoch 9; iter: 0; batch classifier loss: 0.636004; batch adversarial loss: 0.642598
Out[192]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x2552a3e9e48>
Computing fairness of the model.
In [193]:
fair_rf2_ad
Out[193]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.0 0.0 0.0 0.0 0.000000 1 0.000000
Gender 0.685 0.813056 1.0 0.0 0.0 0.0 -0.101725 [1.0] 0.058241
In [194]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_pr_rf2 = PrejudiceRemover()

# Train and save the model
debiased_model_pr_rf2.fit(data_orig_train)

fair_rf2_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_pr_rf2, plot=False, model_aif=True)
fair_rf2_pr
Out[194]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x2552a762bc8>
Computing fairness of the model.
Out[194]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.811688 0.798822 -0.181887 -0.153455 -0.245958 -0.042111 [0.893] 0.115516
#
In [195]:
y_pred = debiased_model_pr_rf2.predict(data_orig_test)


data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [196]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = mdl_rf2.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = mdl_rf2.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [197]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_rf2 = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_rf2 = EOPP_rf2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf2_eopp = EOPP_rf2.predict(data_orig_test_pred)
fair_rf2_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf2_eopp, pred_is_dataset=True)
Computing fairness of the model.
In [198]:
fair_rf2_eo
Out[198]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.645 0.773163 1.013889 0.012177 -0.007368 0.027855 -0.055048 [0.875] 0.140876
In [199]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_rf2 = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=42)

CPP_rf2 = CPP_rf2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf2_cpp = CPP_rf2.predict(data_orig_test_pred)
fair_rf2_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf2_cpp, pred_is_dataset=True)
Computing fairness of the model.
In [200]:
fair_rf2_ceo
Out[200]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.00000
Gender 0.685 0.802508 0.713477 -0.282598 -0.184705 -0.390045 -0.101725 [0.917] 0.10261
In [201]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_rf2 = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_rf2 = ROC_rf2.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_rf2_roc = ROC_rf2.predict(data_orig_test_pred)
fair_rf2_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_rf2_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [202]:
fair_rf2_roc
Out[202]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.635 0.672646 1.108075 0.045155 -0.015498 0.027636 0.058092 [0.78] 0.396715

5. KNN

In [203]:
from sklearn import neighbors
n_neighbors = 15
knn = neighbors.KNeighborsClassifier(n_neighbors, weights='distance')
In [204]:
knn.fit(data_orig_train.features, data_orig_train.labels.ravel())
Out[204]:
KNeighborsClassifier(n_neighbors=15, weights='distance')
In [205]:
conf_mat_knn = confusion_matrix(data_orig_test.labels.ravel(), knn.predict(data_orig_test.features))
conf_mat_knn
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), knn.predict(data_orig_test.features)))
Out[205]:
array([[ 26,  37],
       [ 30, 107]], dtype=int64)
0.665

5.a. Model Explainability/interpretability

5.a.1 Using SHAP (SHapley Additive exPlanations)

In [206]:
knn_explainer = shap.KernelExplainer(knn.predict, data_orig_test.features)
knn_shap_values = knn_explainer.shap_values(data_orig_test.features,nsamples=10)
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [207]:
#shap.dependence_plot(0, knn_shap_values, data_orig_test.features)
In [208]:
# plot the SHAP values for the 0th observation 
shap.force_plot(knn_explainer.expected_value,knn_shap_values[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit') 
Out[208]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [209]:
# plot the SHAP values for the 1st observation 
shap.force_plot(knn_explainer.expected_value,knn_shap_values[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit') 
Out[209]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [210]:
shap.force_plot(knn_explainer.expected_value, knn_shap_values,  data_orig_test.feature_names,link='logit')
Out[210]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [211]:
shap.summary_plot(knn_shap_values, data_orig_test.features,feature_names=data_orig_test.feature_names, plot_type="violin")

Feature Importance

perm_imp_11=eli5.show_weights(knn,feature_names = data_orig_test.feature_names) perm_imp_11 plt.show()

Explaining individual predictions

In [212]:
from eli5 import show_prediction
show_prediction(knn, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[212]:
Error: estimator KNeighborsClassifier(n_neighbors=15, weights='distance') is not supported

5.b. Measuring fairness

Of Baseline model

In [213]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(knn, X_test, y_test)
In [214]:
fair = get_fair_metrics_and_plot(filename, data_orig_test, knn)
fair
Computing fairness of the model.
Out[214]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.00000
Gender 0.665 0.761566 0.772487 -0.174531 -0.174797 -0.215091 0.023085 0.79 0.21339

PRE PROCESSING

In [215]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_knn = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_knn = RW_knn.fit_transform(data_orig_train)

# Train and save the model
knn_transf_rw = knn.fit(data_transf_train_knn.features,
                     data_transf_train_knn.labels.ravel())

data_transf_test_knn_rw = RW_knn.transform(data_orig_test)
fair_knn_rw = get_fair_metrics_and_plot(filename, data_transf_test_knn_rw, knn_transf_rw, plot=False)
Computing fairness of the model.
In [216]:
fair_knn_rw
Out[216]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.00000
Gender 0.650903 0.748534 0.802117 -0.150164 -0.174797 -0.215091 0.000303 0.79 0.21339
In [217]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR = DisparateImpactRemover()
data_transf_train_knn_dir = DIR.fit_transform(data_orig_train)
# Train and save the model
knn_transf_dir = knn.fit(data_transf_train_knn_dir.features,
                     data_transf_train_knn_dir.labels.ravel())
In [218]:
fair_knn_dir = get_fair_metrics_and_plot(filename, data_orig_test, knn_transf_dir, plot=False)
fair_knn_dir
Computing fairness of the model.
Out[218]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.000000
Gender 0.62 0.739726 0.596092 -0.351344 -0.324441 -0.389913 0.062912 0.85 0.212702

INPROCESSING

In [219]:
#!pip install tensorflow
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [220]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [221]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [222]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope4',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_knn_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
        debiased_model_knn_ad.fit(data_orig_train)
        fair_knn_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_knn_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.876255; batch adversarial loss: 0.720002
epoch 1; iter: 0; batch classifier loss: 0.856246; batch adversarial loss: 0.847130
epoch 2; iter: 0; batch classifier loss: 0.709311; batch adversarial loss: 0.884368
epoch 3; iter: 0; batch classifier loss: 0.647673; batch adversarial loss: 0.944726
epoch 4; iter: 0; batch classifier loss: 0.663243; batch adversarial loss: 1.008083
epoch 5; iter: 0; batch classifier loss: 0.807228; batch adversarial loss: 0.984833
epoch 6; iter: 0; batch classifier loss: 0.859303; batch adversarial loss: 0.905210
epoch 7; iter: 0; batch classifier loss: 0.826720; batch adversarial loss: 0.956221
epoch 8; iter: 0; batch classifier loss: 0.802898; batch adversarial loss: 0.910227
epoch 9; iter: 0; batch classifier loss: 0.815773; batch adversarial loss: 0.909956
Out[222]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x2552c9d96c8>
Computing fairness of the model.
In [223]:
fair_knn_ad
Out[223]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.0 0.0 0.0 0.0 0.000000 1 0.000000
Gender 0.685 0.813056 1.0 0.0 0.0 0.0 -0.101725 [1.0] 0.058241
In [224]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_knn_pr = PrejudiceRemover()

# Train and save the model
debiased_model_knn_pr.fit(data_orig_train)

fair_knn_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_knn_pr, plot=False, model_aif=True)
fair_knn_pr
Out[224]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x2552c95a488>
Computing fairness of the model.
Out[224]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.811688 0.798822 -0.181887 -0.153455 -0.245958 -0.042111 [0.893] 0.115516
#
In [225]:
y_pred = debiased_model_knn_pr.predict(data_orig_test)

data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [226]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = knn.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = knn.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [227]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_knn = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_knn = EOPP_knn.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_knn_eop = EOPP_knn.predict(data_orig_test_pred)
fair_knn_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_knn_eop, pred_is_dataset=True)
Computing fairness of the model.
In [228]:
fair_knn_eo
Out[228]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.675 0.790997 1.000583 0.000507 0.041413 -0.05314 -0.140791 [0.87] 0.128574
In [229]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_knn = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=40)

CPP_knn = CPP_knn.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_knn_cp = CPP_knn.predict(data_orig_test_pred)
fair_knn_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_knn_cp, pred_is_dataset=True)
Computing fairness of the model.
In [230]:
fair_knn_ceo
Out[230]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.00000 0.00000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.645 0.776025 0.62963 -0.37037 -0.341463 -0.401501 0.046423 [0.919] 0.130436
In [231]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_knn = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_knn = ROC_knn.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_knn_roc = ROC_knn.predict(data_orig_test_pred) 
fair_knn_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_knn_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [232]:
fair_knn_roc
Out[232]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.00000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.55 0.575472 1.05144 0.019026 -0.008892 0.000938 0.043125 [0.778] 0.511702

6. Logistic Regression

In [233]:
from sklearn.linear_model import LogisticRegression

lr = LogisticRegression()
In [234]:
lr.fit(data_orig_train.features, data_orig_train.labels.ravel())
Out[234]:
LogisticRegression()
In [235]:
conf_mat_lr = confusion_matrix(data_orig_test.labels.ravel(), lr.predict(data_orig_test.features))
conf_mat_lr
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), lr.predict(data_orig_test.features)))
Out[235]:
array([[ 19,  44],
       [ 12, 125]], dtype=int64)
0.72

6.a. Model Explainability/interpretability

6.a.1 Using SHAP (SHapley Additive exPlanations)

In [236]:
lr_explainer = shap.KernelExplainer(lr.predict, data_orig_test.features)
lr_shap_values = lr_explainer.shap_values(data_orig_test.features,nsamples=10)
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [237]:
# plot the SHAP values for the 0th observation 
shap.force_plot(lr_explainer.expected_value,lr_shap_values[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit') 
Out[237]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [238]:
# plot the SHAP values for the 1st observation 
shap.force_plot(lr_explainer.expected_value,lr_shap_values[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit') 
Out[238]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [239]:
shap.force_plot(lr_explainer.expected_value, lr_shap_values,  data_orig_test.feature_names,link='logit')
Out[239]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [240]:
shap.summary_plot(lr_shap_values, data_orig_test.features,feature_names=data_orig_test.feature_names, plot_type="violin")

Feature Importance

perm_imp_11=eli5.show_weights(knn,feature_names = data_orig_test.feature_names) perm_imp_11 plt.show()

Explaining individual predictions

In [241]:
from eli5 import show_prediction
show_prediction(lr, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[241]:

y=1.0 (probability 0.593, score 0.376) top features

Contribution? Feature Value
+0.771 <BIAS> 1.000
+0.589 Purpose_radio/tv 1.000
+0.518 Age 1.000
+0.506 Gender 1.000
-0.659 CreditHistory_none/paid 1.000
-1.349 NumMonths 36.000

6.b. Measuring fairness

Of Baseline model

In [242]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(lr, X_test, y_test)
In [243]:
fair_lr = get_fair_metrics_and_plot(filename, data_orig_test, lr)
fair_lr
Computing fairness of the model.
Out[243]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.000000
Gender 0.72 0.816993 0.757856 -0.218924 -0.188262 -0.291823 -0.028412 0.89 0.114498

PRE PROCESSING

In [244]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_lr = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_lr = RW_lr.fit_transform(data_orig_train)

# Train and save the model
lr_transf_rw = lr.fit(data_transf_train_knn.features,
                     data_transf_train_knn.labels.ravel())

data_transf_test_lr_rw = RW_lr.transform(data_orig_test)
fair_lr_rw = get_fair_metrics_and_plot(filename, data_transf_test_lr_rw, lr_transf_rw, plot=False)
Computing fairness of the model.
In [245]:
fair_lr_rw
Out[245]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.000000
Gender 0.699632 0.801753 0.790647 -0.187588 -0.188262 -0.291823 -0.070141 0.89 0.114498
In [246]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR = DisparateImpactRemover()
data_transf_train_lr_dir = DIR.fit_transform(data_orig_train)
# Train and save the model
lr_transf_dir = lr.fit(data_transf_train_lr_dir.features,
                     data_transf_train_lr_dir.labels.ravel())
In [247]:
fair_lr_dir = get_fair_metrics_and_plot(filename, data_orig_test, lr_transf_dir, plot=False)
fair_lr_dir
Computing fairness of the model.
Out[247]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.00 0.000000
Gender 0.72 0.816993 0.757856 -0.218924 -0.188262 -0.291823 -0.028412 0.89 0.114498

INPROCESSING

In [248]:
#!pip install tensorflow
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [249]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [250]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [251]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope5',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_lr_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
        debiased_model_lr_ad.fit(data_orig_train)
        fair_lr_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_lr_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.736848; batch adversarial loss: 0.630137
epoch 1; iter: 0; batch classifier loss: 0.783892; batch adversarial loss: 0.717277
epoch 2; iter: 0; batch classifier loss: 0.752375; batch adversarial loss: 0.609749
epoch 3; iter: 0; batch classifier loss: 0.718494; batch adversarial loss: 0.586989
epoch 4; iter: 0; batch classifier loss: 0.688502; batch adversarial loss: 0.665726
epoch 5; iter: 0; batch classifier loss: 0.661678; batch adversarial loss: 0.605836
epoch 6; iter: 0; batch classifier loss: 0.631300; batch adversarial loss: 0.594107
epoch 7; iter: 0; batch classifier loss: 0.599903; batch adversarial loss: 0.607220
epoch 8; iter: 0; batch classifier loss: 0.656513; batch adversarial loss: 0.699276
epoch 9; iter: 0; batch classifier loss: 0.607014; batch adversarial loss: 0.578153
Out[251]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x2552c86e108>
Computing fairness of the model.
In [252]:
fair_lr_ad
Out[252]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.705 0.820669 0.952008 -0.046677 -0.013974 -0.092371 -0.125063 [0.968] 0.066931
In [253]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_lr_pr = PrejudiceRemover()

# Train and save the model
debiased_model_lr_pr.fit(data_orig_train)

fair_lr_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_lr_pr, plot=False, model_aif=True)
fair_lr_pr
Out[253]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x2552c75c188>
Computing fairness of the model.
Out[253]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.811688 0.798822 -0.181887 -0.153455 -0.245958 -0.042111 [0.893] 0.115516
#
In [254]:
y_pred = debiased_model_lr_pr.predict(data_orig_test)

data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [255]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = lr.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = lr.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [256]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_lr = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_lr = EOPP_lr.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_lr_eop = EOPP_lr.predict(data_orig_test_pred)
fair_lr_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_lr_eop, pred_is_dataset=True)
Computing fairness of the model.
In [257]:
fair_lr_eo
Out[257]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.00000 1 0.000000
Gender 0.69 0.797386 1.011141 0.009386 -0.017785 0.005723 -0.04414 [0.823] 0.132424
In [258]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_lr = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=40)

CPP_lr = CPP_lr.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_lr_cp = CPP_lr.predict(data_orig_test_pred)
fair_lr_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_lr_cp, pred_is_dataset=True)
Computing fairness of the model.
In [259]:
fair_lr_ceo
Out[259]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.00000 0.000000 1 0.000000
Gender 0.695 0.806349 0.709483 -0.280568 -0.209096 -0.37224 -0.062659 [0.902] 0.106886
In [260]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_lr = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_lr = ROC_lr.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_lr_roc = ROC_lr.predict(data_orig_test_pred) 
fair_lr_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_lr_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [261]:
fair_lr_roc
Out[261]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.00000
Gender 0.655 0.701299 1.033769 0.015728 -0.043191 -0.006211 0.060122 [0.8570000000000002] 0.35686
In [ ]:
 

7. SVM

In [262]:
from sklearn.svm import SVC
#gs = grid_search_cv.best_estimator_
svm = SVC(C=0.85, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
    decision_function_shape='ovr', degree=3, gamma='scale', kernel='linear',
    max_iter=-1, random_state=42, shrinking=True, tol=0.001, probability=True,
    verbose=False)
svm.fit(data_orig_train.features, data_orig_train.labels.ravel())
Out[262]:
SVC(C=0.85, kernel='linear', probability=True, random_state=42)
In [263]:
conf_mat_svm = confusion_matrix(data_orig_test.labels.ravel(), svm.predict(data_orig_test.features))
conf_mat_svm
from sklearn.metrics import accuracy_score
print(accuracy_score(data_orig_test.labels.ravel(), svm.predict(data_orig_test.features)))
Out[263]:
array([[ 14,  49],
       [  6, 131]], dtype=int64)
0.725

7.a. Model Explainability/interpretability

7.a.1 Using SHAP (SHapley Additive exPlanations)

In [264]:
svm_explainer = shap.KernelExplainer(svm.predict, data_orig_test.features)
svm_shap_values = svm_explainer.shap_values(data_orig_test.features,nsamples=10)
Using 200 background data samples could cause slower run times. Consider using shap.sample(data, K) or shap.kmeans(data, K) to summarize the background as K samples.
In [265]:
# plot the SHAP values for the 0th observation 
shap.force_plot(svm_explainer.expected_value,svm_shap_values[0,:],  data_orig_test.features[0],data_orig_test.feature_names,link='logit') 
Out[265]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [266]:
# plot the SHAP values for the 1st observation 
shap.force_plot(svm_explainer.expected_value,svm_shap_values[1,:],  data_orig_test.features[1],data_orig_test.feature_names,link='logit') 
Out[266]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [267]:
shap.force_plot(svm_explainer.expected_value, svm_shap_values,  data_orig_test.feature_names,link='logit')
Out[267]:
Visualization omitted, Javascript library not loaded!
Have you run `initjs()` in this notebook? If this notebook was from another user you must also trust this notebook (File -> Trust notebook). If you are viewing this notebook on github the Javascript has been stripped for security. If you are using JupyterLab this error is because a JupyterLab extension has not yet been written.
In [268]:
shap.summary_plot(svm_shap_values, data_orig_test.features,feature_names=data_orig_test.feature_names, plot_type="violin")

Feature Importance

perm_imp_11=eli5.show_weights(knn,feature_names = data_orig_test.feature_names) perm_imp_11 plt.show()

Explaining individual predictions

In [269]:
from eli5 import show_prediction
show_prediction(svm, data_orig_test.features[1], show_feature_values=True,feature_names = data_orig_test.feature_names)
Out[269]:

y=1.0 (probability 0.597, score 0.384) top features

Contribution? Feature Value
+0.730 <BIAS> 1.000
+0.462 Age 1.000
+0.462 Gender 1.000
+0.462 Purpose_radio/tv 1.000
-0.346 CreditHistory_none/paid 1.000
-1.385 NumMonths 36.000

7.b. Measuring fairness

Of Baseline model

In [270]:
import pandas as pd
import csv
import os
import numpy as np
import sys
from aif360.metrics import *
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, roc_curve, auc
plot_model_performance(svm, X_test, y_test)
In [271]:
fair_svm = get_fair_metrics_and_plot(filename, data_orig_test, svm)
fair_svm
Computing fairness of the model.
Out[271]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.00000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.725 0.826498 0.848608 -0.14206 -0.111535 -0.206537 -0.072298 0.919 0.084797

PRE PROCESSING

In [272]:
### Reweighing
from aif360.algorithms.preprocessing import Reweighing

RW_svm = Reweighing(unprivileged_groups=unprivileged_groups, privileged_groups=privileged_groups)

data_transf_train_svm = RW_svm.fit_transform(data_orig_train)

# Train and save the model
svm_transf_rw = svm.fit(data_transf_train_knn.features,
                     data_transf_train_knn.labels.ravel())

data_transf_test_svm_rw = RW_svm.transform(data_orig_test)
fair_svm_rw = get_fair_metrics_and_plot(filename, data_transf_test_svm_rw, svm_transf_rw, plot=False)
Computing fairness of the model.
In [273]:
fair_svm_rw
Out[273]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.706444 0.813123 0.875846 -0.115706 -0.111535 -0.206537 -0.132259 0.919 0.084797
In [274]:
from aif360.algorithms.preprocessing import DisparateImpactRemover

DIR = DisparateImpactRemover()
data_transf_train_svm_dir = DIR.fit_transform(data_orig_train)
# Train and save the model
svm_transf_dir = svm.fit(data_transf_train_svm_dir.features,
                     data_transf_train_svm_dir.labels.ravel())
In [275]:
fair_svm_dir = get_fair_metrics_and_plot(filename, data_orig_test, svm_transf_dir, plot=False)
fair_svm_dir
Computing fairness of the model.
Out[275]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1.000 0.000000
Gender 0.72 0.822785 0.854847 -0.135211 -0.101118 -0.201328 -0.079148 0.914 0.090076

INPROCESSING

In [276]:
#!pip install tensorflow
import tensorflow  as tf
#from tensorflow.compat.v1 import variable_scope
print('Using TensorFlow version', tf.__version__)
Using TensorFlow version 1.15.0
In [277]:
#sess = tf.compat.v1.Session()
#import tensorflow as tf

sess = tf.compat.v1.Session()
In [278]:
#import tensorflow as tf
#sess = tf.Session()
tf.compat.v1.reset_default_graph()
In [279]:
from aif360.algorithms.inprocessing.adversarial_debiasing import AdversarialDebiasing
#with tf.variable_scope('debiased_classifier',reuse=tf.AUTO_REUSE):
with tf.compat.v1.Session() as sess:
    with tf.variable_scope('scope6',reuse=tf.AUTO_REUSE) as scope:
        debiased_model_svm_ad = AdversarialDebiasing(privileged_groups = privileged_groups,
                          unprivileged_groups = unprivileged_groups,
                          scope_name=scope,
                          num_epochs=10,
                          debias=True,
                          sess=sess)
        debiased_model_svm_ad.fit(data_orig_train)
        fair_svm_ad = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_svm_ad, plot=False, model_aif=True)
epoch 0; iter: 0; batch classifier loss: 0.736848; batch adversarial loss: 0.630137
epoch 1; iter: 0; batch classifier loss: 0.783892; batch adversarial loss: 0.717277
epoch 2; iter: 0; batch classifier loss: 0.752375; batch adversarial loss: 0.609749
epoch 3; iter: 0; batch classifier loss: 0.718494; batch adversarial loss: 0.586989
epoch 4; iter: 0; batch classifier loss: 0.688502; batch adversarial loss: 0.665726
epoch 5; iter: 0; batch classifier loss: 0.661678; batch adversarial loss: 0.605836
epoch 6; iter: 0; batch classifier loss: 0.631300; batch adversarial loss: 0.594107
epoch 7; iter: 0; batch classifier loss: 0.599903; batch adversarial loss: 0.607220
epoch 8; iter: 0; batch classifier loss: 0.656513; batch adversarial loss: 0.699276
epoch 9; iter: 0; batch classifier loss: 0.607014; batch adversarial loss: 0.578153
Out[279]:
<aif360.algorithms.inprocessing.adversarial_debiasing.AdversarialDebiasing at 0x2553394e888>
Computing fairness of the model.
In [280]:
fair_svm_ad
Out[280]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.705 0.820669 0.952008 -0.046677 -0.013974 -0.092371 -0.125063 [0.968] 0.066931
In [281]:
from aif360.algorithms.inprocessing import PrejudiceRemover
debiased_model_svm_pr = PrejudiceRemover()

# Train and save the model
debiased_model_svm_pr.fit(data_orig_train)

fair_svm_pr = get_fair_metrics_and_plot(filename, data_orig_test, debiased_model_svm_pr, plot=False, model_aif=True)
fair_svm_pr
Out[281]:
<aif360.algorithms.inprocessing.prejudice_remover.PrejudiceRemover at 0x25533abdc48>
Computing fairness of the model.
Out[281]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.71 0.811688 0.798822 -0.181887 -0.153455 -0.245958 -0.042111 [0.893] 0.115516
#
In [282]:
y_pred = debiased_model_svm_pr.predict(data_orig_test)

data_orig_test_pred = data_orig_test.copy(deepcopy=True)
In [283]:
# Prediction with the original RandomForest model
scores = np.zeros_like(data_orig_test.labels)
scores = svm.predict_proba(data_orig_test.features)[:,1].reshape(-1,1)
data_orig_test_pred.scores = scores

preds = np.zeros_like(data_orig_test.labels)
preds = svm.predict(data_orig_test.features).reshape(-1,1)
data_orig_test_pred.labels = preds

def format_probs(probs1):
    probs1 = np.array(probs1)
    probs0 = np.array(1-probs1)
    return np.concatenate((probs0, probs1), axis=1)

POST PROCESSING

In [284]:
from aif360.algorithms.postprocessing import EqOddsPostprocessing
EOPP_svm = EqOddsPostprocessing(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups,
                             seed=40)
EOPP_svm = EOPP_svm.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_svm_eop = EOPP_svm.predict(data_orig_test_pred)
fair_svm_eo = fair_metrics(filename, data_orig_test, data_transf_test_pred_svm_eop, pred_is_dataset=True)
Computing fairness of the model.
In [285]:
fair_svm_eo
Out[285]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.000 1.000000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.715 0.821317 0.996101 -0.003551 -0.007114 -0.028942 -0.085997 [0.91] 0.085635
In [286]:
from aif360.algorithms.postprocessing import CalibratedEqOddsPostprocessing
cost_constraint = "fnr"
CPP_svm = CalibratedEqOddsPostprocessing(privileged_groups = privileged_groups,
                                     unprivileged_groups = unprivileged_groups,
                                     cost_constraint=cost_constraint,
                                     seed=40)

CPP_svm = CPP_svm.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_svm_cp = CPP_svm.predict(data_orig_test_pred)
fair_svm_ceo = fair_metrics(filename, data_orig_test, data_transf_test_pred_svm_cp, pred_is_dataset=True)
Computing fairness of the model.
In [287]:
fair_svm_ceo
Out[287]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.0 1.0000 1.000000 0.000000 0.000000 0.000000 0.000000 1 0.000000
Gender 0.7 0.8125 0.805359 -0.187976 -0.135925 -0.258732 -0.081177 [0.914] 0.091659
In [288]:
from aif360.algorithms.postprocessing import RejectOptionClassification
ROC_svm = RejectOptionClassification(privileged_groups = privileged_groups,
                             unprivileged_groups = unprivileged_groups)

ROC_svm = ROC_svm.fit(data_orig_test, data_orig_test_pred)
data_transf_test_pred_svm_roc = ROC_svm.predict(data_orig_test_pred) 
fair_svm_roc = fair_metrics(filename, data_orig_test, data_transf_test_pred_svm_roc, pred_is_dataset=True)
print('SUCCESS: completed 1 model.')
Computing fairness of the model.
SUCCESS: completed 1 model.
In [289]:
fair_svm_roc
Out[289]:
Accuracy F1 DI SPD EOD AOD ERD CNT TI
objective 1.00 1.000 1.00000 0.000000 0.000000 0.00000 0.000000 1 0.00000
Gender 0.69 0.752 1.06813 0.037798 -0.004573 0.00156 0.006596 [0.8470000000000002] 0.27749