minimal fix to resolve #707 (#716)

Closed · wants to merge 9 commits
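For context, a minimal sketch of the behavior the new checks introduce: calling get_cumlift on a frame that contains missing values should now fail up front instead of silently feeding NaNs into the lift computation. The data below is made up for illustration; column names follow the get_cumlift defaults shown in the diff (outcome_col="y", treatment_col="w", treatment_effect_col="tau"). The same guard is added to get_qini, get_tmlegain, and get_tmleqini below.

```python
import numpy as np
import pandas as pd

from causalml.metrics.visualize import get_cumlift

# Made-up frame with a NaN in the outcome column; column names follow the
# get_cumlift defaults (outcome_col="y", treatment_col="w",
# treatment_effect_col="tau").
df = pd.DataFrame(
    {
        "y": [0.0, np.nan, 1.0, 0.0, 1.0],  # outcome, with one missing value
        "w": [1, 0, 1, 0, 1],               # treatment flag
        "tau": [0.2, 0.1, 0.4, 0.3, 0.2],   # true treatment effect
        "pred": [0.5, 0.1, 0.4, 0.3, 0.2],  # a model score column
    }
)

# With this PR applied, the NaN is caught by the new assertion instead of
# propagating into the cumulative lift curve.
try:
    get_cumlift(df)
except AssertionError:
    print("get_cumlift rejected the NaN input")
```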
12 changes: 12 additions & 0 deletions causalml/metrics/visualize.py
@@ -84,6 +84,10 @@ def get_cumlift(
or treatment_effect_col in df.columns
)

+assert not (
+    (df[[outcome_col, treatment_col, treatment_effect_col]].isnull().values.any())
+)

df = df.copy()
np.random.seed(random_seed)
random_cols = []
@@ -219,6 +223,10 @@ def get_qini(
or treatment_effect_col in df.columns
)

+assert not (
+    (df[[outcome_col, treatment_col, treatment_effect_col]].isnull().values.any())
+)

df = df.copy()
np.random.seed(random_seed)
random_cols = []
@@ -315,6 +323,8 @@ def get_tmlegain(
or p_col in df.columns
)

+assert not ((df[[outcome_col, treatment_col, p_col]].isnull().values.any()))

inference_col = [x for x in inference_col if x in df.columns]

# Initialize TMLE
@@ -421,6 +431,8 @@ def get_tmleqini(
or p_col in df.columns
)

+assert not ((df[[outcome_col, treatment_col, p_col]].isnull().values.any()))

inference_col = [x for x in inference_col if x in df.columns]

# Initialize TMLE
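For reference, the guard itself is a plain pandas idiom: select the relevant columns and check whether any value is null. A standalone sketch with made-up column names:

```python
import numpy as np
import pandas as pd

# Made-up frame mirroring the columns the new assertions inspect.
df = pd.DataFrame({"y": [1.0, np.nan], "w": [1, 0], "tau": [0.2, 0.1]})

cols = ["y", "w", "tau"]
has_missing = df[cols].isnull().values.any()
print(has_missing)  # True here, so the new assertion would fire for this frame
```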
4 changes: 1 addition & 3 deletions tests/test_cevae.py
@@ -38,9 +38,7 @@ def test_CEVAE():
# check the accuracy of the ITE estimates
ite = cevae.predict(X).flatten()

-auuc_metrics = pd.DataFrame(
-    {"ite": ite, "W": treatment, "y": y, "treatment_effect_col": tau}
-)
+auuc_metrics = pd.DataFrame({"ite": ite, "W": treatment, "y": y, "tau": tau})

cumgain = get_cumgain(
auuc_metrics, outcome_col="y", treatment_col="W", treatment_effect_col="tau"
2 changes: 1 addition & 1 deletion tests/test_ivlearner.py
@@ -71,7 +71,7 @@ def test_drivlearner():
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

48 changes: 24 additions & 24 deletions tests/test_meta_learners.py
@@ -123,7 +123,7 @@ def test_BaseSRegressor(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -179,7 +179,7 @@ def test_BaseTLearner(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -229,7 +229,7 @@ def test_BaseTRegressor(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -268,7 +268,7 @@ def test_MLPTRegressor(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -307,7 +307,7 @@ def test_XGBTRegressor(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -346,7 +346,7 @@ def test_BaseXLearner(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -398,7 +398,7 @@ def test_BaseXRegressor(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -439,7 +439,7 @@ def test_BaseXLearner_without_p(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -478,7 +478,7 @@ def test_BaseXRegressor_without_p(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -517,7 +517,7 @@ def test_BaseRLearner(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -568,7 +568,7 @@ def test_BaseRRegressor(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -607,7 +607,7 @@ def test_BaseRLearner_without_p(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -646,7 +646,7 @@ def test_BaseRRegressor_without_p(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

@@ -698,15 +698,15 @@ def test_BaseSClassifier(generate_classification_data):
"tau_pred": tau_pred.flatten(),
"W": df_test["treatment_group_key"].values,
CONVERSION: df_test[CONVERSION].values,
"treatment_effect_col": df_test["treatment_effect"].values,
"tau": df_test["treatment_effect"].values,
}
)

cumgain = get_cumgain(
auuc_metrics,
outcome_col=CONVERSION,
treatment_col="W",
treatment_effect_col="treatment_effect_col",
treatment_effect_col="tau",
)

# Check if the cumulative gain when using the model's prediction is
@@ -742,15 +742,15 @@ def test_BaseTClassifier(generate_classification_data):
"tau_pred": tau_pred.flatten(),
"W": df_test["treatment_group_key"].values,
CONVERSION: df_test[CONVERSION].values,
"treatment_effect_col": df_test["treatment_effect"].values,
"tau": df_test["treatment_effect"].values,
}
)

cumgain = get_cumgain(
auuc_metrics,
outcome_col=CONVERSION,
treatment_col="W",
treatment_effect_col="treatment_effect_col",
treatment_effect_col="tau",
)

# Check if the cumulative gain when using the model's prediction is
@@ -812,15 +812,15 @@ def test_BaseXClassifier(generate_classification_data):
"tau_pred": tau_pred.flatten(),
"W": df_test["treatment_group_key"].values,
CONVERSION: df_test[CONVERSION].values,
"treatment_effect_col": df_test["treatment_effect"].values,
"tau": df_test["treatment_effect"].values,
}
)

cumgain = get_cumgain(
auuc_metrics,
outcome_col=CONVERSION,
treatment_col="W",
treatment_effect_col="treatment_effect_col",
treatment_effect_col="tau",
)

# Check if the cumulative gain when using the model's prediction is
@@ -861,15 +861,15 @@ def test_BaseRClassifier(generate_classification_data):
"tau_pred": tau_pred.flatten(),
"W": df_test["treatment_group_key"].values,
CONVERSION: df_test[CONVERSION].values,
"treatment_effect_col": df_test["treatment_effect"].values,
"tau": df_test["treatment_effect"].values,
}
)

cumgain = get_cumgain(
auuc_metrics,
outcome_col=CONVERSION,
treatment_col="W",
treatment_effect_col="treatment_effect_col",
treatment_effect_col="tau",
)

# Check if the cumulative gain when using the model's prediction is
@@ -912,15 +912,15 @@ def test_BaseRClassifier_with_sample_weights(generate_classification_data):
"tau_pred": tau_pred.flatten(),
"W": df_test["treatment_group_key"].values,
CONVERSION: df_test[CONVERSION].values,
"treatment_effect_col": df_test["treatment_effect"].values,
"tau": df_test["treatment_effect"].values,
}
)

cumgain = get_cumgain(
auuc_metrics,
outcome_col=CONVERSION,
treatment_col="W",
treatment_effect_col="treatment_effect_col",
treatment_effect_col="tau",
)

# Check if the cumulative gain when using the model's prediction is
@@ -1005,7 +1005,7 @@ def test_BaseDRLearner(generate_regression_data):
"cate_p": cate_p.flatten(),
"W": treatment,
"y": y,
"treatment_effect_col": tau,
"tau": tau,
}
)

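The test updates above all converge on one pattern: the true effect is stored under a column literally named "tau", and that same name is passed as treatment_effect_col, so the name given to the metric matches a column that actually exists in the frame. A self-contained sketch of that pattern with synthetic data (values made up, not taken from the test fixtures):

```python
import numpy as np
import pandas as pd

from causalml.metrics import get_cumgain

rng = np.random.default_rng(42)
n = 1000

# Synthetic stand-ins for what the tests compute: a true effect (tau), a
# treatment flag (W), an outcome (y), and a CATE estimate (cate_p).
tau = rng.normal(0.5, 0.1, n)
treatment = rng.integers(0, 2, n)
y = rng.normal(0.0, 1.0, n) + treatment * tau
cate_p = tau + rng.normal(0.0, 0.05, n)

auuc_metrics = pd.DataFrame({"cate_p": cate_p, "W": treatment, "y": y, "tau": tau})

cumgain = get_cumgain(
    auuc_metrics, outcome_col="y", treatment_col="W", treatment_effect_col="tau"
)
print(cumgain.head())
```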
14 changes: 14 additions & 0 deletions tests/test_visualize.py
@@ -0,0 +1,14 @@
import pandas as pd
import numpy as np
import pytest
from causalml.metrics.visualize import get_cumlift


def test_visualize_get_cumlift_errors_on_nan():
df = pd.DataFrame(
[[0, np.nan, 0.5], [1, np.nan, 0.1], [1, 1, 0.4], [0, 1, 0.3], [1, 1, 0.2]],
columns=["w", "y", "pred"],
)

with pytest.raises(Exception):
get_cumlift(df)
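With pytest installed, the new check can be run on its own via `pytest tests/test_visualize.py`.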