2 parents ee94efe + 99c9224 commit 0d6ccd5
setup.py
@@ -4,12 +4,12 @@
 from setuptools import find_packages, setup

 INSTALL_REQUIRES = (
-    "click~=8.1",
+    "click>=8.1",
     "elex-solver>=2.0.1",
-    "pandas~=2.1",
-    "boto3~=1.28",
-    "python-dotenv~=1.0",
-    "scipy~=1.11",
+    "pandas>=2.2",
+    "boto3>=1.34",
+    "python-dotenv>=1.0",
+    "scipy>=1.14",
 )

 THIS_FILE_DIR = os.path.dirname(__file__)
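Note: the pins above move from compatible-release specifiers (~=) to minimum-version floors (>=). A minimal sketch of the difference, using the packaging library (an assumption for illustration; it is not part of this diff):

    # Illustrative only: shows what each specifier style admits.
    from packaging.specifiers import SpecifierSet

    compatible = SpecifierSet("~=2.1")   # equivalent to >=2.1, ==2.*
    minimum = SpecifierSet(">=2.1")      # anything at or above 2.1

    print("2.2.3" in compatible)  # True  -- still within the 2.x series
    print("3.0.0" in compatible)  # False -- ~= caps the major version
    print("3.0.0" in minimum)     # True  -- >= allows future major releases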
src/elexmodel/client.py
@@ -516,7 +516,8 @@ def compute_evaluation(self, historical_estimates, results, merge_on, group_by,
                     x[f"raw_results_{estimand}"], x[f"pred_{estimand}"], type_="mape"
                 ),
             }
-        )
+        ),
+        include_groups=False,
     )

     for alpha in prediction_intervals:
@@ -532,7 +533,8 @@ def compute_evaluation(self, historical_estimates, results, merge_on, group_by,
                 x[lower_string], x[upper_string], x[f"raw_results_{estimand}"]
         error_df = error_df.merge(alpha_df, left_index=True, right_index=True)
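The include_groups=False argument added above matches a pandas 2.2 change: DataFrameGroupBy.apply now deprecates passing the grouping columns into the applied function. A minimal sketch of the pattern, assuming pandas >= 2.2 and toy column names that are not from this repo:

    # Illustrative only: groupby.apply with include_groups=False (pandas >= 2.2).
    import pandas as pd

    df = pd.DataFrame({"group": ["a", "a", "b"], "value": [1, 2, 3]})

    # Without include_groups=False, pandas 2.2 warns because the grouping
    # column "group" would also be passed to the lambda.
    summary = df.groupby("group").apply(
        lambda x: pd.Series({"total": x["value"].sum()}),
        include_groups=False,
    )
    print(summary)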
src/elexmodel/distributions/GaussianModel.py
@@ -112,7 +112,8 @@ def to_aggregate(x):
                 "sigma_upper_bound": self.beta
                 * math_utils.boot_sigma(x.upper_bounds.values, conf=(3 + alpha) / 4, winsorize=self.winsorize),
         .reset_index(drop=drop_index)
src/elexmodel/models/GaussianElectionModel.py
@@ -174,7 +174,8 @@ def get_aggregate_prediction_intervals(
                 "nonreporting_weight_sum": np.sum(x[f"last_election_results_{estimand}"]),
                 "nonreporting_weight_ssum": np.sum(np.power(x[f"last_election_results_{estimand}"], 2)),
         .reset_index(drop=False)
tests/distributions/test_gaussian_model.py
@@ -74,11 +74,14 @@ def test_get_n_units_per_group_simple():

     # we now test this per group
     units_per_group = gaussian_model._get_n_units_per_group(df1, df2, ["c1"])
-
-    assert units_per_group.iloc[0]["n"] == 1.0
-    assert units_per_group.iloc[1]["n"] == 2.0
-    assert units_per_group.iloc[2]["n"] == 0.0  # d is third since merginging df2 onto df1 and 0.0 because not in df1
-    assert units_per_group.iloc[3]["n"] == 1.0
+    # setting the index to check the values
+    # since there's no way to guarantee merge order
+    units_per_group = units_per_group.set_index("c1")
+
+    assert units_per_group.loc["a"]["n"] == 1.0
+    assert units_per_group.loc["b"]["n"] == 2.0
+    assert units_per_group.loc["c"]["n"] == 1.0
+    assert units_per_group.loc["d"]["n"] == 0.0  # d is 0.0 because not in df1


 def test_get_n_units_per_group(va_governor_precinct_data):
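The rewritten assertions above trade positional iloc lookups for label-based loc lookups after set_index("c1"), since the merged result no longer has a guaranteed row order. A toy illustration of why that matters (data made up for the sketch, not the repo's fixtures):

    # Illustrative only: order-independent assertions via the group key.
    import pandas as pd

    units_per_group = pd.DataFrame({"c1": ["b", "a", "d"], "n": [2.0, 1.0, 0.0]})

    # Positional checks like .iloc[0] break if rows come back in a different
    # order; indexing by the group key keeps the assertions stable.
    units_per_group = units_per_group.set_index("c1")
    assert units_per_group.loc["a"]["n"] == 1.0
    assert units_per_group.loc["d"]["n"] == 0.0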
tests/models/test_nonparametric_election_model.py
@@ -206,10 +206,10 @@ def test_aggregation_simple():
     df3 = model._get_reporting_aggregate_votes(df1, df2, aggregate=["c1", "c2"], estimand=estimand)
     assert pd.DataFrame(
         {
-            "c1": ["a", "a", "b", "b", "a", "d"],
-            "c2": ["x", "y", "y", "z", "w", "t"],
-            f"results_{estimand}": [5.0, 9.0, 1.0, 15.0, 5.0, 1.0],
-            "reporting": [2.0, 1.0, 1.0, 3.0, 1.0, 1.0],
+            "c1": ["a", "a", "a", "b", "b", "d"],
+            "c2": ["w", "x", "y", "y", "z", "t"],
+            f"results_{estimand}": [5.0, 5.0, 9.0, 1.0, 15.0, 1.0],
+            "reporting": [1.0, 2.0, 1.0, 1.0, 3.0, 1.0],
         }
     ).equals(df3)
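The expected rows above are re-listed in sorted key order, presumably because the grouped aggregation now returns its keys sorted (the pandas default, sort=True) and DataFrame.equals compares rows positionally. A small sketch with made-up numbers, not the repo's data:

    # Illustrative only: equals() is order-sensitive; groupby sorts keys by default.
    import pandas as pd

    raw = pd.DataFrame({"c1": ["b", "a", "a"], "votes": [1.0, 5.0, 9.0]})
    agg = raw.groupby("c1", as_index=False).sum()

    unsorted_expected = pd.DataFrame({"c1": ["b", "a"], "votes": [1.0, 14.0]})
    sorted_expected = pd.DataFrame({"c1": ["a", "b"], "votes": [14.0, 1.0]})

    print(unsorted_expected.equals(agg))  # False: same content, different row order
    print(sorted_expected.equals(agg))    # True: rows listed in sorted key order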
tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist=py3.10,py3.11
+envlist=py3.11
 skipdist=True

 [base]