
Commit 0a78cdb

Merge branch 'dev_1.16.0' into awp_adv
2 parents: a437631 + 19259d7


46 files changed, +3,760 -982 lines

.github/workflows/dockerhub.yml (+1 -1)

@@ -35,7 +35,7 @@ jobs:
 with:
   images: adversarialrobustnesstoolbox/releases
   tags: |
-    type=raw,value={{branch}}-1.14.1-{{sha}}
+    type=raw,value={{branch}}-1.15.1-{{sha}}
     type=semver,pattern={{version}}

 - name: Build and push Docker image

README-cn.md (+1 -1)

@@ -1,4 +1,4 @@
-# Adversarial Robustness Toolbox (ART) v1.14
+# Adversarial Robustness Toolbox (ART) v1.15
 <p align="center">
   <img src="docs/images/art_lfai.png?raw=true" width="467" title="ART logo">
 </p>

README.md (+1 -1)

@@ -1,4 +1,4 @@
-# Adversarial Robustness Toolbox (ART) v1.14
+# Adversarial Robustness Toolbox (ART) v1.15
 <p align="center">
   <img src="docs/images/art_lfai.png?raw=true" width="467" title="ART logo">
 </p>

art/__init__.py (+1 -1)

@@ -12,7 +12,7 @@
 from art import preprocessing

 # Semantic Version
-__version__ = "1.14.1"
+__version__ = "1.15.1"

 # pylint: disable=C0103


art/attacks/evasion/auto_conjugate_gradient.py (+4 -2)

@@ -463,7 +463,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n

 # self.eta = np.full((self.batch_size, 1, 1, 1), 2 * self.eps_step).astype(ART_NUMPY_DTYPE)
 _batch_size = x_k.shape[0]
-eta = np.full((_batch_size, 1, 1, 1), self.eps_step).astype(ART_NUMPY_DTYPE)
+eta = np.full((_batch_size,) + (1,) * len(self.estimator.input_shape), self.eps_step).astype(
+    ART_NUMPY_DTYPE
+)
 self.count_condition_1 = np.zeros(shape=(_batch_size,))
 gradk_1 = np.zeros_like(x_k)
 cgradk_1 = np.zeros_like(x_k)

@@ -650,4 +652,4 @@ def get_beta(gradk, gradk_1, cgradk_1):
 betak = -(_gradk * delta_gradk).sum(axis=1) / (
     (_cgradk_1 * delta_gradk).sum(axis=1) + np.finfo(ART_NUMPY_DTYPE).eps
 )
-return betak.reshape((_batch_size, 1, 1, 1))
+return betak.reshape((_batch_size,) + (1,) * (len(gradk.shape) - 1))
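
This change (together with the matching one in auto_projected_gradient_descent.py below) replaces the hard-coded image shape (batch, 1, 1, 1) of the per-sample step size with a shape derived from the estimator's input rank, so the step size broadcasts correctly for non-image inputs as well. A minimal sketch of the broadcasting idea, using illustrative shapes rather than ART's estimator API:

```python
import numpy as np

eps_step = 0.1
batch_size = 5

# Illustrative input shapes: a 1-D feature vector and a CHW image.
for input_shape in [(20,), (3, 32, 32)]:
    x_k = np.zeros((batch_size,) + input_shape, dtype=np.float32)

    # One step size per sample, padded with a singleton axis per input
    # dimension, so that `eta * grad` broadcasts for any input rank.
    eta = np.full((batch_size,) + (1,) * len(input_shape), eps_step, dtype=np.float32)

    grad = np.ones_like(x_k)
    assert (eta * grad).shape == x_k.shape  # works for 1-D and image inputs alike
```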

art/attacks/evasion/auto_projected_gradient_descent.py (+3 -1)

@@ -458,7 +458,9 @@ def generate(self, x: np.ndarray, y: Optional[np.ndarray] = None, **kwargs) -> n

 # modification for image-wise stepsize update
 _batch_size = x_k.shape[0]
-eta = np.full((_batch_size, 1, 1, 1), self.eps_step).astype(ART_NUMPY_DTYPE)
+eta = np.full((_batch_size,) + (1,) * len(self.estimator.input_shape), self.eps_step).astype(
+    ART_NUMPY_DTYPE
+)
 self.count_condition_1 = np.zeros(shape=(_batch_size,))

 for k_iter in trange(self.max_iter, desc="AutoPGD - iteration", leave=False, disable=not self.verbose):

art/attacks/poisoning/perturbations/image_perturbations.py (+2 -1)

@@ -21,7 +21,6 @@
 from typing import Optional, Tuple

 import numpy as np
-from PIL import Image


 def add_single_bd(x: np.ndarray, distance: int = 2, pixel_value: int = 1) -> np.ndarray:

@@ -112,6 +111,8 @@ def insert_image(
 :param blend: The blending factor
 :return: Backdoored image.
 """
+from PIL import Image
+
 n_dim = len(x.shape)
 if n_dim == 4:
     return np.array(
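
Moving `from PIL import Image` from module level into `insert_image` makes Pillow a lazy dependency: importing the perturbation module no longer requires Pillow, and the import cost is paid only when a backdoor image is actually inserted. A minimal sketch of the same deferred-import pattern (the helper below is illustrative, not part of ART):

```python
import numpy as np


def resize_patch(patch: np.ndarray, size: tuple) -> np.ndarray:
    """Resize a backdoor patch with Pillow, importing it only on first use."""
    # Deferred import: Pillow is required only if this code path runs.
    from PIL import Image

    image = Image.fromarray(np.uint8(patch))
    return np.asarray(image.resize(size))


# Usage: resize a random 8x8 patch to 16x16 (width, height).
print(resize_patch(np.random.rand(8, 8) * 255, (16, 16)).shape)  # (16, 16)
```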

art/defences/detector/poison/clustering_analyzer.py (+4 -5)

@@ -68,10 +68,9 @@ def analyze_by_size(self, separated_clusters: List[np.ndarray]) -> Tuple[np.ndar
 all_assigned_clean = []
 nb_classes = len(separated_clusters)
 nb_clusters = len(np.unique(separated_clusters[0]))
-summary_poison_clusters: np.ndarray = np.zeros((nb_classes, nb_clusters))
+summary_poison_clusters: np.ndarray = np.zeros((nb_classes, nb_clusters), dtype=object)

 for i, clusters in enumerate(separated_clusters):
-
     # assume that smallest cluster is poisonous and all others are clean
     sizes = np.bincount(clusters)
     total_dp_in_class = np.sum(sizes)

@@ -98,8 +97,8 @@ def analyze_by_size(self, separated_clusters: List[np.ndarray]) -> Tuple[np.ndar

 report["Class_" + str(i)] = report_class

-report["suspicious_clusters"] = report["suspicious_clusters"] + np.sum(summary_poison_clusters).item()
-return np.asarray(all_assigned_clean), summary_poison_clusters, report
+report["suspicious_clusters"] = report["suspicious_clusters"] + np.sum(summary_poison_clusters)
+return np.asarray(all_assigned_clean, dtype=object), summary_poison_clusters, report

 def analyze_by_distance(
     self,

@@ -187,7 +186,7 @@ def analyze_by_distance(
 assigned_clean = self.assign_class(clusters, clean_clusters, np.array(poison_clusters))
 all_assigned_clean.append(assigned_clean)

-all_assigned_clean_array = np.asarray(all_assigned_clean)
+all_assigned_clean_array = np.asarray(all_assigned_clean, dtype=object)
 return all_assigned_clean_array, summary_poison_clusters, report

 def analyze_by_relative_size(
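
The `dtype=object` arguments added to `np.asarray` keep the return values valid when the per-class cluster assignments have different lengths: NumPy 1.24 and later raise an error instead of implicitly building a ragged object array. A minimal sketch of the behaviour these changes accommodate (the data is illustrative):

```python
import numpy as np

# Cluster assignments for two classes with different numbers of samples.
all_assigned_clean = [np.array([1, 1, 0]), np.array([0, 1])]

# Without dtype=object, NumPy >= 1.24 raises
# "ValueError: setting an array element with a sequence" for ragged input.
ragged = np.asarray(all_assigned_clean, dtype=object)
print(ragged.shape)  # (2,) -- an array of per-class arrays, not a 2-D matrix
```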

art/defences/preprocessor/spatial_smoothing.py (+1 -1)

@@ -30,7 +30,7 @@
 from typing import Optional, Tuple

 import numpy as np
-from scipy.ndimage.filters import median_filter
+from scipy.ndimage import median_filter

 from art.utils import CLIP_VALUES_TYPE
 from art.defences.preprocessor.preprocessor import Preprocessor
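
The `scipy.ndimage.filters` namespace is deprecated in recent SciPy releases in favour of importing directly from `scipy.ndimage`; the filter function itself is unchanged. A small usage sketch with the updated import:

```python
import numpy as np
from scipy.ndimage import median_filter  # replaces scipy.ndimage.filters.median_filter

# Apply a 3x3 median filter to a single-channel "image".
x = np.random.rand(8, 8).astype(np.float32)
x_smoothed = median_filter(x, size=3)
print(x_smoothed.shape)  # (8, 8)
```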

art/defences/trainer/adversarial_trainer_trades_pytorch.py (+15 -5)

@@ -33,6 +33,7 @@
 from art.estimators.classification.pytorch import PyTorchClassifier
 from art.data_generators import DataGenerator
 from art.attacks.attack import EvasionAttack
+from art.utils import check_and_transform_label_format

 if TYPE_CHECKING:
     import torch

@@ -97,6 +98,15 @@ def fit(
 ind = np.arange(len(x))

 logger.info("Adversarial Training TRADES")
+y = check_and_transform_label_format(y, nb_classes=self.classifier.nb_classes)
+
+if validation_data is not None:
+    (x_test, y_test) = validation_data
+    y_test = check_and_transform_label_format(y_test, nb_classes=self.classifier.nb_classes)
+
+    x_preprocessed_test, y_preprocessed_test = self._classifier._apply_preprocessing(  # pylint: disable=W0212
+        x_test, y_test, fit=True
+    )

 for i_epoch in trange(nb_epochs, desc="Adversarial Training TRADES - Epochs"):
     # Shuffle the examples

@@ -107,7 +117,6 @@ def fit(
 train_n = 0.0

 for batch_id in range(nb_batches):
-
     # Create batch data
     x_batch = x[ind[batch_id * batch_size : min((batch_id + 1) * batch_size, x.shape[0])]].copy()
     y_batch = y[ind[batch_id * batch_size : min((batch_id + 1) * batch_size, x.shape[0])]]

@@ -125,9 +134,9 @@ def fit(

 # compute accuracy
 if validation_data is not None:
-    (x_test, y_test) = validation_data
-    output = np.argmax(self.predict(x_test), axis=1)
-    nb_correct_pred = np.sum(output == np.argmax(y_test, axis=1))
+    output = np.argmax(self.predict(x_preprocessed_test), axis=1)
+    nb_correct_pred = np.sum(output == np.argmax(y_preprocessed_test, axis=1))
+
 logger.info(
     "epoch: %s time(s): %.1f loss: %.4f acc(tr): %.4f acc(val): %.4f",
     i_epoch,

@@ -188,7 +197,6 @@ def fit_generator(
 train_n = 0.0

 for batch_id in range(nb_batches):  # pylint: disable=W0612
-
     # Create batch data
     x_batch, y_batch = generator.get_batch()
     x_batch = x_batch.copy()

@@ -232,6 +240,8 @@ def _batch_process(self, x_batch: np.ndarray, y_batch: np.ndarray) -> Tuple[floa
 x_batch_pert = self._attack.generate(x_batch, y=y_batch)

 # Apply preprocessing
+y_batch = check_and_transform_label_format(y_batch, nb_classes=self.classifier.nb_classes)
+
 x_preprocessed, y_preprocessed = self._classifier._apply_preprocessing(  # pylint: disable=W0212
     x_batch, y_batch, fit=True
 )
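
The TRADES trainer now normalises labels with `check_and_transform_label_format` and preprocesses the validation split once, before the epoch loop, rather than on every epoch. The helper accepts either index or one-hot labels and, by default, returns one-hot labels; a minimal sketch of that conversion (shapes are illustrative):

```python
import numpy as np
from art.utils import check_and_transform_label_format

# Index labels for a 10-class problem are converted to one-hot encoding;
# labels that are already one-hot are passed through unchanged.
y_index = np.array([3, 0, 7])
y_one_hot = check_and_transform_label_format(y_index, nb_classes=10)

print(y_one_hot.shape)               # (3, 10)
print(np.argmax(y_one_hot, axis=1))  # [3 0 7]
```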

art/estimators/certification/__init__.py (+18 -4)

@@ -2,12 +2,26 @@
 This module contains certified classifiers.
 """
 import importlib
-from art.estimators.certification import randomized_smoothing
-from art.estimators.certification import derandomized_smoothing
+from art.estimators.certification.randomized_smoothing.randomized_smoothing import RandomizedSmoothingMixin
+from art.estimators.certification.randomized_smoothing.numpy import NumpyRandomizedSmoothing
+from art.estimators.certification.randomized_smoothing.tensorflow import TensorFlowV2RandomizedSmoothing
+from art.estimators.certification.randomized_smoothing.pytorch import PyTorchRandomizedSmoothing
+from art.estimators.certification.derandomized_smoothing.derandomized_smoothing import DeRandomizedSmoothingMixin
+from art.estimators.certification.derandomized_smoothing.pytorch import PyTorchDeRandomizedSmoothing
+from art.estimators.certification.derandomized_smoothing.tensorflow import TensorFlowV2DeRandomizedSmoothing

 if importlib.util.find_spec("torch") is not None:
-    from art.estimators.certification import deep_z
-    from art.estimators.certification import interval
+    from art.estimators.certification.deep_z.deep_z import ZonoDenseLayer
+    from art.estimators.certification.deep_z.deep_z import ZonoBounds
+    from art.estimators.certification.deep_z.deep_z import ZonoConv
+    from art.estimators.certification.deep_z.deep_z import ZonoReLU
+    from art.estimators.certification.deep_z.pytorch import PytorchDeepZ
+    from art.estimators.certification.interval.interval import PyTorchIntervalDense
+    from art.estimators.certification.interval.interval import PyTorchIntervalConv2D
+    from art.estimators.certification.interval.interval import PyTorchIntervalReLU
+    from art.estimators.certification.interval.interval import PyTorchIntervalFlatten
+    from art.estimators.certification.interval.interval import PyTorchIntervalBounds
+    from art.estimators.certification.interval.pytorch import PyTorchIBPClassifier
 else:
     import warnings

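
With these explicit re-exports, the certification classes are available directly from `art.estimators.certification`, and the PyTorch-based ones are imported only when torch is installed. A small sketch of consuming the package behind the same `find_spec` guard (how downstream code might import these names is an assumption, not shown in the diff):

```python
import importlib.util

from art.estimators.certification import RandomizedSmoothingMixin  # framework-independent

# Torch-backed certification classes exist only when PyTorch is installed,
# mirroring the guard in art/estimators/certification/__init__.py.
if importlib.util.find_spec("torch") is not None:
    from art.estimators.certification import PytorchDeepZ, PyTorchIBPClassifier
else:
    PytorchDeepZ = PyTorchIBPClassifier = None  # torch not available
```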

art/estimators/certification/randomized_smoothing/__init__.py (+6 -1)

@@ -4,5 +4,10 @@
 from art.estimators.certification.randomized_smoothing.randomized_smoothing import RandomizedSmoothingMixin

 from art.estimators.certification.randomized_smoothing.numpy import NumpyRandomizedSmoothing
-from art.estimators.certification.randomized_smoothing.tensorflow import TensorFlowV2RandomizedSmoothing
 from art.estimators.certification.randomized_smoothing.pytorch import PyTorchRandomizedSmoothing
+from art.estimators.certification.randomized_smoothing.tensorflow import TensorFlowV2RandomizedSmoothing
+from art.estimators.certification.randomized_smoothing.smooth_mix.pytorch import PyTorchSmoothMix
+from art.estimators.certification.randomized_smoothing.macer.pytorch import PyTorchMACER
+from art.estimators.certification.randomized_smoothing.macer.tensorflow import TensorFlowV2MACER
+from art.estimators.certification.randomized_smoothing.smooth_adv.pytorch import PyTorchSmoothAdv
+from art.estimators.certification.randomized_smoothing.smooth_adv.tensorflow import TensorFlowV2SmoothAdv

art/estimators/certification/randomized_smoothing/macer/__init__.py (whitespace-only changes)
