
Commit 1e0cd8a

replied to comments

1 parent: 4c1c9fb
4 files changed: +18 -22 lines

nncf/quantization/algorithms/weight_compression/algorithm.py (+14 -13)
@@ -212,39 +212,40 @@ def _proportion_str(num_weights_list: List[int], total_num_weights: int, total_n
         return f"{percentage:.0f}% ({len(num_weights_list)} / {total_num_params})"
 
     def _get_bitwidth_distribution_str(
-        self, all_params: List[WeightCompressionParameters], internal_params: List[WeightCompressionParameters]
+        self, all_params: List[WeightCompressionParameters], ratio_defining_params: List[WeightCompressionParameters]
     ) -> str:
         """
         Generates a table that shows the ratio of weights quantized to different number of bits.
 
-        :param all_params: List of information about each weight node.
-        :param internal_params: List of information about weight nodes that are considered for mixed precision.
+        :param all_params: Information about each weight node.
+        :param ratio_defining_params: Information about weights that are used for calculating ratio between primary and
+            backup precisions.
         :return: A string containing the table.
         """
         num_bits_vs_num_weights_map = {}
-        internal_weight_names = set(wp.weight_name for wp in internal_params)
+        ratio_defining_weight_names = set(wp.weight_name for wp in ratio_defining_params)
         for data in all_params:
             num_bits = data.compression_config.num_bits
-            n_total, n_internal = num_bits_vs_num_weights_map.get(num_bits, ([], []))
-            if data.weight_name in internal_weight_names:
-                n_internal.append(data.num_weights)
+            n_total, n_ratio_defining = num_bits_vs_num_weights_map.get(num_bits, ([], []))
+            if data.weight_name in ratio_defining_weight_names:
+                n_ratio_defining.append(data.num_weights)
             n_total.append(data.num_weights)
-            num_bits_vs_num_weights_map[num_bits] = (n_total, n_internal)
+            num_bits_vs_num_weights_map[num_bits] = (n_total, n_ratio_defining)
 
-        num_internal_weights = sum(ws.num_weights for ws in internal_params)
-        num_internal_params = len(internal_params)
+        num_ratio_defining_weights = sum(ws.num_weights for ws in ratio_defining_params)
+        num_ratio_defining_params = len(ratio_defining_params)
         num_total_weights = sum(ws.num_weights for ws in all_params)
         num_params = len(all_params)
         num_bits_vs_num_weights_map = OrderedDict(sorted(num_bits_vs_num_weights_map.items(), reverse=True))
         # Table creation
-        header = ["Num bits (N)", "% all parameters (layers)", "% internal parameters (layers)"]
+        header = ["Num bits (N)", "% all parameters (layers)", "% ratio-defining parameters (layers)"]
         rows = []
-        for bitwidth, (n_total, n_internal) in num_bits_vs_num_weights_map.items():
+        for bitwidth, (n_total, n_ratio_defining) in num_bits_vs_num_weights_map.items():
             rows.append(
                 [
                     bitwidth,
                     self._proportion_str(n_total, num_total_weights, num_params),
-                    self._proportion_str(n_internal, num_internal_weights, num_internal_params),
+                    self._proportion_str(n_ratio_defining, num_ratio_defining_weights, num_ratio_defining_params),
                 ]
             )
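To make the renamed bookkeeping concrete, here is a self-contained sketch of the same distribution logic with made-up layer names and weight counts; proportion_str only approximates the _proportion_str helper referenced in the hunk header, whose full body is not shown in this diff:

from collections import OrderedDict

# Hypothetical weight records: (weight_name, num_bits, num_weights).
all_params = [
    ("embedding.weight", 8, 1_000_000),
    ("linear1.weight", 4, 500_000),
    ("linear2.weight", 4, 500_000),
    ("lm_head.weight", 8, 1_000_000),
]
# Weights whose sizes define the ratio between primary and backup precisions.
ratio_defining_weight_names = {"linear1.weight", "linear2.weight"}

def proportion_str(num_weights_list, total_num_weights, total_num_params):
    percentage = 100 * sum(num_weights_list) / max(total_num_weights, 1)
    return f"{percentage:.0f}% ({len(num_weights_list)} / {total_num_params})"

# setdefault stands in for the .get-then-store pattern in the diff;
# both mutate the same pair of lists per bit width.
num_bits_vs_num_weights_map = {}
for weight_name, num_bits, num_weights in all_params:
    n_total, n_ratio_defining = num_bits_vs_num_weights_map.setdefault(num_bits, ([], []))
    if weight_name in ratio_defining_weight_names:
        n_ratio_defining.append(num_weights)
    n_total.append(num_weights)

num_total_weights = sum(n for _, _, n in all_params)
num_ratio_defining_weights = sum(
    n for name, _, n in all_params if name in ratio_defining_weight_names
)
for num_bits, (n_total, n_ratio_defining) in OrderedDict(
    sorted(num_bits_vs_num_weights_map.items(), reverse=True)
).items():
    print(
        num_bits,
        proportion_str(n_total, num_total_weights, len(all_params)),
        proportion_str(n_ratio_defining, num_ratio_defining_weights, len(ratio_defining_weight_names)),
    )
# 8 67% (2 / 4) 0% (0 / 2)
# 4 33% (2 / 4) 100% (2 / 2)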

nncf/quantization/algorithms/weight_compression/backend.py (+1 -1)
@@ -96,7 +96,7 @@ def transform_model(
         Applies weight compression transformations to the model.
 
         :param model: Model in which the weights will be compressed according to the weight compression description.
-        :param graph: The graph ssociated with the model.
+        :param graph: The graph associated with the model.
         :param weight_compression_parameters: List of weight compression parameters.
         :return: The transformed model.
         """

nncf/quantization/algorithms/weight_compression/torch_backend.py (+1 -7)
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Dict, Iterable, List, Optional, Tuple, Union
+from typing import Iterable, List, Optional, Tuple, Union
 
 import torch
 
@@ -238,9 +238,3 @@ def transform_model(
         transformed_model = PTModelTransformer(model).transform(transformation_layout)
 
         return transformed_model
-
-    @staticmethod
-    def dump_parameters(
-        model: torch.nn.Module, parameters: Dict, algo_name: Optional[str] = "quantization", path: Optional[List] = None
-    ) -> None:
-        pass
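One reading of this cleanup: with the empty @staticmethod override deleted, attribute lookup falls through to the parent class, so the call is no longer silently swallowed. A sketch with hypothetical classes, not the actual NNCF hierarchy:

class BaseBackend:
    @staticmethod
    def dump_parameters(parameters: dict) -> None:
        print(f"dumping {parameters}")  # stands in for the real serialization

class StubbedBackend(BaseBackend):
    @staticmethod
    def dump_parameters(parameters: dict) -> None:
        pass  # swallows the call, like the stub removed above

class CleanBackend(BaseBackend):
    pass  # inherits BaseBackend.dump_parameters, like the backend after this commit

StubbedBackend.dump_parameters({"ratio": 0.8})  # no output
CleanBackend.dump_parameters({"ratio": 0.8})    # dumping {'ratio': 0.8}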

tests/torch/test_model_transformer.py (+2 -1)
@@ -609,7 +609,8 @@ def _insert_external_op_mocked():
     model = NNCFNetwork(InsertionPointTestModel(), FillerInputInfo([FillerInputElement([1, 1, 10, 10])]))
     if compression_module_registered:
         model.nncf.register_compression_module_type(ExtraCompressionModuleType.EXTERNAL_OP)
-    command = PTSharedFnInsertionCommand(tps, hook_instance, OP_UNIQUE_NAME, priority)
+    unique_name = f"{OP_UNIQUE_NAME}[{';'.join([tp.target_node_name for tp in tps])}]"
+    command = PTSharedFnInsertionCommand(tps, hook_instance, unique_name, priority)
     transformation_layout = PTTransformationLayout()
     transformation_layout.register(command)
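The updated test derives the command name from its target points, so commands that share OP_UNIQUE_NAME but target different nodes get distinct names. A runnable sketch of the naming scheme, with an invented target-point class and node names:

from dataclasses import dataclass

@dataclass
class FakeTargetPoint:
    target_node_name: str  # stand-in for the real target point's attribute

OP_UNIQUE_NAME = "UNIQUE_OP_NAME"
tps = [
    FakeTargetPoint("InsertionPointTestModel/linear_0"),
    FakeTargetPoint("InsertionPointTestModel/relu_0"),
]
unique_name = f"{OP_UNIQUE_NAME}[{';'.join([tp.target_node_name for tp in tps])}]"
print(unique_name)
# UNIQUE_OP_NAME[InsertionPointTestModel/linear_0;InsertionPointTestModel/relu_0]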
