Add Int8Tensor for clearer interface #3038
base: main
Changes from 14 commits
@@ -0,0 +1,218 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD 3-Clause license found in the
# LICENSE file in the root directory of this source tree.

import copy
import unittest
from typing import Tuple

import torch
from torch.testing._internal import common_utils

from torchao.quantization import (
    Int8DynamicActivationInt8WeightConfig,
    Int8WeightOnlyConfig,
    PerRow,
    PerTensor,
    quantize_,
)
from torchao.quantization.quantize_.workflows.int8.int8_tensor import (
    Int8Tensor,
    QuantizeTensorToInt8Kwargs,
)
from torchao.quantization.utils import compute_error
from torchao.testing.utils import TorchAOIntegrationTestCase


# TODO: Refactor after https://github.com/pytorch/ao/pull/2729 is merged
class ToyTwoLinearModel(torch.nn.Module):
    def __init__(
        self,
        input_dim,
        hidden_dim,
        output_dim,
        has_bias=False,
        dtype=None,
        device=None,
    ):
        super().__init__()
        self.dtype = dtype
        self.device = device
        self.linear1 = torch.nn.Linear(
            input_dim, hidden_dim, bias=has_bias, dtype=dtype, device=device
        )
        self.linear2 = torch.nn.Linear(
            hidden_dim, output_dim, bias=has_bias, dtype=dtype, device=device
        )

    def forward(self, x):
        x = self.linear1(x)
        x = self.linear2(x)
        return x


@unittest.skipIf(not torch.cuda.is_available(), "Need CUDA available")
@common_utils.instantiate_parametrized_tests
class TestInt8Tensor(TorchAOIntegrationTestCase):
    def setUp(self):
        super().setUp()
        torch.manual_seed(42)
        self.weight_fp = torch.randn(4, 3, dtype=torch.bfloat16)
        self.input_fp = torch.randn(4, 3, dtype=torch.bfloat16)
        self.bias = torch.randn(4, dtype=torch.bfloat16)
        self.block_size = [4, 3]

    def test_creation_and_attributes(self):
        """Test tensor creation, dtypes, and ranges"""
        tensor = Int8Tensor.from_hp(self.weight_fp, self.block_size)

        self.assertEqual(tensor.shape, (4, 3))
        self.assertEqual(tensor.qdata.dtype, torch.int8)
        self.assertTrue(
            torch.all(tensor.qdata >= -128) and torch.all(tensor.qdata <= 127)
        )

    @common_utils.parametrize("dtype", [torch.bfloat16, torch.float16])
    @common_utils.parametrize(
        "sizes",
        [
            ((128,), 256, 128),
            ((32, 128), 64, 256),
        ],
    )
    @common_utils.parametrize(
        "config",
        [
            Int8DynamicActivationInt8WeightConfig(version=2),
            Int8WeightOnlyConfig(version=2),
        ],
    )
    def test_int8_linear_variants(
        self,
        dtype: torch.dtype,
        sizes: Tuple,
        config,
    ):
        M, N, K = sizes
        input_tensor = torch.randn(*M, K, dtype=dtype, device="cuda")

        # Create a linear layer
        m = ToyTwoLinearModel(K, N, K).eval().to(dtype).to("cuda")
        m_q = copy.deepcopy(m)

        # Quantize
        quantize_(m_q, config)

        output_original = m(input_tensor)
        output_quantized = m_q(input_tensor)

        error = compute_error(output_original, output_quantized)
        assert error > 20, f"Quantization error is too high, got a SQNR of {error}"

    def test_linear_operations(self):
        """Test fp+int8 and int8+int8 linear ops"""
        weight_q8 = Int8Tensor.from_hp(self.weight_fp, self.block_size)
        input_q8 = Int8Tensor.from_hp(self.input_fp, self.block_size)

        reference = torch.nn.functional.linear(self.input_fp, self.weight_fp, self.bias)
        result_fp = torch.nn.functional.linear(self.input_fp, weight_q8, self.bias)
        result_q8 = torch.nn.functional.linear(input_q8, weight_q8, self.bias)

        self.assertEqual(result_fp.shape, reference.shape)
        self.assertEqual(result_q8.shape, reference.shape)
        self.assertTrue(compute_error(result_fp, reference) > 10)
        self.assertTrue(compute_error(result_q8, reference) > 10)

    def test_dynamic_quantization(self):
        """Test dynamic activation quantization"""
        weight_q8_dynamic = Int8Tensor.from_hp(
            self.weight_fp,
            self.block_size,
            act_quant_kwargs=QuantizeTensorToInt8Kwargs(),
        )

        reference = torch.nn.functional.linear(self.input_fp, self.weight_fp, self.bias)
        result_dynamic = torch.nn.functional.linear(
            self.input_fp, weight_q8_dynamic, self.bias
        )

        self.assertEqual(result_dynamic.shape, reference.shape)

    @unittest.skip("granularity parameter not supported in current API")
    @common_utils.parametrize("granularity", [PerTensor(), PerRow()])
    def test_slice_preserves_aliasing(self, granularity):
        config = Int8DynamicActivationInt8WeightConfig(
            granularity=granularity, version=2
        )
        l = torch.nn.Linear(1024, 1024).to("cuda").to(torch.bfloat16)
        l.weight = torch.nn.Parameter(
            torch.zeros(1024, 1024, dtype=torch.bfloat16, device="cuda")
        )
        quantize_(l, config)
        param = l.weight
        param_data = param.data
        param_data = param_data.narrow(0, 0, 512)
        # Make sure aliasing is preserved in the sliced quantized tensor
        assert param.data.qdata.data_ptr() == param_data.qdata.data_ptr()
        assert param.data.scale.data_ptr() == param_data.scale.data_ptr()

    @common_utils.parametrize(
        "config",
        [
            Int8DynamicActivationInt8WeightConfig(version=2),
            Int8WeightOnlyConfig(version=2),
        ],
    )
    @common_utils.parametrize("device", ["cpu", "cuda"])
    @common_utils.parametrize("dtype", [torch.bfloat16, torch.float16])
    def test_slice(self, config, device, dtype):
        """Test tensor slicing"""
        dummy = torch.nn.Linear(256, 256, bias=False, dtype=dtype, device=device)
        quantize_(dummy, config)

        weight1 = dummy.weight.clone().narrow(0, 0, 64)
        weight2 = dummy.weight.clone().narrow(1, 0, 128)

        self.assertEqual(weight1.qdata, dummy.weight.qdata.narrow(0, 0, 64))
        self.assertEqual(weight2.qdata, dummy.weight.qdata.narrow(1, 0, 128))
Review comment on lines +180 to +181: nit: add assert for scale as well?
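A minimal sketch of what that assertion could look like (hypothetical, not part of the diff; it assumes the version-2 configs here produce a per-tensor scale, so slicing leaves it unchanged):

    # Hypothetical addition to test_slice: with a per-tensor scale,
    # narrowing qdata should not change the scale.
    self.assertEqual(weight1.scale, dummy.weight.scale)
    self.assertEqual(weight2.scale, dummy.weight.scale)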

    def test_transpose(self):
        """Test transpose operation"""
        weight_q8 = Int8Tensor.from_hp(self.weight_fp, self.block_size)
        transposed = weight_q8.transpose(0, 1)

        self.assertEqual(transposed.shape, (3, 4))
        self.assertEqual(transposed.block_size, [3, 4])

    def test_select(self):
        """Test select operation"""
        weight_q8 = Int8Tensor.from_hp(self.weight_fp, self.block_size)
        selected = weight_q8.select(0, 0)

        self.assertEqual(selected.shape, (3,))

    def test_index_select(self):
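For orientation, a minimal round-trip sketch of the Int8Tensor interface exercised by these tests; the `dequantize()` call is an assumption made for the round-trip check, not an API shown in this diff:

    import torch
    from torchao.quantization.quantize_.workflows.int8.int8_tensor import Int8Tensor
    from torchao.quantization.utils import compute_error

    w = torch.randn(4, 3, dtype=torch.bfloat16)
    w_q8 = Int8Tensor.from_hp(w, block_size=[4, 3])  # one block = per-tensor scale
    assert w_q8.qdata.dtype == torch.int8  # int8 payload, as asserted above
    # round-trip SQNR via compute_error (in dB); higher is better
    print(compute_error(w, w_q8.dequantize()))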
@@ -78,6 +78,7 @@
     Int4PreshuffledTensor,
     Int4Tensor,
     Int4TilePackedTo4dTensor,
+    Int8Tensor,
     IntxChooseQParamsAlgorithm,
     IntxOpaqueTensor,
     IntxPackingFormat,
@@ -1362,10 +1363,12 @@ class Int8WeightOnlyConfig(AOBaseConfig):
         Otherwise, applies per-group quantization with the specified group size.
     set_inductor_config: bool = True - If True, adjusts `torchinductor` settings to recommended values
         for better performance with this quantization scheme.
+    version - Version of the config to use. Version 1 uses AffineQuantizedTensor for quantization; version 2 uses Int8Tensor.
     """

     group_size: Optional[int] = None
     set_inductor_config: bool = True
+    version: int = 1

     def __post_init__(self):
         torch._C._log_api_usage_once("torchao.quantization.Int8WeightOnlyConfig")
@@ -1376,22 +1379,30 @@ def __post_init__(self):


 def _int8_weight_only_quantize_tensor(weight, config):
-    mapping_type = MappingType.SYMMETRIC
-    target_dtype = torch.int8
-    eps = torch.finfo(torch.float32).eps
-    zero_point_dtype = torch.int64
-    group_size = config.group_size
-    if group_size is None:
-        group_size = weight.shape[-1]
-    block_size = tuple([1 for x in range(weight.dim() - 1)] + [group_size])
-    new_weight = to_affine_quantized_intx(
-        weight,
-        mapping_type,
-        block_size,
-        target_dtype,
-        eps=eps,
-        zero_point_dtype=zero_point_dtype,
-    )
+    if config.version == 1:
+        warnings.warn(
+            "Config Deprecation: version 1 of Int8WeightOnlyConfig is deprecated and will no longer be supported in a future release, please use version 2, see https://github.com/pytorch/ao/issues/2752 for more details"
+        )
+        mapping_type = MappingType.SYMMETRIC
+        target_dtype = torch.int8
+        eps = torch.finfo(torch.float32).eps
+        zero_point_dtype = torch.int64
+        group_size = config.group_size
+        if group_size is None:
+            group_size = weight.shape[-1]
+        block_size = tuple([1 for x in range(weight.dim() - 1)] + [group_size])
+        new_weight = to_affine_quantized_intx(
+            weight,
+            mapping_type,
+            block_size,
+            target_dtype,
+            eps=eps,
+            zero_point_dtype=zero_point_dtype,
+        )
+    else:
+        assert config.version == 2, f"Unexpected version: {config.version}"
+        block_size = [weight.shape[0], weight.shape[1]]
Review thread on this block_size branch:

Comment: this should be the same as L1393 I think; you can extract L1390-L1393 out of the first if branch and use that.

Reply: Isn't dividing the logic much safer and easier for deprecating the old API in the future? Other APIs like

Reply: it's fine to duplicate I think, but the current code for block_size doesn't support 3D though.

Reply: Okay, then I will keep this branch and update the assert for the 3D check.

Reply: Oh actually, we are already doing the 3D check at:

    if w.dim() != 2 or len(block_size) != 2:
        raise ValueError("Expected 2D tensor and block_size length 2")
+        new_weight = Int8Tensor.from_hp(weight, block_size=block_size)
     return new_weight
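To make the version split concrete, a small sketch of the block_size each branch produces for a 2D weight (values follow directly from the diff above):

    import torch

    w = torch.randn(128, 64)
    # version 1: group_size defaults to the last dim, giving one block per row
    group_size = w.shape[-1]
    block_size_v1 = tuple([1 for _ in range(w.dim() - 1)] + [group_size])  # (1, 64)
    # version 2: a single block spanning the whole weight, i.e. a per-tensor scale
    block_size_v2 = [w.shape[0], w.shape[1]]  # [128, 64]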
@@ -1519,12 +1530,14 @@ class Int8DynamicActivationInt8WeightConfig(AOBaseConfig):
         in original precision during decode operations.
     set_inductor_config: bool = True - If True, adjusts `torchinductor` settings to recommended values
         for better performance with this quantization scheme.
+    version (int): the version of the config; version 1 uses AffineQuantizedTensor, which we plan to deprecate/split, version 2 uses Int8Tensor
     """

     layout: Optional[Layout] = PlainLayout()
     act_mapping_type: Optional[MappingType] = MappingType.SYMMETRIC
     weight_only_decode: bool = False
     set_inductor_config: bool = True
+    version: int = 1

     def __post_init__(self):
         torch._C._log_api_usage_once(
@@ -1572,19 +1585,31 @@ def get_weight_block_size(x):
     else:
         input_quant_func = _int8_asymm_per_token_quant

-    block_size = get_weight_block_size(weight)
-    new_weight = to_affine_quantized_intx(
-        weight,
-        mapping_type,
-        block_size,
-        target_dtype,
-        eps=eps,
-        zero_point_dtype=zero_point_dtype,
-        _layout=layout,
-        zero_point_domain=weight_zero_point_domain,
-    )
-    new_weight = to_linear_activation_quantized(new_weight, input_quant_func)
-    return new_weight
+    if config.version == 1:
+        warnings.warn(
+            "Config Deprecation: version 1 of Int8DynamicActivationInt8WeightConfig is deprecated and will no longer be supported in a future release, please use version 2, see https://github.com/pytorch/ao/issues/2752 for more details"
+        )
+        block_size = get_weight_block_size(weight)
+        quantized_weight = to_affine_quantized_intx(
+            weight,
+            mapping_type,
+            block_size,
+            target_dtype,
+            eps=eps,
+            zero_point_dtype=zero_point_dtype,
+            _layout=layout,
+            zero_point_domain=weight_zero_point_domain,
+        )
+        quantized_weight = to_linear_activation_quantized(
+            quantized_weight, input_quant_func
+        )
+    else:
+        quantized_weight = Int8Tensor.from_hp(
+            weight,
+            block_size=get_weight_block_size(weight),
+        )
+    return quantized_weight


 @register_quantize_module_handler(Int8DynamicActivationInt8WeightConfig)
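For reference, a minimal end-to-end sketch of opting into the version-2 configs, mirroring test_int8_linear_variants above (the model and shapes here are arbitrary, illustrative choices):

    import torch
    from torchao.quantization import (
        Int8DynamicActivationInt8WeightConfig,
        Int8WeightOnlyConfig,
        quantize_,
    )

    # Any module with nn.Linear layers works; shapes are illustrative.
    m = torch.nn.Sequential(torch.nn.Linear(256, 128)).to(torch.bfloat16).to("cuda")
    quantize_(m, Int8WeightOnlyConfig(version=2))  # weight-only int8 via Int8Tensor
    # or: quantize_(m, Int8DynamicActivationInt8WeightConfig(version=2))
    out = m(torch.randn(32, 256, dtype=torch.bfloat16, device="cuda"))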