aimet_torch.utils

aimet_torch.utils.remove_all_quantizers(modules)

Temporarily remove all quantizers from the given modules. Returns a context manager; the removed quantizers are restored when the context exits.

Example

>>> print(sim.model)
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
    (output_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
  )
)
>>> with remove_all_quantizers(sim.model):
...     print(sim.model)
...
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): None
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): None
    )
    (output_quantizers): ModuleList(
      (0): None
    )
  )
)
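
A typical use is measuring how far the quantized model's output drifts from its floating-point behavior on the same input. A minimal sketch, assuming `sim` is an existing QuantizationSimModel with computed encodings and `dummy_input` is a hypothetical tensor matching the model's expected input shape:

>>> import torch
>>> dummy_input = torch.randn(1, 3, 24, 24)
>>> quant_out = sim.model(dummy_input)        # forward pass with all quantizers active
>>> with remove_all_quantizers(sim.model):
...     fp32_out = sim.model(dummy_input)     # same forward pass, no quantization
...
>>> max_err = (quant_out - fp32_out).abs().max()  # worst-case error on this input
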
aimet_torch.utils.remove_activation_quantizers(modules)

Temporarily remove all input and output (activation) quantizers from the given modules.

Example

>>> print(sim.model)
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
    (output_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
  )
)
>>> with remove_activation_quantizers(sim.model):
...     print(sim.model)
...
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): None
    )
    (output_quantizers): ModuleList(
      (0): None
    )
  )
)
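
With activation quantizers removed, any remaining deviation from the floating-point output is attributable to parameter quantization alone, which makes this context manager handy for isolating error sources. A sketch reusing the hypothetical `sim` and `dummy_input` from above:

>>> with remove_activation_quantizers(sim.model):
...     weight_only_out = sim.model(dummy_input)   # only weight/bias quantization applied
...
>>> with remove_all_quantizers(sim.model):
...     fp32_out = sim.model(dummy_input)          # floating-point baseline
...
>>> weight_err = (weight_only_out - fp32_out).abs().max()
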
aimet_torch.utils.remove_param_quantizers(modules)

Temporarily remove all parameter (weight and bias) quantizers from the given modules.

Example

>>> print(sim.model)
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
    (output_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
  )
)
>>> with remove_param_quantizers(sim.model):
...     print(sim.model)
...
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): None
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
    (output_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
  )
)
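
This is the mirror image of remove_activation_quantizers: with parameter quantizers removed, any deviation from the floating-point output comes from activation quantization alone. Continuing the same hypothetical setup:

>>> with remove_param_quantizers(sim.model):
...     act_only_out = sim.model(dummy_input)      # only input/output quantization applied
...
>>> act_err = (act_only_out - fp32_out).abs().max()
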
aimet_torch.utils.remove_input_quantizers(modules)

Temporarily remove all input quantizers from the given modules.

Example

>>> print(sim.model)
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
    (output_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
  )
)
>>> with remove_input_quantizers(sim.model):
...     print(sim.model)
...
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): None
    )
    (output_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
  )
)
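
One plausible scenario (an illustrative use, not prescribed by the API) is when the inputs arrive from an upstream stage that already simulates quantization, so quantizing them again at the model boundary would be redundant:

>>> with remove_input_quantizers(sim.model):
...     out = sim.model(dummy_input)   # inputs pass through untouched; weights and outputs still quantized
...
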
aimet_torch.utils.remove_output_quantizers(modules)

Temporarily remove all output quantizers from the given modules.

Example

>>> print(sim.model)
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
    (output_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
  )
)
>>> with remove_output_quantizers(sim.model):
...     print(sim.model)
...
Sequential(
  (0): QuantizedConv2d(
    3, 3, kernel_size=(3, 3), stride=(1, 1)
    (param_quantizers): ModuleDict(
      (weight): QuantizeDequantize(shape=(3, 1, 1, 1), qmin=-128, qmax=127, symmetric=True)
      (bias): None
    )
    (input_quantizers): ModuleList(
      (0): QuantizeDequantize(shape=(), qmin=0, qmax=255, symmetric=False)
    )
    (output_quantizers): ModuleList(
      (0): None
    )
  )
)
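
Removing only the output quantizers can be useful when the raw layer outputs are wanted at full precision, for example to inspect unquantized logits while keeping input and parameter quantization in place:

>>> with remove_output_quantizers(sim.model):
...     raw_out = sim.model(dummy_input)   # outputs left unquantized; inputs and weights still quantized
...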