FakeQuantAffine.h
#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>

namespace at {

struct TensorIterator;

namespace native {
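
// Per-tensor fake-quantization kernel with a cached mask: writes the
// quantize-dequantize result of `input` into `output` and records in `mask`
// which elements landed inside [quant_min, quant_max], so the backward pass
// can reuse the mask instead of recomputing the comparison.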
using fake_quant_tensor_cachemask_fn = void (*)(
    Tensor& output,
    Tensor& mask,
    const Tensor& input,
    float sc,
    int64_t z_point,
    int64_t quant_min,
    int64_t quant_max);
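
// Variant that takes the quantization parameters (scale, zero_point) as
// tensors rather than scalars, plus a `fake_quant_enabled` tensor that lets
// the caller toggle fake-quantization at runtime.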
using fake_quant_tensor_cachemask_tensor_qparams_fn = void (*)(
    Tensor& output,
    Tensor& mask,
    const Tensor& input,
    const Tensor& sc,
    const Tensor& z_point,
    const Tensor& fake_quant_enabled,
    int64_t quant_min,
    int64_t quant_max);
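
// Backward kernel for per-tensor fake-quantization with learnable scale and
// zero_point; `grad_factor` scales the gradients computed for the
// quantization parameters.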
using fake_quant_learnable_grad_tensor_fn = void (*)(
    TensorIterator& iter,
    float scale,
    float inv_scale,
    int64_t zero_point,
    int64_t quant_min,
    int64_t quant_max,
    float grad_factor);
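
// Dispatch stubs for the per-tensor kernels above; backends (e.g. CPU, CUDA)
// register their implementations against these via REGISTER_DISPATCH.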
DECLARE_DISPATCH(fake_quant_tensor_cachemask_fn, fake_quant_tensor_cachemask_stub);
DECLARE_DISPATCH(fake_quant_tensor_cachemask_tensor_qparams_fn, fake_quant_tensor_cachemask_tensor_qparams_stub);
DECLARE_DISPATCH(fake_quant_learnable_grad_tensor_fn, fake_quant_grad_learnable_tensor_stub);
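
// Per-channel variants: the TensorIterator is pre-configured with the
// per-channel scale/zero_point operands, so only the clamp bounds (and, for
// the learnable backward, `grad_factor`) are passed explicitly.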
using fake_quant_per_channel_fn = void (*)(
    TensorIterator &iter,
    int64_t quant_min,
    int64_t quant_max);

using fake_quant_per_channel_cachemask_fn = void (*)(
    TensorIterator &iter,
    TensorIterator &iter_mask,
    int64_t quant_min,
    int64_t quant_max);

DECLARE_DISPATCH(fake_quant_per_channel_cachemask_fn, fake_quant_per_channel_cachemask_stub);
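
// Backward kernel for per-channel fake-quantization with learnable
// per-channel scale and zero_point.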
using fake_quant_learnable_per_channel_fn = void (*)(
    TensorIterator &iter,
    int64_t quant_min,
    int64_t quant_max,
    float grad_factor);
DECLARE_DISPATCH(fake_quant_learnable_per_channel_fn, fake_quant_grad_learnable_channel_stub);

} // namespace native
} // namespace at