Merge pull request #20 from AlexMontgomerie/dev-petros
dev-petros - Branch Optimization
ptoupas authored Feb 9, 2024
2 parents 68d5a9f + 325bf3d commit 997f2fc
Showing 99 changed files with 23,810 additions and 1,034 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/integration-testing.yml
@@ -48,6 +48,10 @@ jobs:
python -m pip install .
python -m pip install ./fpgaconvnet-optimiser
- name: Run Model Unit Tests
run: |
python -m unittest discover ./tests
- name: Run Optimisation Tests
run: |
cd fpgaconvnet-optimiser
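For reference, the new "Run Model Unit Tests" CI step above can be reproduced locally. The sketch below is an illustrative equivalent of `python -m unittest discover ./tests` written against the standard unittest API; it assumes a ./tests directory of unittest-style test modules, as the workflow implies, and is not part of the commit.

# Illustrative local equivalent of the new "Run Model Unit Tests" CI step.
# Assumes a ./tests directory containing unittest-style test modules.
import sys
import unittest

if __name__ == "__main__":
    # Discover and run everything under ./tests, mirroring
    # `python -m unittest discover ./tests` from the workflow.
    suite = unittest.defaultTestLoader.discover("./tests")
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Propagate failure to the shell/CI, as the unittest CLI would.
    sys.exit(0 if result.wasSuccessful() else 1)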
8 changes: 6 additions & 2 deletions fpgaconvnet/models/layers/ActivationLayer3D.py
@@ -19,12 +19,16 @@ def __init__(
coarse: int = 1,
data_t: FixedPoint = FixedPoint(16,8),
backend: str = "chisel", # default to no bias for old configs
regression_model: str = "linear_regression"
regression_model: str = "linear_regression",
input_compression_ratio: list = [1.0],
output_compression_ratio: list = [1.0]
):

# initialise parent class
super().__init__(rows, cols, depth, channels,
coarse, coarse, data_t=data_t)
coarse, coarse, data_t=data_t,
input_compression_ratio=input_compression_ratio,
output_compression_ratio=output_compression_ratio)
# set the activation layer name based on the activation type
self.layer_name = f"{activation_type}3d"

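The same pattern, accepting input_compression_ratio and output_compression_ratio (defaulting to [1.0], i.e. no compression) and forwarding them to the parent layer constructor, is repeated across the layer classes in this commit, so existing call sites keep working unchanged. The snippet below is a minimal construction sketch only; the import path and any argument names not visible in this hunk are assumptions for illustration, not taken from the diff.

# Hypothetical construction sketch (not taken from the diff): passing the
# compression-ratio keywords introduced in this commit. Import path, argument
# names outside the shown hunk, and all values are assumptions.
from fpgaconvnet.data_types import FixedPoint
from fpgaconvnet.models.layers import ActivationLayer3D

layer = ActivationLayer3D(
    rows=32, cols=32, depth=8, channels=64,   # assumed parameter names
    activation_type="relu",                   # layer_name becomes "relu3d"
    coarse=2,
    data_t=FixedPoint(16, 8),
    input_compression_ratio=[1.0],            # default: uncompressed input stream
    output_compression_ratio=[1.0],           # default: uncompressed output stream
)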
8 changes: 6 additions & 2 deletions fpgaconvnet/models/layers/BatchNormLayer.py
@@ -18,11 +18,15 @@ def __init__(
cols: int,
channels: int,
coarse: int = 1,
data_t: FixedPoint = FixedPoint(32,0)
data_t: FixedPoint = FixedPoint(32,0),
input_compression_ratio: list = [1.0],
output_compression_ratio: list = [1.0]
):

super().__init__(rows, cols, channels,
coarse, coarse, data_t=data_t)
coarse, coarse, data_t=data_t,
input_compression_ratio=input_compression_ratio,
output_compression_ratio=output_compression_ratio)

self.input_t = FixedPoint(32, 0)
self.scale_t = FixedPoint(32, 0)
48 changes: 34 additions & 14 deletions fpgaconvnet/models/layers/ChopLayer.py
@@ -3,16 +3,17 @@
Takes one stream input and outputs several streams using the fork module.
"""

import math
import os
from typing import List
import pydot

import numpy as np
import os
import math
import pydot

from fpgaconvnet.data_types import FixedPoint

from fpgaconvnet.models.modules import Fork
from fpgaconvnet.models.layers import MultiPortLayer
from fpgaconvnet.models.modules import Fork


class ChopLayer(MultiPortLayer):
def __init__(
@@ -25,15 +26,19 @@ def __init__(
ports_out: int = 1,
data_t: FixedPoint = FixedPoint(16,8),
backend: str = "chisel",
regression_model: str = "linear_regression"
regression_model: str = "linear_regression",
input_compression_ratio: list = [1.0],
output_compression_ratio: list = [1.0]
):

# save split parameters
self.split = split

# initialise parent class
super().__init__([rows], [cols], [channels], [coarse], [coarse],
ports_out=ports_out, data_t=data_t)
ports_out=ports_out, data_t=data_t,
input_compression_ratio=input_compression_ratio,
output_compression_ratio=output_compression_ratio)

self.mem_bw_out = [100.0/self.ports_out] * self.ports_out

@@ -46,7 +51,7 @@ def __init__(
self.regression_model = regression_model

# parameters
self._coarse = coarse
self.coarse = coarse

# init modules
#One fork module, fork coarse_out corresponds to number of layer output ports
@@ -62,30 +67,42 @@ def coarse(self) -> int:

@property
def coarse_in(self) -> int:
return [self._coarse]
return self._coarse_in

@property
def coarse_out(self) -> int:
return [self._coarse]*self.ports_out
return self._coarse_out

@coarse.setter
def coarse(self, val: int) -> None:
assert all(self.split[0] == elem for elem in self.split), "Only equal splits are supported"
val = min(val, self.channels_in())
ratio = self.channels_in()/val
val_out = max(1, int(self.channels_out()/ratio))
self._coarse = val
self._coarse_in = [val]
self.coarse_out = [val]*self.ports_out
self._coarse_out = [val_out]*self.ports_out
# self.update()

@coarse_in.setter
def coarse_in(self, val: int) -> None:
assert all(self.split[0] == elem for elem in self.split), "Only equal splits are supported"
val = min(val, self.channels_in())
ratio = self.channels_in()/val
val_out = max(1, int(self.channels_out()/ratio))
self._coarse = val
self._coarse_in = [val]
self._coarse_out = [val]*self.ports_out
self._coarse_out = [val_out]*self.ports_out
# self.update()

@coarse_out.setter
def coarse_out(self, val: int) -> None:
self._coarse = val
self._coarse_in = [val]
assert all(self.split[0] == elem for elem in self.split), "Only equal splits are supported"
val = min(val, self.channels_out())
ratio = self.channels_out()/val
val_in = max(1, int(self.channels_in()/ratio))
self._coarse = val_in
self._coarse_in = [val_in]
self._coarse_out = [val]*self.ports_out
# self.update()
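The three reworked setters above keep the input and output coarse factors in proportion: the requested value is clamped to the channel count, and the opposite side is scaled by the same channels ratio. A self-contained sketch of that arithmetic, with illustrative numbers that are not from the diff:

# Standalone sketch of the coarse_in setter arithmetic above
# (illustrative numbers; not part of the diff).
channels_in = 128          # total channels entering the chop layer
channels_out = 64          # channels on each (equal-split) output port

val = min(16, channels_in)                   # requested coarse_in, clamped
ratio = channels_in / val                    # 128 / 16 = 8.0
val_out = max(1, int(channels_out / ratio))  # 64 / 8 = 8

coarse_in = [val]            # -> [16]
coarse_out = [val_out] * 2   # two output ports -> [8, 8]
print(coarse_in, coarse_out)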

@@ -116,6 +133,9 @@ def rate_out(self, port_index=0):
assert(port_index < self.ports_out)
return self.channels_out(port_index)/self.channels_in()

def latency(self):
return max(self.latency_in(), self.latency_out())

def layer_info(self,parameters,batch_size=1):
MultiPortLayer.layer_info(self, parameters, batch_size)
parameters.coarse = self.coarse
8 changes: 6 additions & 2 deletions fpgaconvnet/models/layers/ConcatLayer.py
@@ -21,12 +21,16 @@ def __init__(
data_t: FixedPoint = FixedPoint(16,8),
backend: str = "chisel", # default to no bias for old configs
regression_model: str = "linear_regression",
input_compression_ratio: list = [1.0],
output_compression_ratio: list = [1.0]
):

# initialise parent class
super().__init__([rows]*ports_in, [cols]*ports_in, channels,
[coarse]*ports_in, [coarse]*ports_in, ports_in=ports_in,
data_t=data_t)
data_t=data_t,
input_compression_ratio=input_compression_ratio,
output_compression_ratio=output_compression_ratio)

self.mem_bw_in = [100.0] * self.ports_in
# parameters
@@ -105,7 +109,7 @@ def update(self):
# concat
self.modules["concat"].rows = self.rows_in()
self.modules["concat"].cols = self.cols_in()
self.modules["concat"].channels = self.channels
self.modules["concat"].channels = [self.channels_in(i)//self.coarse for i in range(self.ports_in)]
self.modules["concat"].ports_in = self.ports_in

def layer_info(self,parameters,batch_size=1):
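The updated update() above now gives the concat module one folded channel count per input port (channels_in(i) // coarse) rather than the raw channel list. A quick worked example with illustrative numbers, not from the diff:

# Illustrative numbers only (not from the diff): per-port folded channel
# counts as computed in the updated ConcatLayer.update().
channels_in = [64, 32]   # channels arriving on each input port
coarse = 2               # parallel streams per port

folded = [channels_in[i] // coarse for i in range(len(channels_in))]
print(folded)            # [32, 16] -> channels handled per stream, per port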
135 changes: 135 additions & 0 deletions fpgaconvnet/models/layers/ConcatLayer3D.py
@@ -0,0 +1,135 @@
import numpy as np
import math
import pydot
from typing import Union, List

from fpgaconvnet.data_types import FixedPoint

from fpgaconvnet.models.modules import Concat3D
from fpgaconvnet.models.layers import MultiPortLayer3D

from fpgaconvnet.models.layers.utils import get_factors

class ConcatLayer3D(MultiPortLayer3D):
def __init__(
self,
rows: int,
cols: int,
depth: int,
channels: List[int],
ports_in: int = 1,
coarse: int = 1,
data_t: FixedPoint = FixedPoint(16,8),
backend: str = "chisel", # default to no bias for old configs
regression_model: str = "linear_regression",
input_compression_ratio: list = [1.0],
output_compression_ratio: list = [1.0]
):

# initialise parent class
super().__init__([rows]*ports_in, [cols]*ports_in, [depth]*ports_in, channels,
[coarse]*ports_in, [coarse]*ports_in, ports_in=ports_in,
data_t=data_t,
input_compression_ratio=input_compression_ratio,
output_compression_ratio=output_compression_ratio)

self.mem_bw_in = [100.0] * self.ports_in
# parameters
self._coarse = coarse

# backend flag
assert backend in ["chisel"], f"{backend} is an invalid backend"
self.backend = backend

# regression model
assert regression_model in ["linear_regression", "xgboost"], f"{regression_model} is an invalid regression model"
self.regression_model = regression_model

# init modules
self.modules = {
# "concat3d" : Concat(self.rows_in(), self.cols_in(), self.depth_in(), self.channels, self.ports_in,
# backend=self.backend, regression_model=self.regression_model),
"concat3d" : Concat3D(self.rows_in(), self.cols_in(), self.depth_in(), self.channels, self.ports_in,
backend=self.backend, regression_model=self.regression_model),
}

# update the layer
self.update()

def channels_out(self, port_index=0):
assert port_index == 0, "ConcatLayer3D only has a single output port"
return sum(self.channels)

@property
def coarse(self) -> int:
return self._coarse

@property
def coarse_in(self) -> int:
return [self._coarse]*self.ports_in

@property
def coarse_out(self) -> int:
return [self._coarse]

@coarse.setter
def coarse(self, val: int) -> None:
self._coarse = val
self.update()

@coarse_in.setter
def coarse_in(self, val: int) -> None:
self._coarse = val
self.update()

@coarse_out.setter
def coarse_out(self, val: int) -> None:
self._coarse = val
self.update()

def rates_in(self, port_index=0):
assert port_index < self.ports_in
return self.modules["concat3d"].rate_in(port_index)

def rates_out(self, port_index=0):
assert port_index == 0, "ConcatLayer3D only has a single output port"
return self.modules["concat3d"].rate_out()

def get_coarse_in_feasible(self, port_index=0):
assert(port_index < self.ports_in)
factors = set( get_factors(self.channels_in(0)) )
for i in range(self.ports_in):
factors &= set( get_factors(self.channels_in(i)) )
return list(factors)

def get_coarse_out_feasible(self, port_index=0):
assert(port_index < self.ports_out)
return self.get_coarse_in_feasible()

def update(self):
# concat
self.modules["concat3d"].rows = self.rows_in()
self.modules["concat3d"].cols = self.cols_in()
self.modules["concat3d"].depth = self.depth_in()
self.modules["concat3d"].channels = [self.channels_in(i)//self.coarse for i in range(self.ports_in)]
self.modules["concat3d"].ports_in = self.ports_in

def layer_info(self,parameters,batch_size=1):
MultiPortLayer3D.layer_info(self, parameters, batch_size)
parameters.rows_in = self.rows_in()
parameters.cols_in = self.cols_in()
parameters.depth_in = self.depth_in()
parameters.rows_out = self.rows_out()
parameters.cols_out = self.cols_out()
parameters.depth_out = self.depth_out()
parameters.channels_out = self.channels_out()
parameters.coarse = self._coarse
# remove the repeated rows, cols, depth and channels
del parameters.rows_in_array[:]
del parameters.cols_in_array[:]
del parameters.depth_in_array[:]
del parameters.rows_out_array[:]
del parameters.cols_out_array[:]
del parameters.depth_out_array[:]
del parameters.channels_out_array[:]
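In the new ConcatLayer3D, get_coarse_in_feasible intersects the factor sets of every input port's channel count, so a single coarse value divides all of them. A self-contained sketch of that computation follows; the get_factors helper below is a stand-in for fpgaconvnet.models.layers.utils.get_factors, and the channel counts are illustrative only.

# Stand-in for fpgaconvnet.models.layers.utils.get_factors, for illustration.
def get_factors(n: int) -> list:
    return [f for f in range(1, n + 1) if n % f == 0]

# Illustrative per-port channel counts (not from the diff).
channels = [64, 96]

# Intersect the factor sets of every input port, as in get_coarse_in_feasible.
feasible = set(get_factors(channels[0]))
for c in channels[1:]:
    feasible &= set(get_factors(c))

print(sorted(feasible))  # [1, 2, 4, 8, 16, 32] -> coarse values valid on every port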
